Merge branch 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 16 Mar 2011 03:01:36 +0000 (20:01 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 16 Mar 2011 03:01:36 +0000 (20:01 -0700)
* 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (27 commits)
  x86: Clean up apic.c and apic.h
  x86: Remove superfluous goal definition of tsc_sync
  x86: dt: Correct local apic documentation in device tree bindings
  x86: dt: Cleanup local apic setup
  x86: dt: Fix OLPC=y/INTEL_CE=n build
  rtc: cmos: Add OF bindings
  x86: ce4100: Use OF to setup devices
  x86: ioapic: Add OF bindings for IO_APIC
  x86: dtb: Add generic bus probe
  x86: dtb: Add support for PCI devices backed by dtb nodes
  x86: dtb: Add device tree support for HPET
  x86: dtb: Add early parsing of IO_APIC
  x86: dtb: Add irq domain abstraction
  x86: dtb: Add a device tree for CE4100
  x86: Add device tree support
  x86: e820: Remove conditional early mapping in parse_e820_ext
  x86: OLPC: Make OLPC=n build again
  x86: OLPC: Remove extra OLPC_OPENFIRMWARE_DT indirection
  x86: OLPC: Cleanup config maze completely
  x86: OLPC: Hide OLPC_OPENFIRMWARE config switch
  ...

Fix up conflicts in arch/x86/platform/ce4100/ce4100.c

1000 files changed:
.gitignore
Documentation/kernel-parameters.txt
Documentation/networking/00-INDEX
Documentation/networking/dns_resolver.txt
Documentation/rtc.txt
Documentation/spinlocks.txt
Documentation/trace/ftrace-design.txt
Documentation/trace/ftrace.txt
Documentation/trace/kprobetrace.txt
MAINTAINERS
Makefile
arch/alpha/Kconfig
arch/alpha/include/asm/futex.h
arch/alpha/include/asm/rwsem.h
arch/alpha/kernel/irq.c
arch/alpha/kernel/irq_alpha.c
arch/alpha/kernel/irq_i8259.c
arch/alpha/kernel/irq_impl.h
arch/alpha/kernel/irq_pyxis.c
arch/alpha/kernel/irq_srm.c
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/sys_alcor.c
arch/alpha/kernel/sys_cabriolet.c
arch/alpha/kernel/sys_dp264.c
arch/alpha/kernel/sys_eb64p.c
arch/alpha/kernel/sys_eiger.c
arch/alpha/kernel/sys_jensen.c
arch/alpha/kernel/sys_marvel.c
arch/alpha/kernel/sys_mikasa.c
arch/alpha/kernel/sys_noritake.c
arch/alpha/kernel/sys_rawhide.c
arch/alpha/kernel/sys_rx164.c
arch/alpha/kernel/sys_sable.c
arch/alpha/kernel/sys_takara.c
arch/alpha/kernel/sys_titan.c
arch/alpha/kernel/sys_wildfire.c
arch/alpha/kernel/time.c
arch/arm/common/Kconfig
arch/arm/include/asm/futex.h
arch/arm/include/asm/mach/arch.h
arch/arm/include/asm/pgalloc.h
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/time.c
arch/arm/mach-clps711x/include/mach/time.h
arch/arm/mach-davinci/cpufreq.c
arch/arm/mach-davinci/devices-da8xx.c
arch/arm/mach-davinci/gpio-tnetv107x.c
arch/arm/mach-davinci/include/mach/clkdev.h
arch/arm/mach-omap2/clkt_dpll.c
arch/arm/mach-omap2/mailbox.c
arch/arm/mach-omap2/mux.c
arch/arm/mach-omap2/pm-debug.c
arch/arm/mach-omap2/prcm_mpu44xx.h
arch/arm/mach-omap2/smartreflex.c
arch/arm/mach-omap2/timer-gp.c
arch/arm/mach-pxa/pxa25x.c
arch/arm/mach-pxa/tosa-bt.c
arch/arm/mach-pxa/tosa.c
arch/arm/mach-s3c2440/Kconfig
arch/arm/mach-s3c2440/include/mach/gta02.h
arch/arm/mach-s3c64xx/clock.c
arch/arm/mach-s3c64xx/dma.c
arch/arm/mach-s3c64xx/gpiolib.c
arch/arm/mach-s3c64xx/mach-smdk6410.c
arch/arm/mach-s3c64xx/setup-keypad.c
arch/arm/mach-s3c64xx/setup-sdhci.c
arch/arm/mach-s5p64x0/include/mach/gpio.h
arch/arm/mach-shmobile/board-ag5evm.c
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/board-mackerel.c
arch/arm/mach-shmobile/clock-sh73a0.c
arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
arch/arm/mach-shmobile/include/mach/head-mackerel.txt
arch/arm/mach-tegra/include/mach/kbc.h
arch/arm/plat-omap/mailbox.c
arch/arm/plat-samsung/dev-uart.c
arch/blackfin/kernel/time.c
arch/blackfin/lib/outs.S
arch/blackfin/mach-common/cache.S
arch/cris/arch-v10/kernel/time.c
arch/cris/arch-v32/kernel/smp.c
arch/cris/arch-v32/kernel/time.c
arch/cris/kernel/vmlinux.lds.S
arch/frv/include/asm/futex.h
arch/frv/kernel/futex.c
arch/frv/kernel/time.c
arch/h8300/kernel/time.c
arch/h8300/kernel/timer/timer8.c
arch/ia64/include/asm/futex.h
arch/ia64/include/asm/rwsem.h
arch/ia64/include/asm/xen/hypercall.h
arch/ia64/kernel/time.c
arch/ia64/xen/suspend.c
arch/ia64/xen/time.c
arch/m32r/kernel/time.c
arch/m68k/bvme6000/config.c
arch/m68k/kernel/time.c
arch/m68k/mvme147/config.c
arch/m68k/mvme16x/config.c
arch/m68k/sun3/sun3ints.c
arch/m68knommu/kernel/time.c
arch/microblaze/include/asm/futex.h
arch/mips/Kconfig
arch/mips/alchemy/mtx-1/board_setup.c
arch/mips/alchemy/mtx-1/platform.c
arch/mips/alchemy/xxs1500/board_setup.c
arch/mips/include/asm/futex.h
arch/mips/include/asm/perf_event.h
arch/mips/kernel/ftrace.c
arch/mips/kernel/perf_event.c
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/kernel/signal.c
arch/mips/kernel/signal32.c
arch/mips/kernel/smp.c
arch/mips/kernel/syscall.c
arch/mips/kernel/vpe.c
arch/mips/loongson/Kconfig
arch/mips/loongson/common/cmdline.c
arch/mips/loongson/common/machtype.c
arch/mips/math-emu/ieee754int.h
arch/mips/mm/init.c
arch/mips/mm/tlbex.c
arch/mips/pci/ops-pmcmsp.c
arch/mips/pmc-sierra/Kconfig
arch/mips/pmc-sierra/msp71xx/msp_time.c
arch/mn10300/include/asm/atomic.h
arch/mn10300/include/asm/uaccess.h
arch/mn10300/kernel/time.c
arch/mn10300/mm/cache-inv-icache.c
arch/parisc/hpux/sys_hpux.c
arch/parisc/include/asm/futex.h
arch/parisc/kernel/time.c
arch/powerpc/include/asm/futex.h
arch/powerpc/include/asm/lppaca.h
arch/powerpc/include/asm/machdep.h
arch/powerpc/include/asm/rwsem.h
arch/powerpc/kernel/machine_kexec.c
arch/powerpc/kernel/paca.c
arch/powerpc/kernel/process.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/tlb_hash64.c
arch/powerpc/platforms/cell/spufs/syscalls.c
arch/powerpc/platforms/iseries/dt.c
arch/powerpc/platforms/iseries/setup.c
arch/s390/include/asm/futex.h
arch/s390/include/asm/rwsem.h
arch/s390/include/asm/uaccess.h
arch/s390/lib/uaccess.h
arch/s390/lib/uaccess_pt.c
arch/s390/lib/uaccess_std.c
arch/sh/include/asm/futex-irq.h
arch/sh/include/asm/futex.h
arch/sh/include/asm/rwsem.h
arch/sh/include/asm/sections.h
arch/sh/kernel/cpu/sh4/setup-sh7750.c
arch/sh/lib/delay.c
arch/sh/mm/cache.c
arch/sparc/include/asm/futex_64.h
arch/sparc/include/asm/rwsem.h
arch/sparc/kernel/pcic.c
arch/sparc/kernel/time_32.c
arch/sparc/lib/atomic32.c
arch/tile/include/asm/futex.h
arch/um/Kconfig.common
arch/um/Kconfig.x86
arch/um/drivers/mconsole_kern.c
arch/um/drivers/ubd_kern.c
arch/um/kernel/irq.c
arch/x86/Kconfig
arch/x86/Kconfig.cpu
arch/x86/boot/compressed/mkpiggy.c
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/acpi.h
arch/x86/include/asm/amd_nb.h
arch/x86/include/asm/apic.h
arch/x86/include/asm/ce4100.h [new file with mode: 0644]
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/entry_arch.h
arch/x86/include/asm/frame.h
arch/x86/include/asm/futex.h
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/init.h
arch/x86/include/asm/ipi.h
arch/x86/include/asm/irq_vectors.h
arch/x86/include/asm/kdebug.h
arch/x86/include/asm/mpspec.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/nmi.h
arch/x86/include/asm/numa.h
arch/x86/include/asm/numa_32.h
arch/x86/include/asm/numa_64.h
arch/x86/include/asm/page_types.h
arch/x86/include/asm/perf_event_p4.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/rwsem.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/smpboot_hooks.h
arch/x86/include/asm/system.h
arch/x86/include/asm/topology.h
arch/x86/include/asm/unistd_32.h
arch/x86/include/asm/unistd_64.h
arch/x86/include/asm/uv/uv_bau.h
arch/x86/include/asm/xen/hypercall.h
arch/x86/include/asm/xen/page.h
arch/x86/include/asm/xen/pci.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/amd_nb.c
arch/x86/kernel/apb_timer.c
arch/x86/kernel/aperture_64.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/apic_flat_64.c
arch/x86/kernel/apic/apic_noop.c
arch/x86/kernel/apic/bigsmp_32.c
arch/x86/kernel/apic/es7000_32.c
arch/x86/kernel/apic/hw_nmi.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/ipi.c
arch/x86/kernel/apic/numaq_32.c
arch/x86/kernel/apic/probe_32.c
arch/x86/kernel/apic/summit_32.c
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/apic/x2apic_phys.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/check.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/cpu/perf_event_p6.c
arch/x86/kernel/cpu/perfctr-watchdog.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/e820.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/ftrace.c
arch/x86/kernel/head_32.S
arch/x86/kernel/hpet.c
arch/x86/kernel/i8259.c
arch/x86/kernel/ioport.c
arch/x86/kernel/irq.c
arch/x86/kernel/irqinit.c
arch/x86/kernel/kgdb.c
arch/x86/kernel/kprobes.c
arch/x86/kernel/microcode_amd.c
arch/x86/kernel/microcode_core.c
arch/x86/kernel/process.c
arch/x86/kernel/reboot.c
arch/x86/kernel/setup.c
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/syscall_table_32.S
arch/x86/kernel/vmlinux.lds.S
arch/x86/kernel/x8664_ksyms_64.c
arch/x86/kvm/svm.c
arch/x86/kvm/trace.h
arch/x86/lguest/boot.c
arch/x86/lib/atomic64_386_32.S
arch/x86/lib/atomic64_cx8_32.S
arch/x86/lib/checksum_32.S
arch/x86/lib/memmove_64.S [new file with mode: 0644]
arch/x86/lib/memmove_64.c [deleted file]
arch/x86/lib/rwsem_64.S
arch/x86/lib/semaphore_32.S
arch/x86/lib/thunk_32.S
arch/x86/lib/thunk_64.S
arch/x86/mm/Makefile
arch/x86/mm/amdtopology_64.c
arch/x86/mm/fault.c
arch/x86/mm/init.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/numa.c
arch/x86/mm/numa_32.c
arch/x86/mm/numa_64.c
arch/x86/mm/numa_emulation.c [new file with mode: 0644]
arch/x86/mm/numa_internal.h [new file with mode: 0644]
arch/x86/mm/pageattr.c
arch/x86/mm/pgtable.c
arch/x86/mm/srat_32.c
arch/x86/mm/srat_64.c
arch/x86/mm/tlb.c
arch/x86/pci/amd_bus.c
arch/x86/pci/ce4100.c
arch/x86/pci/xen.c
arch/x86/platform/ce4100/ce4100.c
arch/x86/platform/olpc/olpc_dt.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/platform/uv/uv_irq.c
arch/x86/platform/visws/visws_quirks.c
arch/x86/xen/Kconfig
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/x86/xen/smp.c
arch/x86/xen/suspend.c
arch/x86/xen/time.c
arch/x86/xen/xen-head.S
arch/x86/xen/xen-ops.h
arch/xtensa/include/asm/rwsem.h
arch/xtensa/kernel/time.c
block/blk-core.c
block/blk-flush.c
block/blk-lib.c
block/blk-throttle.c
block/cfq-iosched.c
block/elevator.c
block/genhd.c
block/ioctl.c
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/evgpe.c
drivers/acpi/acpica/evxfgpe.c
drivers/acpi/debugfs.c
drivers/acpi/numa.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/xen-blkfront.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/char/agp/amd64-agp.c
drivers/char/agp/intel-agp.h
drivers/char/agp/intel-gtt.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/mmtimer.c
drivers/char/pcmcia/cm4000_cs.c
drivers/char/pcmcia/ipwireless/main.c
drivers/char/tpm/tpm.c
drivers/char/virtio_console.c
drivers/cpufreq/cpufreq.c
drivers/gpio/ml_ioh_gpio.c
drivers/gpio/pch_gpio.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_dma.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_mm.c
drivers/gpu/drm/nouveau/nouveau_notifier.c
drivers/gpu/drm/nouveau/nv50_instmem.c
drivers/gpu/drm/nouveau/nv50_vm.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_blit_kms.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_blit_kms.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv770.c
drivers/hwmon/ad7414.c
drivers/hwmon/adt7411.c
drivers/hwmon/f71882fg.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-ocores.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-stu300.c
drivers/idle/intel_idle.c
drivers/input/gameport/gameport.c
drivers/input/keyboard/tegra-kbc.c
drivers/input/mouse/synaptics.h
drivers/input/serio/serio.c
drivers/isdn/hardware/eicon/istream.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/md.h
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/common/tuners/tda8290.c
drivers/media/dvb/dvb-usb/dib0700_devices.c
drivers/media/dvb/dvb-usb/lmedm04.c
drivers/media/dvb/frontends/dib7000m.c
drivers/media/dvb/frontends/dib7000m.h
drivers/media/dvb/mantis/mantis_pci.c
drivers/media/rc/ir-raw.c
drivers/media/rc/mceusb.c
drivers/media/rc/nuvoton-cir.c
drivers/media/rc/nuvoton-cir.h
drivers/media/rc/rc-main.c
drivers/media/video/au0828/au0828-video.c
drivers/media/video/cx18/cx18-cards.c
drivers/media/video/cx18/cx18-driver.c
drivers/media/video/cx18/cx18-driver.h
drivers/media/video/cx18/cx18-dvb.c
drivers/media/video/cx23885/cx23885-i2c.c
drivers/media/video/cx25840/cx25840-core.c
drivers/media/video/ivtv/ivtv-irq.c
drivers/media/video/mem2mem_testdev.c
drivers/media/video/s2255drv.c
drivers/mfd/asic3.c
drivers/mfd/davinci_voicecodec.c
drivers/mfd/tps6586x.c
drivers/mfd/ucb1x00-ts.c
drivers/mfd/wm8994-core.c
drivers/misc/bmp085.c
drivers/mmc/core/core.c
drivers/mmc/core/sdio.c
drivers/mtd/chips/cfi_cmdset_0001.c
drivers/mtd/chips/jedec_probe.c
drivers/mtd/maps/amd76xrom.c
drivers/mtd/mtd_blkdevs.c
drivers/mtd/nand/omap2.c
drivers/mtd/onenand/generic.c
drivers/mtd/onenand/omap2.c
drivers/net/ariadne.c
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/bnx2x/bnx2x_cmn.h
drivers/net/bnx2x/bnx2x_ethtool.c
drivers/net/bnx2x/bnx2x_init.h
drivers/net/bnx2x/bnx2x_main.c
drivers/net/bnx2x/bnx2x_stats.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_3ad.h
drivers/net/can/softing/softing_main.c
drivers/net/cnic.c
drivers/net/davinci_emac.c
drivers/net/dm9000.c
drivers/net/dnet.c
drivers/net/e1000/e1000_osdep.h
drivers/net/e1000e/netdev.c
drivers/net/fec.c
drivers/net/igbvf/vf.c
drivers/net/macb.c
drivers/net/macvtap.c
drivers/net/pcmcia/fmvj18x_cs.c
drivers/net/r6040.c
drivers/net/r8169.c
drivers/net/sfc/ethtool.c
drivers/net/skge.c
drivers/net/smsc911x.c
drivers/net/usb/dm9601.c
drivers/net/wireless/ath/ath5k/phy.c
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/carl9170/usb.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/p54/p54pci.c
drivers/net/wireless/p54/p54usb.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/nfc/Kconfig
drivers/nfc/pn544.c
drivers/of/pdt.c
drivers/pci/xen-pcifront.c
drivers/pcmcia/pcmcia_resource.c
drivers/pcmcia/pxa2xx_base.c
drivers/pcmcia/pxa2xx_base.h
drivers/pcmcia/pxa2xx_colibri.c
drivers/pcmcia/pxa2xx_lubbock.c
drivers/pps/generators/Kconfig
drivers/pps/kapi.c
drivers/rapidio/rio-sysfs.c
drivers/regulator/mc13xxx-regulator-core.c
drivers/regulator/wm831x-dcdc.c
drivers/rtc/class.c
drivers/rtc/interface.c
drivers/rtc/rtc-at91rm9200.c
drivers/rtc/rtc-at91sam9.c
drivers/rtc/rtc-bfin.c
drivers/rtc/rtc-cmos.c
drivers/rtc/rtc-davinci.c
drivers/rtc/rtc-ds1511.c
drivers/rtc/rtc-ds1553.c
drivers/rtc/rtc-ds3232.c
drivers/rtc/rtc-jz4740.c
drivers/rtc/rtc-mc13xxx.c
drivers/rtc/rtc-mpc5121.c
drivers/rtc/rtc-mrst.c
drivers/rtc/rtc-mxc.c
drivers/rtc/rtc-nuc900.c
drivers/rtc/rtc-omap.c
drivers/rtc/rtc-pcap.c
drivers/rtc/rtc-pcf50633.c
drivers/rtc/rtc-pl030.c
drivers/rtc/rtc-pl031.c
drivers/rtc/rtc-proc.c
drivers/rtc/rtc-pxa.c
drivers/rtc/rtc-rs5c372.c
drivers/rtc/rtc-rx8025.c
drivers/rtc/rtc-s3c.c
drivers/rtc/rtc-sa1100.c
drivers/rtc/rtc-sh.c
drivers/rtc/rtc-stmp3xxx.c
drivers/rtc/rtc-test.c
drivers/rtc/rtc-twl.c
drivers/rtc/rtc-vr41xx.c
drivers/rtc/rtc-wm831x.c
drivers/rtc/rtc-wm8350.c
drivers/s390/block/xpram.c
drivers/s390/char/keyboard.c
drivers/s390/char/tape.h
drivers/s390/char/tape_34xx.c
drivers/s390/char/tape_3590.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_fc.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/thermal/Kconfig
drivers/thermal/thermal_sys.c
drivers/tty/serial/serial_cs.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/gadget/f_phonet.c
drivers/usb/host/ehci-xilinx-of.c
drivers/usb/host/xhci-dbg.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/omap2430.c
drivers/usb/serial/sierra.c
drivers/usb/serial/usb_wwan.c
drivers/usb/serial/visor.c
drivers/video/backlight/ltv350qv.c
drivers/watchdog/cpwd.c
drivers/watchdog/hpwdt.c
drivers/watchdog/sbc_fitpc2_wdt.c
drivers/watchdog/sch311x_wdt.c
drivers/watchdog/w83697ug_wdt.c
drivers/xen/balloon.c
drivers/xen/events.c
drivers/xen/manage.c
drivers/xen/platform-pci.c
fs/Kconfig
fs/Makefile
fs/afs/write.c
fs/aio.c
fs/block_dev.c
fs/btrfs/ctree.h
fs/btrfs/export.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/lzo.c
fs/btrfs/relocation.c
fs/btrfs/super.c
fs/btrfs/volumes.c
fs/ceph/dir.c
fs/ceph/inode.c
fs/ceph/super.h
fs/compat.c
fs/dcache.c
fs/eventpoll.c
fs/exec.c
fs/exofs/namei.c
fs/exportfs/expfs.c
fs/ext2/namei.c
fs/ext3/namei.c
fs/ext3/super.c
fs/ext4/namei.c
fs/ext4/super.c
fs/fat/inode.c
fs/fat/namei_vfat.c
fs/fcntl.c
fs/fhandle.c [new file with mode: 0644]
fs/file_table.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/gfs2/dentry.c
fs/gfs2/export.c
fs/gfs2/main.c
fs/hfs/dir.c
fs/inode.c
fs/internal.h
fs/isofs/export.c
fs/jfs/namei.c
fs/minix/namei.c
fs/namei.c
fs/namespace.c
fs/nfs/inode.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4filelayoutdev.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4xdr.c
fs/nfs/nfsroot.c
fs/nfs/unlink.c
fs/nfs/write.c
fs/nfsctl.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nilfs2/btnode.c
fs/nilfs2/btnode.h
fs/nilfs2/mdt.c
fs/nilfs2/namei.c
fs/nilfs2/page.c
fs/nilfs2/page.h
fs/nilfs2/segment.c
fs/nilfs2/super.c
fs/ocfs2/dcache.c
fs/ocfs2/export.c
fs/ocfs2/journal.h
fs/ocfs2/refcounttree.c
fs/ocfs2/super.c
fs/open.c
fs/partitions/ldm.c
fs/partitions/osf.c
fs/proc/base.c
fs/proc/inode.c
fs/proc/proc_devtree.c
fs/proc/proc_sysctl.c
fs/reiserfs/inode.c
fs/reiserfs/namei.c
fs/reiserfs/xattr.c
fs/stat.c
fs/statfs.c
fs/sysv/namei.c
fs/ubifs/dir.c
fs/udf/namei.c
fs/ufs/namei.c
fs/xfs/linux-2.6/xfs_discard.c
fs/xfs/linux-2.6/xfs_export.c
fs/xfs/linux-2.6/xfs_ioctl.c
fs/xfs/xfs_fsops.c
include/asm-generic/cputime.h
include/asm-generic/fcntl.h
include/asm-generic/futex.h
include/asm-generic/pgtable.h
include/asm-generic/sections.h
include/asm-generic/unistd.h
include/asm-generic/vmlinux.lds.h
include/drm/drmP.h
include/keys/rxrpc-type.h
include/linux/blkdev.h
include/linux/blktrace_api.h
include/linux/ceph/messenger.h
include/linux/cgroup.h
include/linux/cgroup_subsys.h
include/linux/dcbnl.h
include/linux/debugobjects.h
include/linux/exportfs.h
include/linux/fcntl.h
include/linux/file.h
include/linux/fs.h
include/linux/ftrace.h
include/linux/ftrace_event.h
include/linux/gfp.h
include/linux/hrtimer.h
include/linux/interrupt.h
include/linux/irq.h
include/linux/irqdesc.h
include/linux/jiffies.h
include/linux/kthread.h
include/linux/mfd/wm8994/core.h
include/linux/mm.h
include/linux/namei.h
include/linux/netdevice.h
include/linux/nfs_fs_sb.h
include/linux/pci_ids.h
include/linux/perf_event.h
include/linux/plist.h
include/linux/pm.h
include/linux/pm_wakeup.h
include/linux/posix-clock.h [new file with mode: 0644]
include/linux/posix-timers.h
include/linux/ptrace.h
include/linux/ring_buffer.h
include/linux/rio_regs.h
include/linux/rtc.h
include/linux/rwlock_types.h
include/linux/rwsem-spinlock.h
include/linux/rwsem.h
include/linux/sched.h
include/linux/security.h
include/linux/spinlock_types.h
include/linux/sunrpc/sched.h
include/linux/syscalls.h
include/linux/sysctl.h
include/linux/thermal.h
include/linux/thread_info.h
include/linux/time.h
include/linux/timex.h
include/net/ipv6.h
include/net/netfilter/nf_tproxy_core.h
include/net/sch_generic.h
include/pcmcia/ds.h
include/sound/wm8903.h
include/target/target_core_transport.h
include/trace/events/block.h
include/trace/events/mce.h
include/trace/events/module.h
include/trace/events/skb.h
include/xen/events.h
include/xen/interface/io/blkif.h
include/xen/interface/xen.h
include/xen/xen-ops.h
init/Kconfig
kernel/audit_watch.c
kernel/cgroup.c
kernel/compat.c
kernel/cpuset.c
kernel/cred.c
kernel/futex.c
kernel/hrtimer.c
kernel/irq/Kconfig
kernel/irq/autoprobe.c
kernel/irq/chip.c
kernel/irq/compat.h [new file with mode: 0644]
kernel/irq/debug.h [new file with mode: 0644]
kernel/irq/handle.c
kernel/irq/internals.h
kernel/irq/irqdesc.c
kernel/irq/manage.c
kernel/irq/migration.c
kernel/irq/pm.c
kernel/irq/proc.c
kernel/irq/resend.c
kernel/irq/settings.h [new file with mode: 0644]
kernel/irq/spurious.c
kernel/perf_event.c
kernel/posix-cpu-timers.c
kernel/posix-timers.c
kernel/ptrace.c
kernel/rtmutex-debug.c
kernel/rtmutex-tester.c
kernel/rtmutex.c
kernel/rtmutex_common.h
kernel/sched.c
kernel/sched_autogroup.c
kernel/sched_autogroup.h
kernel/sched_debug.c
kernel/sched_fair.c
kernel/sched_idletask.c
kernel/sched_rt.c
kernel/sched_stoptask.c
kernel/softirq.c
kernel/sys_ni.c
kernel/sysctl.c
kernel/sysctl_binary.c
kernel/time.c
kernel/time/Makefile
kernel/time/clockevents.c
kernel/time/jiffies.c
kernel/time/ntp.c
kernel/time/posix-clock.c [new file with mode: 0644]
kernel/time/tick-broadcast.c
kernel/time/tick-common.c
kernel/time/tick-internal.h
kernel/time/tick-oneshot.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/timer.c
kernel/trace/blktrace.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_entries.h
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_output.c
kernel/trace/trace_sched_switch.c
kernel/trace/trace_syscalls.c
kernel/workqueue.c
lib/debugobjects.c
lib/nlattr.c
lib/plist.c
lib/rwsem.c
lib/swiotlb.c
mm/Makefile
mm/bootmem.c
mm/huge_memory.c
mm/memory.c
mm/mempolicy.c
mm/migrate.c
mm/mremap.c
mm/nobootmem.c [new file with mode: 0644]
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/swapfile.c
mm/truncate.c
mm/vmscan.c
net/Makefile
net/bluetooth/rfcomm/tty.c
net/bridge/Kconfig
net/bridge/br_multicast.c
net/ceph/messenger.c
net/ceph/pagevec.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/pktgen.c
net/core/scm.c
net/dcb/dcbnl.c
net/dccp/input.c
net/dns_resolver/dns_key.c
net/ipv4/devinet.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_gre.c
net/ipv4/ipip.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/netfilter/ip6t_LOG.c
net/ipv6/route.c
net/ipv6/sit.c
net/mac80211/iface.c
net/mac80211/mlme.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_log.c
net/netfilter/nf_tproxy_core.c
net/netfilter/xt_TPROXY.c
net/netfilter/xt_socket.c
net/netlink/af_netlink.c
net/rds/ib_send.c
net/rds/loop.c
net/rxrpc/ar-input.c
net/rxrpc/ar-key.c
net/sched/sch_generic.c
net/sctp/sm_make_chunk.c
net/sunrpc/sched.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/sunrpc/xprtsock.c
net/unix/af_unix.c
net/unix/garbage.c
net/wireless/wext-compat.c
scripts/basic/fixdep.c
scripts/checkpatch.pl
scripts/kconfig/streamline_config.pl
scripts/mod/sumversion.c
scripts/recordmcount.c
scripts/recordmcount.pl
scripts/rt-tester/rt-tester.py
scripts/rt-tester/t2-l1-2rt-sameprio.tst
scripts/rt-tester/t2-l1-pi.tst
scripts/rt-tester/t2-l1-signal.tst
scripts/rt-tester/t2-l2-2rt-deadlock.tst
scripts/rt-tester/t3-l1-pi-1rt.tst
scripts/rt-tester/t3-l1-pi-2rt.tst
scripts/rt-tester/t3-l1-pi-3rt.tst
scripts/rt-tester/t3-l1-pi-signal.tst
scripts/rt-tester/t3-l1-pi-steal.tst
scripts/rt-tester/t3-l2-pi.tst
scripts/rt-tester/t4-l2-pi-deboost.tst
scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
scripts/rt-tester/t5-l4-pi-boost-deboost.tst
security/commoncap.c
security/security.c
sound/core/jack.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/hda/patch_via.c
sound/soc/codecs/cx20442.c
sound/soc/codecs/wm8903.c
sound/soc/codecs/wm8903.h
sound/soc/codecs/wm8978.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm9081.c
sound/soc/codecs/wm_hubs.c
sound/soc/imx/eukrea-tlv320.c
sound/soc/omap/am3517evm.c
sound/soc/pxa/e740_wm9705.c
sound/soc/pxa/e750_wm9705.c
sound/soc/pxa/e800_wm9712.c
sound/soc/pxa/em-x270.c
sound/soc/pxa/mioa701_wm9713.c
sound/soc/pxa/palm27x.c
sound/soc/pxa/tosa.c
sound/soc/pxa/zylonite.c
sound/soc/soc-dapm.c
sound/usb/card.c
sound/usb/pcm.c
sound/usb/usbaudio.h
tools/perf/.gitignore
tools/perf/Documentation/Makefile
tools/perf/Documentation/perf-list.txt
tools/perf/Documentation/perf-lock.txt
tools/perf/Documentation/perf-probe.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Makefile
tools/perf/bench/sched-pipe.c
tools/perf/builtin-annotate.c
tools/perf/builtin-diff.c
tools/perf/builtin-inject.c
tools/perf/builtin-kmem.c
tools/perf/builtin-list.c
tools/perf/builtin-lock.c
tools/perf/builtin-probe.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-test.c
tools/perf/builtin-timechart.c
tools/perf/builtin-top.c
tools/perf/perf.h
tools/perf/python/twatch.py [new file with mode: 0755]
tools/perf/util/annotate.c [new file with mode: 0644]
tools/perf/util/annotate.h [new file with mode: 0644]
tools/perf/util/build-id.c
tools/perf/util/cache.h
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/cgroup.c [new file with mode: 0644]
tools/perf/util/cgroup.h [new file with mode: 0644]
tools/perf/util/cpumap.c
tools/perf/util/cpumap.h
tools/perf/util/debug.c
tools/perf/util/debug.h
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/evlist.c [new file with mode: 0644]
tools/perf/util/evlist.h [new file with mode: 0644]
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/exec_cmd.c
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/include/linux/list.h
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/probe-event.c
tools/perf/util/probe-event.h
tools/perf/util/probe-finder.c
tools/perf/util/python.c [new file with mode: 0644]
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/setup.py [new file with mode: 0644]
tools/perf/util/strfilter.c [new file with mode: 0644]
tools/perf/util/strfilter.h [new file with mode: 0644]
tools/perf/util/svghelper.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/thread.c
tools/perf/util/thread.h
tools/perf/util/thread_map.c [new file with mode: 0644]
tools/perf/util/thread_map.h [new file with mode: 0644]
tools/perf/util/top.c [new file with mode: 0644]
tools/perf/util/top.h [new file with mode: 0644]
tools/perf/util/trace-event-parse.c
tools/perf/util/ui/browser.c
tools/perf/util/ui/browser.h
tools/perf/util/ui/browsers/annotate.c
tools/perf/util/ui/browsers/hists.c
tools/perf/util/ui/browsers/map.c
tools/perf/util/ui/browsers/top.c [new file with mode: 0644]
tools/perf/util/ui/helpline.c
tools/perf/util/ui/libslang.h
tools/perf/util/ui/setup.c
tools/perf/util/ui/ui.h [new file with mode: 0644]
tools/perf/util/ui/util.c
tools/perf/util/util.h
tools/testing/ktest/ktest.pl

index 8faa6c02b39ec3896bd4e989c82ce7f6dbdaf04a..5d56a3fd0de6b9d4d8acc0a26495bd24c489d31f 100644 (file)
@@ -28,6 +28,7 @@ modules.builtin
 *.gz
 *.bz2
 *.lzma
+*.xz
 *.lzo
 *.patch
 *.gcno
index f4a04c0c7edcaf9ecb21c3c3ee9f7f1b2c77b06f..738c6fda3fb015dce6ae90af393aeda543254380 100644 (file)
@@ -2444,6 +2444,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        <deci-seconds>: poll at this frequency
                        0: no polling (default)
 
+       threadirqs      [KNL]
+                       Force threading of all interrupt handlers except those
+                       marked explicitly IRQF_NO_THREAD.
+
        topology=       [S390]
                        Format: {off | on}
                        Specify if the kernel should make use of the cpu
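The threadirqs parameter added above interacts with the IRQF_NO_THREAD flag it
mentions: a handler that must stay in hard interrupt context opts out of forced
threading when the interrupt is requested. A minimal sketch, assuming a
hypothetical driver named "foo" (the handler, device pointer, and name string
are illustrative):

	#include <linux/interrupt.h>

	static irqreturn_t foo_interrupt(int irq, void *dev_id)
	{
		/* runs in hard irq context even when booted with threadirqs */
		return IRQ_HANDLED;
	}

	/* in the probe path: IRQF_NO_THREAD exempts this handler from
	   forced threading */
	err = request_irq(irq, foo_interrupt, IRQF_NO_THREAD, "foo", dev);
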
index fe5c099b8fc8884210aecc8ccfca1c642d2beb95..4edd78dfb3622f195a4b05d5948a8e39ceb43070 100644 (file)
@@ -40,8 +40,6 @@ decnet.txt
        - info on using the DECnet networking layer in Linux.
 depca.txt
        - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
-dgrs.txt
-       - the Digi International RightSwitch SE-X Ethernet driver
 dmfe.txt
        - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
 e100.txt
@@ -50,8 +48,6 @@ e1000.txt
        - info on Intel's E1000 line of gigabit ethernet boards
 eql.txt
        - serial IP load balancing
-ethertap.txt
-       - the Ethertap user space packet reception and transmission driver
 ewrk3.txt
        - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
 filter.txt
@@ -104,8 +100,6 @@ tuntap.txt
        - TUN/TAP device driver, allowing user space Rx/Tx of packets.
 vortex.txt
        - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
-wavelan.txt
-       - AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
 x25.txt
        - general info on X.25 development.
 x25-iface.txt
index aefd1e681804b2759f196249883ddeacd029bce6..04ca06325b087157b8f21afb3f571350b7e5ef3f 100644 (file)
@@ -61,7 +61,6 @@ before the more general line given above as the first match is the one taken.
        create  dns_resolver    foo:*   *       /usr/sbin/dns.foo %k
 
 
-
 =====
 USAGE
 =====
@@ -104,6 +103,14 @@ implemented in the module can be called after doing:
      returned also.
 
 
+===============================
+READING DNS KEYS FROM USERSPACE
+===============================
+
+Keys of dns_resolver type can be read from userspace using keyctl_read() or
+"keyctl read/print/pipe".
+
+
 =========
 MECHANISM
 =========
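A minimal userspace sketch of the read interface documented above, assuming
libkeyutils is available and the key has already been instantiated (the
hostname is illustrative):

	#include <keyutils.h>
	#include <stdio.h>

	int main(void)
	{
		/* find the dns_resolver key and read its payload */
		key_serial_t key = request_key("dns_resolver", "foo.example.com",
					       NULL, KEY_SPEC_SESSION_KEYRING);
		char buf[256];
		long n = keyctl_read(key, buf, sizeof(buf));

		if (key < 0 || n < 0)
			return 1;
		printf("%.*s\n", (int)n, buf);
		return 0;
	}
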
index 9104c10620840fcc45c47938a1b96dea48309aa7..250160469d83e65c3a5235ee7b00b83b40abb68d 100644 (file)
@@ -178,38 +178,29 @@ RTC class framework, but can't be supported by the older driver.
        setting the longer alarm time and enabling its IRQ using a single
        request (using the same model as EFI firmware).
 
-    *  RTC_UIE_ON, RTC_UIE_OFF ... if the RTC offers IRQs, it probably
-       also offers update IRQs whenever the "seconds" counter changes.
-       If needed, the RTC framework can emulate this mechanism.
+    *  RTC_UIE_ON, RTC_UIE_OFF ... if the RTC offers IRQs, the RTC framework
+       will emulate this mechanism.
 
-    *  RTC_PIE_ON, RTC_PIE_OFF, RTC_IRQP_SET, RTC_IRQP_READ ... another
-       feature often accessible with an IRQ line is a periodic IRQ, issued
-       at settable frequencies (usually 2^N Hz).
+    *  RTC_PIE_ON, RTC_PIE_OFF, RTC_IRQP_SET, RTC_IRQP_READ ... these ioctls
+       are emulated via a kernel hrtimer.
 
 In many cases, the RTC alarm can be a system wake event, used to force
 Linux out of a low power sleep state (or hibernation) back to a fully
 operational state.  For example, a system could enter a deep power saving
 state until it's time to execute some scheduled tasks.
 
-Note that many of these ioctls need not actually be implemented by your
-driver.  The common rtc-dev interface handles many of these nicely if your
-driver returns ENOIOCTLCMD.  Some common examples:
+Note that many of these ioctls are handled by the common rtc-dev interface.
+Some common examples:
 
     *  RTC_RD_TIME, RTC_SET_TIME: the read_time/set_time functions will be
        called with appropriate values.
 
-    *  RTC_ALM_SET, RTC_ALM_READ, RTC_WKALM_SET, RTC_WKALM_RD: the
-       set_alarm/read_alarm functions will be called.
+    *  RTC_ALM_SET, RTC_ALM_READ, RTC_WKALM_SET, RTC_WKALM_RD: gets or sets
+       the alarm rtc_timer. May call the set_alarm driver function.
 
-    *  RTC_IRQP_SET, RTC_IRQP_READ: the irq_set_freq function will be called
-       to set the frequency while the framework will handle the read for you
-       since the frequency is stored in the irq_freq member of the rtc_device
-       structure.  Your driver needs to initialize the irq_freq member during
-       init.  Make sure you check the requested frequency is in range of your
-       hardware in the irq_set_freq function.  If it isn't, return -EINVAL.  If
-       you cannot actually change the frequency, do not define irq_set_freq.
+    *  RTC_IRQP_SET, RTC_IRQP_READ: These are emulated by the generic code.
 
-    *  RTC_PIE_ON, RTC_PIE_OFF: the irq_set_state function will be called.
+    *  RTC_PIE_ON, RTC_PIE_OFF: These are also emulated by the generic code.
 
 If all else fails, check out the rtc-test.c driver!
 
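Since UIE and PIE are now emulated by the framework, the userspace view is
unchanged. A short sketch of driving the update IRQ from userspace (the device
node /dev/rtc0 is an assumption):

	#include <linux/rtc.h>
	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/rtc0", O_RDONLY);
		unsigned long data;

		ioctl(fd, RTC_UIE_ON, 0);	/* emulated if the driver lacks it */
		read(fd, &data, sizeof(data));	/* blocks until the next update IRQ */
		ioctl(fd, RTC_UIE_OFF, 0);

		close(fd);
		return 0;
	}
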
index 178c831b907d08c8096b72c307508d0b272fccbe..2e3c64b1a6a5a64ede63c9e76377f79514e95088 100644 (file)
@@ -86,7 +86,7 @@ to change the variables it has to get an exclusive write lock.
 
 The routines look the same as above:
 
-   rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
+   rwlock_t xxx_lock = __RW_LOCK_UNLOCKED(xxx_lock);
 
        unsigned long flags;
 
@@ -196,25 +196,3 @@ appropriate:
 
 For static initialization, use DEFINE_SPINLOCK() / DEFINE_RWLOCK() or
 __SPIN_LOCK_UNLOCKED() / __RW_LOCK_UNLOCKED() as appropriate.
-
-SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED are deprecated.  These interfere
-with lockdep state tracking.
-
-Most of the time, you can simply turn:
-       static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
-into:
-       static DEFINE_SPINLOCK(xxx_lock);
-
-Static structure member variables go from:
-
-       struct foo bar {
-               .lock   =       SPIN_LOCK_UNLOCKED;
-       };
-
-to:
-
-       struct foo bar {
-               .lock   =       __SPIN_LOCK_UNLOCKED(bar.lock);
-       };
-
-Declaration of static rw_locks undergo a similar transformation.
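Putting the surviving advice together, a small sketch of the non-deprecated
initializers (names taken from the examples above):

	#include <linux/spinlock.h>

	/* static locks: use the DEFINE_* helpers */
	static DEFINE_SPINLOCK(xxx_lock);
	static DEFINE_RWLOCK(yyy_lock);

	/* locks embedded in structures: pass the member so lockdep
	   gets a usable name */
	struct foo {
		spinlock_t lock;
	};

	static struct foo bar = {
		.lock = __SPIN_LOCK_UNLOCKED(bar.lock),
	};
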
index dc52bd442c92aa11b5c717ad811bcd2f92a169fd..79fcafc7fd64119c6924625a1faf5a93c04acf8e 100644 (file)
@@ -247,6 +247,13 @@ You need very few things to get the syscalls tracing in an arch.
 - Support the TIF_SYSCALL_TRACEPOINT thread flags.
 - Put the trace_sys_enter() and trace_sys_exit() tracepoints calls from ptrace
   in the ptrace syscalls tracing path.
+- If the system call table on this arch is more complicated than a simple array
+  of addresses of the system calls, implement an arch_syscall_addr to return
+  the address of a given system call.
+- If the symbol names of the system calls do not match the function names on
+  this arch, define ARCH_HAS_SYSCALL_MATCH_SYM_NAME in asm/ftrace.h and
+  implement arch_syscall_match_sym_name with the appropriate logic to return
+  true if the function name corresponds with the symbol name.
 - Tag this arch as HAVE_SYSCALL_TRACEPOINTS.
 
 
index 557c1edeccaf72535464298743cddd3c8eb01bea..1ebc24cf9a55af0bf1b9607d1b452e2f828ab601 100644 (file)
@@ -80,11 +80,11 @@ of ftrace. Here is a list of some of the key files:
        tracers listed here can be configured by
        echoing their name into current_tracer.
 
-  tracing_enabled:
+  tracing_on:
 
-       This sets or displays whether the current_tracer
-       is activated and tracing or not. Echo 0 into this
-       file to disable the tracer or 1 to enable it.
+       This sets or displays whether writing to the trace
+       ring buffer is enabled. Echo 0 into this file to disable
+       the tracer or 1 to enable it.
 
   trace:
 
@@ -202,10 +202,6 @@ Here is the list of current tracers that may be configured.
        to draw a graph of function calls similar to C code
        source.
 
-  "sched_switch"
-
-       Traces the context switches and wakeups between tasks.
-
   "irqsoff"
 
        Traces the areas that disable interrupts and saves
@@ -273,39 +269,6 @@ format, the function name that was traced "path_put" and the
 parent function that called this function "path_walk". The
 timestamp is the time at which the function was entered.
 
-The sched_switch tracer also includes tracing of task wakeups
-and context switches.
-
-     ksoftirqd/1-7     [01]  1453.070013:      7:115:R   +  2916:115:S
-     ksoftirqd/1-7     [01]  1453.070013:      7:115:R   +    10:115:S
-     ksoftirqd/1-7     [01]  1453.070013:      7:115:R ==>    10:115:R
-        events/1-10    [01]  1453.070013:     10:115:S ==>  2916:115:R
-     kondemand/1-2916  [01]  1453.070013:   2916:115:S ==>     7:115:R
-     ksoftirqd/1-7     [01]  1453.070013:      7:115:S ==>     0:140:R
-
-Wake ups are represented by a "+" and the context switches are
-shown as "==>".  The format is:
-
- Context switches:
-
-       Previous task              Next Task
-
-  <pid>:<prio>:<state>  ==>  <pid>:<prio>:<state>
-
- Wake ups:
-
-       Current task               Task waking up
-
-  <pid>:<prio>:<state>    +  <pid>:<prio>:<state>
-
-The prio is the internal kernel priority, which is the inverse
-of the priority that is usually displayed by user-space tools.
-Zero represents the highest priority (99). Prio 100 starts the
-"nice" priorities with 100 being equal to nice -20 and 139 being
-nice 19. The prio "140" is reserved for the idle task which is
-the lowest priority thread (pid 0).
-
-
 Latency trace format
 --------------------
 
@@ -491,78 +454,10 @@ x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
                    latencies, as described in "Latency
                    trace format".
 
-sched_switch
-------------
-
-This tracer simply records schedule switches. Here is an example
-of how to use it.
-
- # echo sched_switch > current_tracer
- # echo 1 > tracing_enabled
- # sleep 1
- # echo 0 > tracing_enabled
- # cat trace
-
-# tracer: sched_switch
-#
-#           TASK-PID   CPU#    TIMESTAMP  FUNCTION
-#              | |      |          |         |
-            bash-3997  [01]   240.132281:   3997:120:R   +  4055:120:R
-            bash-3997  [01]   240.132284:   3997:120:R ==>  4055:120:R
-           sleep-4055  [01]   240.132371:   4055:120:S ==>  3997:120:R
-            bash-3997  [01]   240.132454:   3997:120:R   +  4055:120:S
-            bash-3997  [01]   240.132457:   3997:120:R ==>  4055:120:R
-           sleep-4055  [01]   240.132460:   4055:120:D ==>  3997:120:R
-            bash-3997  [01]   240.132463:   3997:120:R   +  4055:120:D
-            bash-3997  [01]   240.132465:   3997:120:R ==>  4055:120:R
-          <idle>-0     [00]   240.132589:      0:140:R   +     4:115:S
-          <idle>-0     [00]   240.132591:      0:140:R ==>     4:115:R
-     ksoftirqd/0-4     [00]   240.132595:      4:115:S ==>     0:140:R
-          <idle>-0     [00]   240.132598:      0:140:R   +     4:115:S
-          <idle>-0     [00]   240.132599:      0:140:R ==>     4:115:R
-     ksoftirqd/0-4     [00]   240.132603:      4:115:S ==>     0:140:R
-           sleep-4055  [01]   240.133058:   4055:120:S ==>  3997:120:R
- [...]
-
-
-As we have discussed previously about this format, the header
-shows the name of the trace and points to the options. The
-"FUNCTION" is a misnomer since here it represents the wake ups
-and context switches.
-
-The sched_switch file only lists the wake ups (represented with
-'+') and context switches ('==>') with the previous task or
-current task first followed by the next task or task waking up.
-The format for both of these is PID:KERNEL-PRIO:TASK-STATE.
-Remember that the KERNEL-PRIO is the inverse of the actual
-priority with zero (0) being the highest priority and the nice
-values starting at 100 (nice -20). Below is a quick chart to map
-the kernel priority to user land priorities.
-
-   Kernel Space                     User Space
- ===============================================================
-   0(high) to  98(low)     user RT priority 99(high) to 1(low)
-                           with SCHED_RR or SCHED_FIFO
- ---------------------------------------------------------------
-  99                       sched_priority is not used in scheduling
-                           decisions(it must be specified as 0)
- ---------------------------------------------------------------
- 100(high) to 139(low)     user nice -20(high) to 19(low)
- ---------------------------------------------------------------
- 140                       idle task priority
- ---------------------------------------------------------------
-
-The task states are:
-
- R - running : wants to run, may not actually be running
- S - sleep   : process is waiting to be woken up (handles signals)
- D - disk sleep (uninterruptible sleep) : process must be woken up
-                                       (ignores signals)
- T - stopped : process suspended
- t - traced  : process is being traced (with something like gdb)
- Z - zombie  : process waiting to be cleaned up
- X - unknown
-
+  overwrite - This controls what happens when the trace buffer is
+              full. If "1" (default), the oldest events are
+              discarded and overwritten. If "0", then the newest
+              events are discarded.
 
 ftrace_enabled
 --------------
@@ -607,10 +502,10 @@ an example:
  # echo irqsoff > current_tracer
  # echo latency-format > trace_options
  # echo 0 > tracing_max_latency
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
  # ls -ltr
  [...]
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
  # cat trace
 # tracer: irqsoff
 #
@@ -715,10 +610,10 @@ is much like the irqsoff tracer.
  # echo preemptoff > current_tracer
  # echo latency-format > trace_options
  # echo 0 > tracing_max_latency
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
  # ls -ltr
  [...]
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
  # cat trace
 # tracer: preemptoff
 #
@@ -863,10 +758,10 @@ tracers.
  # echo preemptirqsoff > current_tracer
  # echo latency-format > trace_options
  # echo 0 > tracing_max_latency
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
  # ls -ltr
  [...]
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
  # cat trace
 # tracer: preemptirqsoff
 #
@@ -1026,9 +921,9 @@ Instead of performing an 'ls', we will run 'sleep 1' under
  # echo wakeup > current_tracer
  # echo latency-format > trace_options
  # echo 0 > tracing_max_latency
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
  # chrt -f 5 sleep 1
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
  # cat trace
 # tracer: wakeup
 #
@@ -1140,9 +1035,9 @@ ftrace_enabled is set; otherwise this tracer is a nop.
 
  # sysctl kernel.ftrace_enabled=1
  # echo function > current_tracer
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
  # usleep 1
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
  # cat trace
 # tracer: function
 #
@@ -1180,7 +1075,7 @@ int trace_fd;
 [...]
 int main(int argc, char *argv[]) {
        [...]
-       trace_fd = open(tracing_file("tracing_enabled"), O_WRONLY);
+       trace_fd = open(tracing_file("tracing_on"), O_WRONLY);
        [...]
        if (condition_hit()) {
                write(trace_fd, "0", 1);
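Filled out, the snippet being updated above looks roughly like this;
tracing_file() and condition_hit() are the illustrative helpers the
surrounding document already uses, not real kernel APIs:

	#include <fcntl.h>
	#include <unistd.h>

	int main(int argc, char *argv[])
	{
		int trace_fd = open(tracing_file("tracing_on"), O_WRONLY);

		/* ... run the workload being debugged ... */
		if (condition_hit()) {
			/* freeze the ring buffer at the interesting moment */
			write(trace_fd, "0", 1);
		}
		close(trace_fd);
		return 0;
	}
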
@@ -1631,9 +1526,9 @@ If I am only interested in sys_nanosleep and hrtimer_interrupt:
  # echo sys_nanosleep hrtimer_interrupt \
                > set_ftrace_filter
  # echo function > current_tracer
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
  # usleep 1
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
  # cat trace
 # tracer: ftrace
 #
@@ -1879,9 +1774,9 @@ different. The trace is live.
  # echo function > current_tracer
  # cat trace_pipe > /tmp/trace.out &
 [1] 4153
- # echo 1 > tracing_enabled
+ # echo 1 > tracing_on
  # usleep 1
- # echo 0 > tracing_enabled
+ # echo 0 > tracing_on
  # cat trace
 # tracer: function
 #
index 5f77d94598dd577aca9a9f36c9cdee13ca86fec6..6d27ab8d6e9fcc5b00c42f5a9bc6eee6f2a36086 100644 (file)
@@ -42,11 +42,25 @@ Synopsis of kprobe_events
   +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
   NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
   FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
-                 (u8/u16/u32/u64/s8/s16/s32/s64) and string are supported.
+                 (u8/u16/u32/u64/s8/s16/s32/s64), "string" and bitfield
+                 are supported.
 
   (*) only for return probe.
   (**) this is useful for fetching a field of data structures.
 
+Types
+-----
+Several types are supported for fetch-args. The kprobe tracer accesses memory
+according to the given type. The prefixes 's' and 'u' mark a type as signed or
+unsigned, respectively. Traced arguments are shown in decimal (signed) or hex
+(unsigned). The string type is special: it fetches a "null-terminated" string
+from kernel space, which means it will fail and store NULL if the string's
+container has been paged out.
+Bitfield is another special type; it takes three parameters, bit-width,
+bit-offset, and container-size (usually 32). The syntax is:
+
+ b<bit-width>@<bit-offset>/<container-size>
+
 
 Per-Probe Event Filtering
 -------------------------
index 6f99e1260db835105b4c8624898e9b8a054acf70..f1bc3dc6b3699de3c072bfce91b3ec25417e41cb 100644 (file)
@@ -1010,6 +1010,15 @@ L:       linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-s5p*/
 
+ARM/SAMSUNG MOBILE MACHINE SUPPORT
+M:     Kyungmin Park <kyungmin.park@samsung.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/mach-s5pv210/mach-aquila.c
+F:     arch/arm/mach-s5pv210/mach-goni.c
+F:     arch/arm/mach-exynos4/mach-universal_c210.c
+F:     arch/arm/mach-exynos4/mach-nuri.c
+
 ARM/SAMSUNG S5P SERIES FIMC SUPPORT
 M:     Kyungmin Park <kyungmin.park@samsung.com>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
@@ -1467,6 +1476,7 @@ F:        include/net/bluetooth/
 
 BONDING DRIVER
 M:     Jay Vosburgh <fubar@us.ibm.com>
+M:     Andy Gospodarek <andy@greyhouse.net>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/bonding/
 S:     Supported
@@ -1692,6 +1702,13 @@ M:       Andy Whitcroft <apw@canonical.com>
 S:     Supported
 F:     scripts/checkpatch.pl
 
+CHINESE DOCUMENTATION
+M:     Harry Wei <harryxiyou@gmail.com>
+L:     xiyoulinuxkernelgroup@googlegroups.com
+L:     linux-kernel@zh-kernel.org (moderated for non-subscribers)
+S:     Maintained
+F:     Documentation/zh_CN/
+
 CISCO VIC ETHERNET NIC DRIVER
 M:     Vasanthy Kolluri <vkolluri@cisco.com>
 M:     Roopa Prabhu <roprabhu@cisco.com>
@@ -2026,7 +2043,7 @@ F:        Documentation/scsi/dc395x.txt
 F:     drivers/scsi/dc395x.*
 
 DCCP PROTOCOL
-M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+M:     Gerrit Renker <gerrit@erg.abdn.ac.uk>
 L:     dccp@vger.kernel.org
 W:     http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 S:     Maintained
@@ -3512,7 +3529,7 @@ F:        drivers/hwmon/jc42.c
 F:     Documentation/hwmon/jc42
 
 JFS FILESYSTEM
-M:     Dave Kleikamp <shaggy@linux.vnet.ibm.com>
+M:     Dave Kleikamp <shaggy@kernel.org>
 L:     jfs-discussion@lists.sourceforge.net
 W:     http://jfs.sourceforge.net/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
@@ -4275,10 +4292,7 @@ S:       Maintained
 F:     net/sched/sch_netem.c
 
 NETERION 10GbE DRIVERS (s2io/vxge)
-M:     Ramkrishna Vepa <ramkrishna.vepa@exar.com>
-M:     Sivakumar Subramani <sivakumar.subramani@exar.com>
-M:     Sreenivasa Honnur <sreenivasa.honnur@exar.com>
-M:     Jon Mason <jon.mason@exar.com>
+M:     Jon Mason <jdmason@kudzu.us>
 L:     netdev@vger.kernel.org
 W:     http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
 W:     http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
@@ -5164,6 +5178,7 @@ F:        drivers/char/random.c
 
 RAPIDIO SUBSYSTEM
 M:     Matt Porter <mporter@kernel.crashing.org>
+M:     Alexandre Bounine <alexandre.bounine@idt.com>
 S:     Maintained
 F:     drivers/rapidio/
 
@@ -5266,7 +5281,7 @@ S:        Maintained
 F:     drivers/net/wireless/rtl818x/rtl8180/
 
 RTL8187 WIRELESS DRIVER
-M:     Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M:     Herton Ronaldo Krzesinski <herton@canonical.com>
 M:     Hin-Tak Leung <htl10@users.sourceforge.net>
 M:     Larry Finger <Larry.Finger@lwfinger.net>
 L:     linux-wireless@vger.kernel.org
@@ -6104,7 +6119,7 @@ S:        Maintained
 F:     security/tomoyo/
 
 TOPSTAR LAPTOP EXTRAS DRIVER
-M:     Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M:     Herton Ronaldo Krzesinski <herton@canonical.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/topstar-laptop.c
index 26d7d824db51ce8b78ac035820935448f5be6b2c..d6592b63c8cb689dabdec1a01f4d69ce938a5d00 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 38
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
index 47f63d480141575adbb11a0ad2fa82e2d5aee22e..cc31bec2e3163378a1084a217b78b32fbe3c854b 100644 (file)
@@ -11,6 +11,7 @@ config ALPHA
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_PROBE
        select AUTO_IRQ_AFFINITY if SMP
+       select GENERIC_HARDIRQS_NO_DEPRECATED
        help
          The Alpha is a 64-bit general-purpose processor designed and
          marketed by the Digital Equipment Corporation of blessed memory,
index 945de222ab91a3409836ce070a4bcd9f525061cb..e8a761aee088a9bc5a8d2ce62d6bf1fde2183a92 100644 (file)
@@ -29,7 +29,7 @@
        :       "r" (uaddr), "r"(oparg)                         \
        :       "memory")
 
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -39,7 +39,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        pagefault_disable();
@@ -81,21 +81,23 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
 {
-       int prev, cmp;
+       int ret = 0, cmp;
+       u32 prev;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        __asm__ __volatile__ (
                __ASM_SMP_MB
-       "1:     ldl_l   %0,0(%2)\n"
-       "       cmpeq   %0,%3,%1\n"
-       "       beq     %1,3f\n"
-       "       mov     %4,%1\n"
-       "2:     stl_c   %1,0(%2)\n"
-       "       beq     %1,4f\n"
+       "1:     ldl_l   %1,0(%3)\n"
+       "       cmpeq   %1,%4,%2\n"
+       "       beq     %2,3f\n"
+       "       mov     %5,%2\n"
+       "2:     stl_c   %2,0(%3)\n"
+       "       beq     %2,4f\n"
        "3:     .subsection 2\n"
        "4:     br      1b\n"
        "       .previous\n"
@@ -105,11 +107,12 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
        "       .long   2b-.\n"
        "       lda     $31,3b-2b(%0)\n"
        "       .previous\n"
-       :       "=&r"(prev), "=&r"(cmp)
+       :       "+r"(ret), "=&r"(prev), "=&r"(cmp)
        :       "r"(uaddr), "r"((long)oldval), "r"(newval)
        :       "memory");
 
-       return prev;
+       *uval = prev;
+       return ret;
 }
 
 #endif /* __KERNEL__ */
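The conversion above switches futex_atomic_cmpxchg_inatomic() from returning
the previous value to returning 0 or -EFAULT while storing the old value
through a pointer. A caller-side sketch of the new convention (kernel-context
fragment; the retry policy shown is illustrative):

	u32 curval;
	int ret;

	ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, oldval, newval);
	if (ret)
		return ret;		/* -EFAULT: fault on the user address */
	if (curval != oldval)
		return -EAGAIN;		/* lost a race; let the caller retry */
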
index 1570c0b5433649e4bafeb6601bfa77aeee8d6a44..a83bbea62c67481442cb0bd04ba033b7ef95b848 100644 (file)
 #ifdef __KERNEL__
 
 #include <linux/compiler.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
 
-struct rwsem_waiter;
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-       long                    count;
 #define RWSEM_UNLOCKED_VALUE           0x0000000000000000L
 #define RWSEM_ACTIVE_BIAS              0x0000000000000001L
 #define RWSEM_ACTIVE_MASK              0x00000000ffffffffL
 #define RWSEM_WAITING_BIAS             (-0x0000000100000000L)
 #define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-       spinlock_t              wait_lock;
-       struct list_head        wait_list;
-};
-
-#define __RWSEM_INITIALIZER(name) \
-       { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
-       LIST_HEAD_INIT((name).wait_list) }
-
-#define DECLARE_RWSEM(name) \
-       struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-       sem->count = RWSEM_UNLOCKED_VALUE;
-       spin_lock_init(&sem->wait_lock);
-       INIT_LIST_HEAD(&sem->wait_list);
-}
 
 static inline void __down_read(struct rw_semaphore *sem)
 {
@@ -250,10 +219,5 @@ static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
 #endif
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-       return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ALPHA_RWSEM_H */
index 9ab234f48dd899c2ad0f9c9a34ecbc5bc8ef449b..a19d600822991c847eb65e0d943e4b8ff9dd82e1 100644 (file)
@@ -44,11 +44,16 @@ static char irq_user_affinity[NR_IRQS];
 
 int irq_select_affinity(unsigned int irq)
 {
-       struct irq_desc *desc = irq_to_desc[irq];
+       struct irq_data *data = irq_get_irq_data(irq);
+       struct irq_chip *chip;
        static int last_cpu;
        int cpu = last_cpu + 1;
 
-       if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq])
+       if (!data)
+               return 1;
+       chip = irq_data_get_irq_chip(data);
+
+       if (!chip->irq_set_affinity || irq_user_affinity[irq])
                return 1;
 
        while (!cpu_possible(cpu) ||
@@ -56,8 +61,8 @@ int irq_select_affinity(unsigned int irq)
                cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
        last_cpu = cpu;
 
-       cpumask_copy(desc->affinity, cpumask_of(cpu));
-       get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu));
+       cpumask_copy(data->affinity, cpumask_of(cpu));
+       chip->irq_set_affinity(data, cpumask_of(cpu), false);
        return 0;
 }
 #endif /* CONFIG_SMP */
index 2d0679b609393ce8f0af8d10cbadc2ead2757669..411ca11d0a18172ba63523acc8170d337e7dadd3 100644 (file)
@@ -228,14 +228,9 @@ struct irqaction timer_irqaction = {
 void __init
 init_rtc_irq(void)
 {
-       struct irq_desc *desc = irq_to_desc(RTC_IRQ);
-
-       if (desc) {
-               desc->status |= IRQ_DISABLED;
-               set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
-                       handle_simple_irq, "RTC");
-               setup_irq(RTC_IRQ, &timer_irqaction);
-       }
+       set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+                                     handle_simple_irq, "RTC");
+       setup_irq(RTC_IRQ, &timer_irqaction);
 }
 
 /* Dummy irqactions.  */
index 956ea0ed169428c5608389f36aae73a2eedfc012..c7cc9813e45fe002c2e39d0c7a4fe6312c8fe3ce 100644 (file)
@@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 inline void
-i8259a_enable_irq(unsigned int irq)
+i8259a_enable_irq(struct irq_data *d)
 {
        spin_lock(&i8259_irq_lock);
-       i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+       i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
        spin_unlock(&i8259_irq_lock);
 }
 
@@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq)
 }
 
 void
-i8259a_disable_irq(unsigned int irq)
+i8259a_disable_irq(struct irq_data *d)
 {
        spin_lock(&i8259_irq_lock);
-       __i8259a_disable_irq(irq);
+       __i8259a_disable_irq(d->irq);
        spin_unlock(&i8259_irq_lock);
 }
 
 void
-i8259a_mask_and_ack_irq(unsigned int irq)
+i8259a_mask_and_ack_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
+
        spin_lock(&i8259_irq_lock);
        __i8259a_disable_irq(irq);
 
@@ -71,9 +73,9 @@ i8259a_mask_and_ack_irq(unsigned int irq)
 
 struct irq_chip i8259a_irq_type = {
        .name           = "XT-PIC",
-       .unmask         = i8259a_enable_irq,
-       .mask           = i8259a_disable_irq,
-       .mask_ack       = i8259a_mask_and_ack_irq,
+       .irq_unmask     = i8259a_enable_irq,
+       .irq_mask       = i8259a_disable_irq,
+       .irq_mask_ack   = i8259a_mask_and_ack_irq,
 };
 
 void __init
index b63ccd7386f18230293c036acddab148f22b015a..d507a234b05da8ec6b3eefe2b07d140ac19a5e1a 100644 (file)
@@ -31,11 +31,9 @@ extern void init_rtc_irq(void);
 
 extern void common_init_isa_dma(void);
 
-extern void i8259a_enable_irq(unsigned int);
-extern void i8259a_disable_irq(unsigned int);
-extern void i8259a_mask_and_ack_irq(unsigned int);
-extern unsigned int i8259a_startup_irq(unsigned int);
-extern void i8259a_end_irq(unsigned int);
+extern void i8259a_enable_irq(struct irq_data *d);
+extern void i8259a_disable_irq(struct irq_data *d);
+extern void i8259a_mask_and_ack_irq(struct irq_data *d);
 extern struct irq_chip i8259a_irq_type;
 extern void init_i8259a_irqs(void);
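
Every chip conversion in the files below follows the same shape as the i8259 one: the callbacks take a struct irq_data * instead of an unsigned int, recover the number from d->irq, and the irq_chip fields gain an irq_ prefix. A schematic sketch of the pattern (the chip and the hardware accessor are placeholders, not code from this merge):

#include <linux/irq.h>

/* Placeholder for a platform's cached-mask register update. */
static void example_update_hw(unsigned int irq, int masked)
{
}

static void example_mask(struct irq_data *d)	/* was: (unsigned int irq) */
{
	example_update_hw(d->irq, 1);
}

static void example_unmask(struct irq_data *d)
{
	example_update_hw(d->irq, 0);
}

static struct irq_chip example_chip = {
	.name		= "EXAMPLE",
	.irq_unmask	= example_unmask,	/* was .unmask   */
	.irq_mask	= example_mask,		/* was .mask     */
	.irq_mask_ack	= example_mask,		/* was .mask_ack */
};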
 
index 2863458c853e45f7e91c22d60b1f0f4cd46dbff6..b30227fa7f5f6f5e9bc2ddfeaad8942a6bc02cc7 100644 (file)
@@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-pyxis_enable_irq(unsigned int irq)
+pyxis_enable_irq(struct irq_data *d)
 {
-       pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+       pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-pyxis_disable_irq(unsigned int irq)
+pyxis_disable_irq(struct irq_data *d)
 {
-       pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+       pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-pyxis_mask_and_ack_irq(unsigned int irq)
+pyxis_mask_and_ack_irq(struct irq_data *d)
 {
-       unsigned long bit = 1UL << (irq - 16);
+       unsigned long bit = 1UL << (d->irq - 16);
        unsigned long mask = cached_irq_mask &= ~bit;
 
        /* Disable the interrupt.  */
@@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip pyxis_irq_type = {
        .name           = "PYXIS",
-       .mask_ack       = pyxis_mask_and_ack_irq,
-       .mask           = pyxis_disable_irq,
-       .unmask         = pyxis_enable_irq,
+       .irq_mask_ack   = pyxis_mask_and_ack_irq,
+       .irq_mask       = pyxis_disable_irq,
+       .irq_unmask     = pyxis_enable_irq,
 };
 
 void 
@@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
                if ((ignore_mask >> i) & 1)
                        continue;
                set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
-               irq_to_desc(i)->status |= IRQ_LEVEL;
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        setup_irq(16+7, &isa_cascade_irqaction);
index 0e57e828b41370498647460dff1a31de564b8ddb..82a47bba41c4940a8c309a932819232882d81266 100644 (file)
 DEFINE_SPINLOCK(srm_irq_lock);
 
 static inline void
-srm_enable_irq(unsigned int irq)
+srm_enable_irq(struct irq_data *d)
 {
        spin_lock(&srm_irq_lock);
-       cserve_ena(irq - 16);
+       cserve_ena(d->irq - 16);
        spin_unlock(&srm_irq_lock);
 }
 
 static void
-srm_disable_irq(unsigned int irq)
+srm_disable_irq(struct irq_data *d)
 {
        spin_lock(&srm_irq_lock);
-       cserve_dis(irq - 16);
+       cserve_dis(d->irq - 16);
        spin_unlock(&srm_irq_lock);
 }
 
 /* Handle interrupts from the SRM, assuming no additional weirdness.  */
 static struct irq_chip srm_irq_type = {
        .name           = "SRM",
-       .unmask         = srm_enable_irq,
-       .mask           = srm_disable_irq,
-       .mask_ack       = srm_disable_irq,
+       .irq_unmask     = srm_enable_irq,
+       .irq_mask       = srm_disable_irq,
+       .irq_mask_ack   = srm_disable_irq,
 };
 
 void __init
@@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
                if (i < 64 && ((ignore_mask >> i) & 1))
                        continue;
                set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
-               irq_to_desc(i)->status |= IRQ_LEVEL;
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 }
 
index fe698b5045e97d8863e921d4fb0069069cf8be0a..376f2213079190f65196b0e3f6554f0f55834183 100644 (file)
@@ -230,44 +230,24 @@ linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_st
        return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0;
 }
 
-static int
-do_osf_statfs(struct path *path, struct osf_statfs __user *buffer,
-             unsigned long bufsiz)
+SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname,
+               struct osf_statfs __user *, buffer, unsigned long, bufsiz)
 {
        struct kstatfs linux_stat;
-       int error = vfs_statfs(path, &linux_stat);
+       int error = user_statfs(pathname, &linux_stat);
        if (!error)
                error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
        return error;   
 }
 
-SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname,
-               struct osf_statfs __user *, buffer, unsigned long, bufsiz)
-{
-       struct path path;
-       int retval;
-
-       retval = user_path(pathname, &path);
-       if (!retval) {
-               retval = do_osf_statfs(&path, buffer, bufsiz);
-               path_put(&path);
-       }
-       return retval;
-}
-
 SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd,
                struct osf_statfs __user *, buffer, unsigned long, bufsiz)
 {
-       struct file *file;
-       int retval;
-
-       retval = -EBADF;
-       file = fget(fd);
-       if (file) {
-               retval = do_osf_statfs(&file->f_path, buffer, bufsiz);
-               fput(file);
-       }
-       return retval;
+       struct kstatfs linux_stat;
+       int error = fd_statfs(fd, &linux_stat);
+       if (!error)
+               error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
+       return error;
 }
 
 /*
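
The osf_statfs/osf_fstatfs rewrite leans on the then-new user_statfs() and fd_statfs() helpers, which fold the user_path()/path_put() and fget()/fput() boilerplate into fs/statfs.c. A hedged sketch of the calling convention (the declarations are assumed to come from <linux/statfs.h>; the wrapper is hypothetical):

#include <linux/statfs.h>

static int statfs_sketch(const char __user *pathname, int fd)
{
	struct kstatfs st;
	int err;

	err = user_statfs(pathname, &st);	/* path lookup, vfs_statfs, path_put */
	if (err)
		return err;

	return fd_statfs(fd, &st);		/* fget, vfs_statfs, fput */
}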
index 7bef61768236040f11d272e0611661d40535e35f..88d95e872f55f9389ede1778f39952fff141d1aa 100644 (file)
@@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-alcor_enable_irq(unsigned int irq)
+alcor_enable_irq(struct irq_data *d)
 {
-       alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+       alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-alcor_disable_irq(unsigned int irq)
+alcor_disable_irq(struct irq_data *d)
 {
-       alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+       alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-alcor_mask_and_ack_irq(unsigned int irq)
+alcor_mask_and_ack_irq(struct irq_data *d)
 {
-       alcor_disable_irq(irq);
+       alcor_disable_irq(d);
 
        /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
-       *(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
+       *(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
        *(vuip)GRU_INT_CLEAR = 0; mb();
 }
 
 static void
-alcor_isa_mask_and_ack_irq(unsigned int irq)
+alcor_isa_mask_and_ack_irq(struct irq_data *d)
 {
-       i8259a_mask_and_ack_irq(irq);
+       i8259a_mask_and_ack_irq(d);
 
        /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
        *(vuip)GRU_INT_CLEAR = 0x80000000; mb();
@@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip alcor_irq_type = {
        .name           = "ALCOR",
-       .unmask         = alcor_enable_irq,
-       .mask           = alcor_disable_irq,
-       .mask_ack       = alcor_mask_and_ack_irq,
+       .irq_unmask     = alcor_enable_irq,
+       .irq_mask       = alcor_disable_irq,
+       .irq_mask_ack   = alcor_mask_and_ack_irq,
 };
 
 static void
@@ -126,9 +126,9 @@ alcor_init_irq(void)
                if (i >= 16+20 && i <= 16+30)
                        continue;
                set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
-               irq_to_desc(i)->status |= IRQ_LEVEL;
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
-       i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
+       i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
 
        init_i8259a_irqs();
        common_init_isa_dma();
index b0c916493aeaad3925fb89a10e0a44b5904c28a7..57eb6307bc2759d43a07c917f7e50594c5b514c5 100644 (file)
@@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 static inline void
-cabriolet_enable_irq(unsigned int irq)
+cabriolet_enable_irq(struct irq_data *d)
 {
-       cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
+       cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
 }
 
 static void
-cabriolet_disable_irq(unsigned int irq)
+cabriolet_disable_irq(struct irq_data *d)
 {
-       cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
+       cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
 }
 
 static struct irq_chip cabriolet_irq_type = {
        .name           = "CABRIOLET",
-       .unmask         = cabriolet_enable_irq,
-       .mask           = cabriolet_disable_irq,
-       .mask_ack       = cabriolet_disable_irq,
+       .irq_unmask     = cabriolet_enable_irq,
+       .irq_mask       = cabriolet_disable_irq,
+       .irq_mask_ack   = cabriolet_disable_irq,
 };
 
 static void 
@@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
                for (i = 16; i < 35; ++i) {
                        set_irq_chip_and_handler(i, &cabriolet_irq_type,
                                handle_level_irq);
-                       irq_to_desc(i)->status |= IRQ_LEVEL;
+                       irq_set_status_flags(i, IRQ_LEVEL);
                }
        }
 
index edad5f759ccd12b37067456360a582e12604d0c6..481df4ecb651258ed280d062cf9c187261874857 100644 (file)
@@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask)
 }
 
 static void
-dp264_enable_irq(unsigned int irq)
+dp264_enable_irq(struct irq_data *d)
 {
        spin_lock(&dp264_irq_lock);
-       cached_irq_mask |= 1UL << irq;
+       cached_irq_mask |= 1UL << d->irq;
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 }
 
 static void
-dp264_disable_irq(unsigned int irq)
+dp264_disable_irq(struct irq_data *d)
 {
        spin_lock(&dp264_irq_lock);
-       cached_irq_mask &= ~(1UL << irq);
+       cached_irq_mask &= ~(1UL << d->irq);
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_enable_irq(unsigned int irq)
+clipper_enable_irq(struct irq_data *d)
 {
        spin_lock(&dp264_irq_lock);
-       cached_irq_mask |= 1UL << (irq - 16);
+       cached_irq_mask |= 1UL << (d->irq - 16);
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_disable_irq(unsigned int irq)
+clipper_disable_irq(struct irq_data *d)
 {
        spin_lock(&dp264_irq_lock);
-       cached_irq_mask &= ~(1UL << (irq - 16));
+       cached_irq_mask &= ~(1UL << (d->irq - 16));
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 }
@@ -149,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static int
-dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{ 
+dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+                  bool force)
+{
        spin_lock(&dp264_irq_lock);
-       cpu_set_irq_affinity(irq, *affinity);
+       cpu_set_irq_affinity(d->irq, *affinity);
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 
@@ -160,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 
 static int
-clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{ 
+clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+                    bool force)
+{
        spin_lock(&dp264_irq_lock);
-       cpu_set_irq_affinity(irq - 16, *affinity);
+       cpu_set_irq_affinity(d->irq - 16, *affinity);
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 
@@ -171,19 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 
 static struct irq_chip dp264_irq_type = {
-       .name           = "DP264",
-       .unmask         = dp264_enable_irq,
-       .mask           = dp264_disable_irq,
-       .mask_ack       = dp264_disable_irq,
-       .set_affinity   = dp264_set_affinity,
+       .name                   = "DP264",
+       .irq_unmask             = dp264_enable_irq,
+       .irq_mask               = dp264_disable_irq,
+       .irq_mask_ack           = dp264_disable_irq,
+       .irq_set_affinity       = dp264_set_affinity,
 };
 
 static struct irq_chip clipper_irq_type = {
-       .name           = "CLIPPER",
-       .unmask         = clipper_enable_irq,
-       .mask           = clipper_disable_irq,
-       .mask_ack       = clipper_disable_irq,
-       .set_affinity   = clipper_set_affinity,
+       .name                   = "CLIPPER",
+       .irq_unmask             = clipper_enable_irq,
+       .irq_mask               = clipper_disable_irq,
+       .irq_mask_ack           = clipper_disable_irq,
+       .irq_set_affinity       = clipper_set_affinity,
 };
 
 static void
@@ -268,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
 {
        long i;
        for (i = imin; i <= imax; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, ops, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 }
 
index ae5f29d127b06263c461a38dd87769053fc728c7..402e908ffb3e130cf39d2ecba458b5c94504aaa2 100644 (file)
@@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 static inline void
-eb64p_enable_irq(unsigned int irq)
+eb64p_enable_irq(struct irq_data *d)
 {
-       eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+       eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 }
 
 static void
-eb64p_disable_irq(unsigned int irq)
+eb64p_disable_irq(struct irq_data *d)
 {
-       eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
+       eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
 }
 
 static struct irq_chip eb64p_irq_type = {
        .name           = "EB64P",
-       .unmask         = eb64p_enable_irq,
-       .mask           = eb64p_disable_irq,
-       .mask_ack       = eb64p_disable_irq,
+       .irq_unmask     = eb64p_enable_irq,
+       .irq_mask       = eb64p_disable_irq,
+       .irq_mask_ack   = eb64p_disable_irq,
 };
 
 static void 
@@ -118,9 +118,9 @@ eb64p_init_irq(void)
        init_i8259a_irqs();
 
        for (i = 16; i < 32; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
-       }               
+               irq_set_status_flags(i, IRQ_LEVEL);
+       }
 
        common_init_isa_dma();
        setup_irq(16+5, &isa_cascade_irqaction);
index 1121bc5c6c6cf3908ba493996a8b10201f7a32b7..0b44a54c1522ad87caad7c4d90853e4a37ebb7a5 100644 (file)
@@ -51,16 +51,18 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 
 static inline void
-eiger_enable_irq(unsigned int irq)
+eiger_enable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        unsigned long mask;
        mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
        eiger_update_irq_hw(irq, mask);
 }
 
 static void
-eiger_disable_irq(unsigned int irq)
+eiger_disable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        unsigned long mask;
        mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
        eiger_update_irq_hw(irq, mask);
@@ -68,9 +70,9 @@ eiger_disable_irq(unsigned int irq)
 
 static struct irq_chip eiger_irq_type = {
        .name           = "EIGER",
-       .unmask         = eiger_enable_irq,
-       .mask           = eiger_disable_irq,
-       .mask_ack       = eiger_disable_irq,
+       .irq_unmask     = eiger_enable_irq,
+       .irq_mask       = eiger_disable_irq,
+       .irq_mask_ack   = eiger_disable_irq,
 };
 
 static void
@@ -136,8 +138,8 @@ eiger_init_irq(void)
        init_i8259a_irqs();
 
        for (i = 16; i < 128; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 }
 
index 34f55e03d331aae677050df449e37f6cdcfef235..00341b75c8b25d81b0e203c8771b40bb1cfa6f14 100644 (file)
  */
 
 static void
-jensen_local_enable(unsigned int irq)
+jensen_local_enable(struct irq_data *d)
 {
        /* the parport is really hw IRQ 1, silly Jensen.  */
-       if (irq == 7)
-               i8259a_enable_irq(1);
+       if (d->irq == 7)
+               i8259a_enable_irq(d);
 }
 
 static void
-jensen_local_disable(unsigned int irq)
+jensen_local_disable(struct irq_data *d)
 {
        /* the parport is really hw IRQ 1, silly Jensen.  */
-       if (irq == 7)
-               i8259a_disable_irq(1);
+       if (d->irq == 7)
+               i8259a_disable_irq(d);
 }
 
 static void
-jensen_local_mask_ack(unsigned int irq)
+jensen_local_mask_ack(struct irq_data *d)
 {
        /* the parport is really hw IRQ 1, silly Jensen.  */
-       if (irq == 7)
-               i8259a_mask_and_ack_irq(1);
+       if (d->irq == 7)
+               i8259a_mask_and_ack_irq(d);
 }
 
 static struct irq_chip jensen_local_irq_type = {
        .name           = "LOCAL",
-       .unmask         = jensen_local_enable,
-       .mask           = jensen_local_disable,
-       .mask_ack       = jensen_local_mask_ack,
+       .irq_unmask     = jensen_local_enable,
+       .irq_mask       = jensen_local_disable,
+       .irq_mask_ack   = jensen_local_mask_ack,
 };
 
 static void 
index 2bfc9f1b1ddcbd24bca2a3f3b524ffaf0a3f469a..e61910734e413f997e31fcdb4db52876f983bfd4 100644 (file)
@@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
 }
 
 static void
-io7_enable_irq(unsigned int irq)
+io7_enable_irq(struct irq_data *d)
 {
        volatile unsigned long *ctl;
+       unsigned int irq = d->irq;
        struct io7 *io7;
 
        ctl = io7_get_irq_ctl(irq, &io7);
@@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq)
                       __func__, irq);
                return;
        }
-               
+
        spin_lock(&io7->irq_lock);
        *ctl |= 1UL << 24;
        mb();
@@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq)
 }
 
 static void
-io7_disable_irq(unsigned int irq)
+io7_disable_irq(struct irq_data *d)
 {
        volatile unsigned long *ctl;
+       unsigned int irq = d->irq;
        struct io7 *io7;
 
        ctl = io7_get_irq_ctl(irq, &io7);
@@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq)
                       __func__, irq);
                return;
        }
-               
+
        spin_lock(&io7->irq_lock);
        *ctl &= ~(1UL << 24);
        mb();
@@ -144,35 +146,29 @@ io7_disable_irq(unsigned int irq)
 }
 
 static void
-marvel_irq_noop(unsigned int irq) 
-{ 
-       return; 
-}
-
-static unsigned int
-marvel_irq_noop_return(unsigned int irq) 
-{ 
-       return 0; 
+marvel_irq_noop(struct irq_data *d)
+{
+       return;
 }
 
 static struct irq_chip marvel_legacy_irq_type = {
        .name           = "LEGACY",
-       .mask           = marvel_irq_noop,
-       .unmask         = marvel_irq_noop,
+       .irq_mask       = marvel_irq_noop,
+       .irq_unmask     = marvel_irq_noop,
 };
 
 static struct irq_chip io7_lsi_irq_type = {
        .name           = "LSI",
-       .unmask         = io7_enable_irq,
-       .mask           = io7_disable_irq,
-       .mask_ack       = io7_disable_irq,
+       .irq_unmask     = io7_enable_irq,
+       .irq_mask       = io7_disable_irq,
+       .irq_mask_ack   = io7_disable_irq,
 };
 
 static struct irq_chip io7_msi_irq_type = {
        .name           = "MSI",
-       .unmask         = io7_enable_irq,
-       .mask           = io7_disable_irq,
-       .ack            = marvel_irq_noop,
+       .irq_unmask     = io7_enable_irq,
+       .irq_mask       = io7_disable_irq,
+       .irq_ack        = marvel_irq_noop,
 };
 
 static void
@@ -280,8 +276,8 @@ init_io7_irqs(struct io7 *io7,
 
        /* Set up the lsi irqs.  */
        for (i = 0; i < 128; ++i) {
-               irq_to_desc(base + i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
+               irq_set_status_flags(base + i, IRQ_LEVEL);
        }
 
        /* Disable the implemented irqs in hardware.  */
@@ -294,8 +290,8 @@ init_io7_irqs(struct io7 *io7,
 
        /* Set up the msi irqs.  */
        for (i = 128; i < (128 + 512); ++i) {
-               irq_to_desc(base + i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
+               irq_set_status_flags(base + i, IRQ_LEVEL);
        }
 
        for (i = 0; i < 16; ++i)
index bcc1639e8efb64be789f8b7877fafd7346783870..cf7f43dd3147c49d7fbf922d4c59506b2bde325a 100644 (file)
@@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask)
 }
 
 static inline void
-mikasa_enable_irq(unsigned int irq)
+mikasa_enable_irq(struct irq_data *d)
 {
-       mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16));
+       mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-mikasa_disable_irq(unsigned int irq)
+mikasa_disable_irq(struct irq_data *d)
 {
-       mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
+       mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip mikasa_irq_type = {
        .name           = "MIKASA",
-       .unmask         = mikasa_enable_irq,
-       .mask           = mikasa_disable_irq,
-       .mask_ack       = mikasa_disable_irq,
+       .irq_unmask     = mikasa_enable_irq,
+       .irq_mask       = mikasa_disable_irq,
+       .irq_mask_ack   = mikasa_disable_irq,
 };
 
 static void 
@@ -98,8 +98,8 @@ mikasa_init_irq(void)
        mikasa_update_irq_hw(0);
 
        for (i = 16; i < 32; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        init_i8259a_irqs();
index e88f4ae1260ef93c53373a045619bc0cc2ea891f..92bc188e94a981458f4380262cdf2042f4e12b06 100644 (file)
@@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask)
 }
 
 static void
-noritake_enable_irq(unsigned int irq)
+noritake_enable_irq(struct irq_data *d)
 {
-       noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16));
+       noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-noritake_disable_irq(unsigned int irq)
+noritake_disable_irq(struct irq_data *d)
 {
-       noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
+       noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip noritake_irq_type = {
        .name           = "NORITAKE",
-       .unmask         = noritake_enable_irq,
-       .mask           = noritake_disable_irq,
-       .mask_ack       = noritake_disable_irq,
+       .irq_unmask     = noritake_enable_irq,
+       .irq_mask       = noritake_disable_irq,
+       .irq_mask_ack   = noritake_disable_irq,
 };
 
 static void 
@@ -127,8 +127,8 @@ noritake_init_irq(void)
        outw(0, 0x54c);
 
        for (i = 16; i < 48; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        init_i8259a_irqs();
index 6a51364dd1cc6bb4742d29465dcc18a4953bef5d..936d4140ed5fc68e6addf5a9ae1437a77b011f12 100644 (file)
@@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask)
   (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
 
 static inline void 
-rawhide_enable_irq(unsigned int irq)
+rawhide_enable_irq(struct irq_data *d)
 {
        unsigned int mask, hose;
+       unsigned int irq = d->irq;
 
        irq -= 16;
        hose = irq / 24;
@@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq)
 }
 
 static void 
-rawhide_disable_irq(unsigned int irq)
+rawhide_disable_irq(struct irq_data *d)
 {
        unsigned int mask, hose;
+       unsigned int irq = d->irq;
 
        irq -= 16;
        hose = irq / 24;
@@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq)
 }
 
 static void
-rawhide_mask_and_ack_irq(unsigned int irq)
+rawhide_mask_and_ack_irq(struct irq_data *d)
 {
        unsigned int mask, mask1, hose;
+       unsigned int irq = d->irq;
 
        irq -= 16;
        hose = irq / 24;
@@ -123,9 +126,9 @@ rawhide_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip rawhide_irq_type = {
        .name           = "RAWHIDE",
-       .unmask         = rawhide_enable_irq,
-       .mask           = rawhide_disable_irq,
-       .mask_ack       = rawhide_mask_and_ack_irq,
+       .irq_unmask     = rawhide_enable_irq,
+       .irq_mask       = rawhide_disable_irq,
+       .irq_mask_ack   = rawhide_mask_and_ack_irq,
 };
 
 static void 
@@ -177,8 +180,8 @@ rawhide_init_irq(void)
        }
 
        for (i = 16; i < 128; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        init_i8259a_irqs();
index 89e7e37ec84cc486328baf015bbe6c871cbeabac..cea22a62913b4d9a4ea18092e98bec29a87bf8cd 100644 (file)
@@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-rx164_enable_irq(unsigned int irq)
+rx164_enable_irq(struct irq_data *d)
 {
-       rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+       rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-rx164_disable_irq(unsigned int irq)
+rx164_disable_irq(struct irq_data *d)
 {
-       rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+       rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static struct irq_chip rx164_irq_type = {
        .name           = "RX164",
-       .unmask         = rx164_enable_irq,
-       .mask           = rx164_disable_irq,
-       .mask_ack       = rx164_disable_irq,
+       .irq_unmask     = rx164_enable_irq,
+       .irq_mask       = rx164_disable_irq,
+       .irq_mask_ack   = rx164_disable_irq,
 };
 
 static void 
@@ -99,8 +99,8 @@ rx164_init_irq(void)
 
        rx164_update_irq_hw(0);
        for (i = 16; i < 40; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        init_i8259a_irqs();
index 5c4423d1b06c2f4c02e97f86b2e076001fd2e128..a349538aabc94a1e74397a4f15fffaec64e8c86d 100644 (file)
@@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
 /* GENERIC irq routines */
 
 static inline void
-sable_lynx_enable_irq(unsigned int irq)
+sable_lynx_enable_irq(struct irq_data *d)
 {
        unsigned long bit, mask;
 
-       bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+       bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
        spin_lock(&sable_lynx_irq_lock);
        mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
        sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq)
 }
 
 static void
-sable_lynx_disable_irq(unsigned int irq)
+sable_lynx_disable_irq(struct irq_data *d)
 {
        unsigned long bit, mask;
 
-       bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+       bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
        spin_lock(&sable_lynx_irq_lock);
        mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
        sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -475,11 +475,11 @@ sable_lynx_disable_irq(unsigned int irq)
 }
 
 static void
-sable_lynx_mask_and_ack_irq(unsigned int irq)
+sable_lynx_mask_and_ack_irq(struct irq_data *d)
 {
        unsigned long bit, mask;
 
-       bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+       bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
        spin_lock(&sable_lynx_irq_lock);
        mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
        sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip sable_lynx_irq_type = {
        .name           = "SABLE/LYNX",
-       .unmask         = sable_lynx_enable_irq,
-       .mask           = sable_lynx_disable_irq,
-       .mask_ack       = sable_lynx_mask_and_ack_irq,
+       .irq_unmask     = sable_lynx_enable_irq,
+       .irq_mask       = sable_lynx_disable_irq,
+       .irq_mask_ack   = sable_lynx_mask_and_ack_irq,
 };
 
 static void 
@@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
        long i;
 
        for (i = 0; i < nr_of_irqs; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &sable_lynx_irq_type,
                        handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        common_init_isa_dma();
index f8a1e8a862fb26c8810f090d35e6bc62f2ddc151..42a5331f13c4faa0f5c20e51099924fbb9ef27cf 100644 (file)
@@ -45,16 +45,18 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 
 static inline void
-takara_enable_irq(unsigned int irq)
+takara_enable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        unsigned long mask;
        mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
        takara_update_irq_hw(irq, mask);
 }
 
 static void
-takara_disable_irq(unsigned int irq)
+takara_disable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        unsigned long mask;
        mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
        takara_update_irq_hw(irq, mask);
@@ -62,9 +64,9 @@ takara_disable_irq(unsigned int irq)
 
 static struct irq_chip takara_irq_type = {
        .name           = "TAKARA",
-       .unmask         = takara_enable_irq,
-       .mask           = takara_disable_irq,
-       .mask_ack       = takara_disable_irq,
+       .irq_unmask     = takara_enable_irq,
+       .irq_mask       = takara_disable_irq,
+       .irq_mask_ack   = takara_disable_irq,
 };
 
 static void
@@ -136,8 +138,8 @@ takara_init_irq(void)
                takara_update_irq_hw(i, -1);
 
        for (i = 16; i < 128; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        common_init_isa_dma();
index e02494bf5ef3541edae07756e622867e67f44729..8c13a0c77830c576a2f9030fedb3b44951f654de 100644 (file)
@@ -112,8 +112,9 @@ titan_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-titan_enable_irq(unsigned int irq)
+titan_enable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        spin_lock(&titan_irq_lock);
        titan_cached_irq_mask |= 1UL << (irq - 16);
        titan_update_irq_hw(titan_cached_irq_mask);
@@ -121,8 +122,9 @@ titan_enable_irq(unsigned int irq)
 }
 
 static inline void
-titan_disable_irq(unsigned int irq)
+titan_disable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        spin_lock(&titan_irq_lock);
        titan_cached_irq_mask &= ~(1UL << (irq - 16));
        titan_update_irq_hw(titan_cached_irq_mask);
@@ -144,8 +146,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static int
-titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
+titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
+                      bool force)
 { 
+       unsigned int irq = d->irq;
        spin_lock(&titan_irq_lock);
        titan_cpu_set_irq_affinity(irq - 16, *affinity);
        titan_update_irq_hw(titan_cached_irq_mask);
@@ -175,17 +179,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
 {
        long i;
        for (i = imin; i <= imax; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, ops, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 }
 
 static struct irq_chip titan_irq_type = {
-       .name           = "TITAN",
-       .unmask         = titan_enable_irq,
-       .mask           = titan_disable_irq,
-       .mask_ack       = titan_disable_irq,
-       .set_affinity   = titan_set_irq_affinity,
+       .name                   = "TITAN",
+       .irq_unmask             = titan_enable_irq,
+       .irq_mask               = titan_disable_irq,
+       .irq_mask_ack           = titan_disable_irq,
+       .irq_set_affinity       = titan_set_irq_affinity,
 };
 
 static irqreturn_t
index eec52594d410f7f52b1921c3c5250289edb6c977..ca60a387ef0a60c08f904cb416063145cd6263bf 100644 (file)
@@ -104,10 +104,12 @@ wildfire_init_irq_hw(void)
 }
 
 static void
-wildfire_enable_irq(unsigned int irq)
+wildfire_enable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
+
        if (irq < 16)
-               i8259a_enable_irq(irq);
+               i8259a_enable_irq(d);
 
        spin_lock(&wildfire_irq_lock);
        set_bit(irq, &cached_irq_mask);
@@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq)
 }
 
 static void
-wildfire_disable_irq(unsigned int irq)
+wildfire_disable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
+
        if (irq < 16)
-               i8259a_disable_irq(irq);
+               i8259a_disable_irq(d);
 
        spin_lock(&wildfire_irq_lock);
        clear_bit(irq, &cached_irq_mask);
@@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq)
 }
 
 static void
-wildfire_mask_and_ack_irq(unsigned int irq)
+wildfire_mask_and_ack_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
+
        if (irq < 16)
-               i8259a_mask_and_ack_irq(irq);
+               i8259a_mask_and_ack_irq(d);
 
        spin_lock(&wildfire_irq_lock);
        clear_bit(irq, &cached_irq_mask);
@@ -141,9 +147,9 @@ wildfire_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip wildfire_irq_type = {
        .name           = "WILDFIRE",
-       .unmask         = wildfire_enable_irq,
-       .mask           = wildfire_disable_irq,
-       .mask_ack       = wildfire_mask_and_ack_irq,
+       .irq_unmask     = wildfire_enable_irq,
+       .irq_mask       = wildfire_disable_irq,
+       .irq_mask_ack   = wildfire_mask_and_ack_irq,
 };
 
 static void __init
@@ -177,21 +183,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
        for (i = 0; i < 16; ++i) {
                if (i == 2)
                        continue;
-               irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
                        handle_level_irq);
+               irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
        }
 
-       irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
        set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
                handle_level_irq);
+       irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
        for (i = 40; i < 64; ++i) {
-               irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
                        handle_level_irq);
+               irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
        }
 
-       setup_irq(32+irq_bias, &isa_enable);    
+       setup_irq(32+irq_bias, &isa_enable);
 }
 
 static void __init
index c1f3e7cb82a4945e6345c1e4be80cad88be36c53..a58e84f1a63b76cc3e96856d10736a1c03ba6872 100644 (file)
@@ -159,7 +159,7 @@ void read_persistent_clock(struct timespec *ts)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 irqreturn_t timer_interrupt(int irq, void *dev)
 {
@@ -172,8 +172,6 @@ irqreturn_t timer_interrupt(int irq, void *dev)
        profile_tick(CPU_PROFILING);
 #endif
 
-       write_seqlock(&xtime_lock);
-
        /*
         * Calculate how many ticks have passed since the last update,
         * including any previous partial leftover.  Save any resulting
@@ -187,9 +185,7 @@ irqreturn_t timer_interrupt(int irq, void *dev)
        nticks = delta >> FIX_SHIFT;
 
        if (nticks)
-               do_timer(nticks);
-
-       write_sequnlock(&xtime_lock);
+               xtime_update(nticks);
 
        if (test_irq_work_pending()) {
                clear_irq_work_pending();
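
xtime_update() serializes on xtime_lock internally, so the open-coded write_seqlock()/do_timer()/write_sequnlock() sequence drops out of every arch timer handler touched in this merge. Skeleton of the resulting interrupt path (the tick accounting is a placeholder for the cycle-counter arithmetic above):

#include <linux/interrupt.h>
#include <linux/time.h>

/* Placeholder for the platform's elapsed-tick calculation. */
static unsigned long nticks_elapsed(void)
{
	return 1;
}

static irqreturn_t timer_interrupt_sketch(int irq, void *dev)
{
	unsigned long nticks = nticks_elapsed();

	if (nticks)
		xtime_update(nticks);	/* takes xtime_lock itself */

	return IRQ_HANDLED;
}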
index 778655f0257a9f27f73d3c1a0fddd81eb28919f4..ea5ee4d067f34e99d40bc58a78e3a75c1e9e3df8 100644 (file)
@@ -6,6 +6,8 @@ config ARM_VIC
 
 config ARM_VIC_NR
        int
+       default 4 if ARCH_S5PV210
+       default 3 if ARCH_S5P6442 || ARCH_S5PC100
        default 2
        depends on ARM_VIC
        help
index b33fe7065b38694fa1d3047c6147a74348d4b732..199a6b6de7f48f7704e2cf3024cbdfcae6ad73d3 100644 (file)
@@ -35,7 +35,7 @@
        : "cc", "memory")
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -46,7 +46,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        pagefault_disable();    /* implies preempt_disable() */
@@ -88,36 +88,35 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
 {
-       int val;
+       int ret = 0;
+       u32 val;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
-       pagefault_disable();    /* implies preempt_disable() */
-
        __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
-       "1:     " T(ldr) "      %0, [%3]\n"
-       "       teq     %0, %1\n"
+       "1:     " T(ldr) "      %1, [%4]\n"
+       "       teq     %1, %2\n"
        "       it      eq      @ explicit IT needed for the 2b label\n"
-       "2:     " T(streq) "    %2, [%3]\n"
+       "2:     " T(streq) "    %3, [%4]\n"
        "3:\n"
        "       .pushsection __ex_table,\"a\"\n"
        "       .align  3\n"
        "       .long   1b, 4f, 2b, 4f\n"
        "       .popsection\n"
        "       .pushsection .fixup,\"ax\"\n"
-       "4:     mov     %0, %4\n"
+       "4:     mov     %0, %5\n"
        "       b       3b\n"
        "       .popsection"
-       : "=&r" (val)
+       : "+r" (ret), "=&r" (val)
        : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
        : "cc", "memory");
 
-       pagefault_enable();     /* subsumes preempt_enable() */
-
-       return val;
+       *uval = val;
+       return ret;
 }
 
 #endif /* !SMP */
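
This hunk tracks the cross-architecture futex rework: futex_atomic_cmpxchg_inatomic() now returns 0 or -EFAULT and hands the old value back through a separate *uval parameter, while pagefault_disable() moves out to the generic caller. A sketch of the new contract from the caller's side (the wrapper is hypothetical):

#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/futex.h>

static int cmpxchg_futex_sketch(u32 __user *uaddr, u32 oldval, u32 newval)
{
	u32 curval;
	int ret;

	pagefault_disable();	/* no longer done inside the primitive */
	ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, oldval, newval);
	pagefault_enable();

	if (ret)
		return ret;		/* -EFAULT */

	return curval == oldval;	/* nonzero: the store was performed */
}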
index 3a0893a76a3b23abbb8ecaf4c4c9ce1a528f3bd4..bf13b814c1b8c574256d5a383956f9b1a6d1f822 100644 (file)
@@ -15,10 +15,6 @@ struct meminfo;
 struct sys_timer;
 
 struct machine_desc {
-       /*
-        * Note! The first two elements are used
-        * by assembler code in head.S, head-common.S
-        */
        unsigned int            nr;             /* architecture number  */
        const char              *name;          /* architecture name    */
        unsigned long           boot_params;    /* tagged list          */
index 9763be04f77ee9e15b8e759a1b392cb7f66b3b6f..22de005f159ce7599166d749fdf8cfdea0905512 100644 (file)
@@ -10,6 +10,8 @@
 #ifndef _ASMARM_PGALLOC_H
 #define _ASMARM_PGALLOC_H
 
+#include <linux/pagemap.h>
+
 #include <asm/domain.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/processor.h>
index d600bd350704965577a63266afaab2772eb8b894..44b84fe6e1b0fdc544c6f306ed33384a33734fab 100644 (file)
@@ -836,9 +836,11 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 /*
  * One-time initialisation.
  */
-static void reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(void *info)
 {
-       int i;
+       int i, cpu = smp_processor_id();
+       u32 dbg_power;
+       cpumask_t *cpumask = info;
 
        /*
         * v7 debug contains save and restore registers so that debug state
@@ -849,6 +851,17 @@ static void reset_ctrl_regs(void *unused)
         * later on.
         */
        if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
+               /*
+                * Ensure sticky power-down is clear (i.e. debug logic is
+                * powered up).
+                */
+               asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
+               if ((dbg_power & 0x1) == 0) {
+                       pr_warning("CPU %d debug is powered down!\n", cpu);
+                       cpumask_or(cpumask, cpumask, cpumask_of(cpu));
+                       return;
+               }
+
                /*
                 * Unconditionally clear the lock by writing a value
                 * other than 0xC5ACCE55 to the access register.
@@ -887,6 +900,7 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 static int __init arch_hw_breakpoint_init(void)
 {
        u32 dscr;
+       cpumask_t cpumask = { CPU_BITS_NONE };
 
        debug_arch = get_debug_arch();
 
@@ -911,7 +925,13 @@ static int __init arch_hw_breakpoint_init(void)
         * Reset the breakpoint resources. We assume that a halting
         * debugger will leave the world in a nice state for us.
         */
-       on_each_cpu(reset_ctrl_regs, NULL, 1);
+       on_each_cpu(reset_ctrl_regs, &cpumask, 1);
+       if (!cpumask_empty(&cpumask)) {
+               core_num_brps = 0;
+               core_num_reserved_brps = 0;
+               core_num_wrps = 0;
+               return 0;
+       }
 
        ARM_DBG_READ(c1, 0, dscr);
        if (dscr & ARM_DSCR_HDBGEN) {
index 19c6816db61ebe5bc92a7995c6ed76fa59bcd18c..b13e70f63d7121f2931b955b60d70530cda8967f 100644 (file)
@@ -996,10 +996,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num,
                while (!(arch_ctrl.len & 0x1))
                        arch_ctrl.len >>= 1;
 
-               if (idx & 0x1)
-                       reg = encode_ctrl_reg(arch_ctrl);
-               else
+               if (num & 0x1)
                        reg = bp->attr.bp_addr;
+               else
+                       reg = encode_ctrl_reg(arch_ctrl);
        }
 
 put:
index 3d76bf2337347fb24b7b4b0783a8f4911ca3c518..1ff46cabc7efd4cd65c97ab0447895b3c21cf533 100644 (file)
@@ -107,9 +107,7 @@ void timer_tick(void)
 {
        profile_tick(CPU_PROFILING);
        do_leds();
-       write_seqlock(&xtime_lock);
-       do_timer(1);
-       write_sequnlock(&xtime_lock);
+       xtime_update(1);
 #ifndef CONFIG_SMP
        update_process_times(user_mode(get_irq_regs()));
 #endif
index 8fe283ccd1f3dc75aa64d20466649f1aa052156b..61fef9129c6a09ecc33284665146414efede2e66 100644 (file)
@@ -30,7 +30,7 @@ p720t_timer_interrupt(int irq, void *dev_id)
 {
        struct pt_regs *regs = get_irq_regs();
        do_leds();
-       do_timer(1);
+       xtime_update(1);
 #ifndef CONFIG_SMP
        update_process_times(user_mode(regs));
 #endif
index 343de73161fad0a72aca262717e07494b26cf461..4a68c2b1ec11f7f809b9f54eadf3f7f5595f2417 100644 (file)
@@ -132,7 +132,7 @@ out:
        return ret;
 }
 
-static int __init davinci_cpu_init(struct cpufreq_policy *policy)
+static int davinci_cpu_init(struct cpufreq_policy *policy)
 {
        int result = 0;
        struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
index 9eec63070e0c35b42c2fc55a6f7282635d99b737..beda8a4133a05a10fc469d5a4aa2fea05bd98c65 100644 (file)
@@ -480,8 +480,15 @@ static struct platform_device da850_mcasp_device = {
        .resource       = da850_mcasp_resources,
 };
 
+struct platform_device davinci_pcm_device = {
+       .name   = "davinci-pcm-audio",
+       .id     = -1,
+};
+
 void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata)
 {
+       platform_device_register(&davinci_pcm_device);
+
        /* DA830/OMAP-L137 has 3 instances of McASP */
        if (cpu_is_davinci_da830() && id == 1) {
                da830_mcasp1_device.dev.platform_data = pdata;
index d10298620e2c2fddac99c32a61557766fbf667db..3fa3e2867e19b8dfcbd3ca2275b0fbd194cb0693 100644 (file)
@@ -58,7 +58,7 @@ static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset)
 
        spin_lock_irqsave(&ctlr->lock, flags);
 
-       gpio_reg_set_bit(&regs->enable, gpio);
+       gpio_reg_set_bit(regs->enable, gpio);
 
        spin_unlock_irqrestore(&ctlr->lock, flags);
 
@@ -74,7 +74,7 @@ static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset)
 
        spin_lock_irqsave(&ctlr->lock, flags);
 
-       gpio_reg_clear_bit(&regs->enable, gpio);
+       gpio_reg_clear_bit(regs->enable, gpio);
 
        spin_unlock_irqrestore(&ctlr->lock, flags);
 }
@@ -88,7 +88,7 @@ static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
 
        spin_lock_irqsave(&ctlr->lock, flags);
 
-       gpio_reg_set_bit(&regs->direction, gpio);
+       gpio_reg_set_bit(regs->direction, gpio);
 
        spin_unlock_irqrestore(&ctlr->lock, flags);
 
@@ -106,11 +106,11 @@ static int tnetv107x_gpio_dir_out(struct gpio_chip *chip,
        spin_lock_irqsave(&ctlr->lock, flags);
 
        if (value)
-               gpio_reg_set_bit(&regs->data_out, gpio);
+               gpio_reg_set_bit(regs->data_out, gpio);
        else
-               gpio_reg_clear_bit(&regs->data_out, gpio);
+               gpio_reg_clear_bit(regs->data_out, gpio);
 
-       gpio_reg_clear_bit(&regs->direction, gpio);
+       gpio_reg_clear_bit(regs->direction, gpio);
 
        spin_unlock_irqrestore(&ctlr->lock, flags);
 
@@ -124,7 +124,7 @@ static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset)
        unsigned gpio = chip->base + offset;
        int ret;
 
-       ret = gpio_reg_get_bit(&regs->data_in, gpio);
+       ret = gpio_reg_get_bit(regs->data_in, gpio);
 
        return ret ? 1 : 0;
 }
@@ -140,9 +140,9 @@ static void tnetv107x_gpio_set(struct gpio_chip *chip,
        spin_lock_irqsave(&ctlr->lock, flags);
 
        if (value)
-               gpio_reg_set_bit(&regs->data_out, gpio);
+               gpio_reg_set_bit(regs->data_out, gpio);
        else
-               gpio_reg_clear_bit(&regs->data_out, gpio);
+               gpio_reg_clear_bit(regs->data_out, gpio);
 
        spin_unlock_irqrestore(&ctlr->lock, flags);
 }
index 730c49d1ebd85970c19d8dfea0e12017cb146fc3..14a504887189db3ad441562ce6138dfb577ac492 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __MACH_CLKDEV_H
 #define __MACH_CLKDEV_H
 
+struct clk;
+
 static inline int __clk_get(struct clk *clk)
 {
        return 1;
index 337392c3f549deb5124326fa67f09dd657673bba..acb7ae5b0a25eb179056855f36b0b21fcd9ff466 100644 (file)
@@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n)
        dd = clk->dpll_data;
 
        /* DPLL divider must result in a valid jitter correction val */
-       fint = clk->parent->rate / (n + 1);
+       fint = clk->parent->rate / n;
        if (fint < DPLL_FINT_BAND1_MIN) {
 
                pr_debug("rejecting n=%d due to Fint failure, "
index 394413dc7deb59dbdb982cc40a0eb8be05dff8bc..24b88504df0f403227c3a0e20fb229f876649902 100644 (file)
@@ -193,10 +193,12 @@ static void omap2_mbox_disable_irq(struct omap_mbox *mbox,
                omap_mbox_type_t irq)
 {
        struct omap_mbox2_priv *p = mbox->priv;
-       u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
-       l = mbox_read_reg(p->irqdisable);
-       l &= ~bit;
-       mbox_write_reg(l, p->irqdisable);
+       u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
+
+       if (!cpu_is_omap44xx())
+               bit = mbox_read_reg(p->irqdisable) & ~bit;
+
+       mbox_write_reg(bit, p->irqdisable);
 }
 
 static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
@@ -334,7 +336,7 @@ static struct omap_mbox mbox_iva_info = {
        .priv   = &omap2_mbox_iva_priv,
 };
 
-struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL };
+struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL };
 #endif
 
 #if defined(CONFIG_ARCH_OMAP4)
index 98148b6c36e9a33fedec64f1e9afc1a9796c4ce4..6c84659cf846373a7c2792f5f6ef16117c38702c 100644 (file)
@@ -605,7 +605,7 @@ static void __init omap_mux_dbg_create_entry(
        list_for_each_entry(e, &partition->muxmodes, node) {
                struct omap_mux *m = &e->mux;
 
-               (void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
+               (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
                                          m, &omap_mux_dbg_signal_fops);
        }
 }
index 125f56591fb56bb135442f7dcf0eeb8c855e894b..a5a83b358ddd89724de71eace26e11d8289c8e65 100644 (file)
@@ -637,14 +637,14 @@ static int __init pm_dbg_init(void)
 
                }
 
-       (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d,
+       (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
                                   &enable_off_mode, &pm_dbg_option_fops);
-       (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d,
+       (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
                                   &sleep_while_idle, &pm_dbg_option_fops);
-       (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d,
+       (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
                                   &wakeup_timer_seconds, &pm_dbg_option_fops);
        (void) debugfs_create_file("wakeup_timer_milliseconds",
-                       S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds,
+                       S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
                        &pm_dbg_option_fops);
        pm_dbg_init_done = 1;
 
index 729a644ce8523fe14bf624b13f5f80ea294412d7..3300ff6e3cfe80a1d0261db142939b73da7d6d5b 100644 (file)
@@ -38,8 +38,8 @@
 #define OMAP4430_PRCM_MPU_CPU1_INST            0x0800
 
 /* PRCM_MPU clockdomain register offsets (from instance start) */
-#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS      0x0000
-#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS      0x0000
+#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS      0x0018
+#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS      0x0018
 
 
 /*
index c37e823266d352cde6e0914d9654e89dfb70f2cf..1a777e34d0c2b33088393d2551187223566d3b68 100644 (file)
@@ -282,6 +282,7 @@ error:
                dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
                        "interrupt handler. Smartreflex will"
                        "not function as desired\n", __func__);
+               kfree(name);
                kfree(sr_info);
                return ret;
 }
@@ -879,7 +880,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
                ret = sr_late_init(sr_info);
                if (ret) {
                        pr_warning("%s: Error in SR late init\n", __func__);
-                       return ret;
+                       goto err_release_region;
                }
        }
 
@@ -890,17 +891,20 @@ static int __init omap_sr_probe(struct platform_device *pdev)
         * not try to create rest of the debugfs entries.
         */
        vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm);
-       if (!vdd_dbg_dir)
-               return -EINVAL;
+       if (!vdd_dbg_dir) {
+               ret = -EINVAL;
+               goto err_release_region;
+       }
 
        dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
        if (IS_ERR(dbg_dir)) {
                dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
                        __func__);
-               return PTR_ERR(dbg_dir);
+               ret = PTR_ERR(dbg_dir);
+               goto err_release_region;
        }
 
-       (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir,
+       (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
                                (void *)sr_info, &pm_sr_fops);
        (void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir,
                        &sr_info->err_weight);
@@ -913,7 +917,8 @@ static int __init omap_sr_probe(struct platform_device *pdev)
        if (IS_ERR(nvalue_dir)) {
                dev_err(&pdev->dev, "%s: Unable to create debugfs directory"
                        "for n-values\n", __func__);
-               return PTR_ERR(nvalue_dir);
+               ret = PTR_ERR(nvalue_dir);
+               goto err_release_region;
        }
 
        omap_voltage_get_volttable(sr_info->voltdm, &volt_data);
@@ -922,24 +927,16 @@ static int __init omap_sr_probe(struct platform_device *pdev)
                        " corresponding vdd vdd_%s. Cannot create debugfs"
                        "entries for n-values\n",
                        __func__, sr_info->voltdm->name);
-               return -ENODATA;
+               ret = -ENODATA;
+               goto err_release_region;
        }
 
        for (i = 0; i < sr_info->nvalue_count; i++) {
-               char *name;
-               char volt_name[32];
-
-               name = kzalloc(NVALUE_NAME_LEN + 1, GFP_KERNEL);
-               if (!name) {
-                       dev_err(&pdev->dev, "%s: Unable to allocate memory"
-                               " for n-value directory name\n",  __func__);
-                       return -ENOMEM;
-               }
+               char name[NVALUE_NAME_LEN + 1];
 
-               strcpy(name, "volt_");
-               sprintf(volt_name, "%d", volt_data[i].volt_nominal);
-               strcat(name, volt_name);
-               (void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir,
+               snprintf(name, sizeof(name), "volt_%d",
+                        volt_data[i].volt_nominal);
+               (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
                                &(sr_info->nvalue_table[i].nvalue));
        }
 
index 7b7c2683ae7bb15cad4a2d32b4eab681884a0864..0fc550e7e4825a04093a4e1980b1bd169f8cd893 100644 (file)
@@ -39,6 +39,7 @@
 #include <asm/mach/time.h>
 #include <plat/dmtimer.h>
 #include <asm/localtimer.h>
+#include <asm/sched_clock.h>
 
 #include "timer-gp.h"
 
@@ -190,6 +191,7 @@ static void __init omap2_gp_clocksource_init(void)
 /*
  * clocksource
  */
+static DEFINE_CLOCK_DATA(cd);
 static struct omap_dm_timer *gpt_clocksource;
 static cycle_t clocksource_read_cycles(struct clocksource *cs)
 {
@@ -204,6 +206,15 @@ static struct clocksource clocksource_gpt = {
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static void notrace dmtimer_update_sched_clock(void)
+{
+       u32 cyc;
+
+       cyc = omap_dm_timer_read_counter(gpt_clocksource);
+
+       update_sched_clock(&cd, cyc, (u32)~0);
+}
+
 /* Setup free-running counter for clocksource */
 static void __init omap2_gp_clocksource_init(void)
 {
@@ -224,6 +235,8 @@ static void __init omap2_gp_clocksource_init(void)
 
        omap_dm_timer_set_load_start(gpt, 1, 0);
 
+       init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate);
+
        if (clocksource_register_hz(&clocksource_gpt, tick_rate))
                printk(err2, clocksource_gpt.name);
 }
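
The timer-gp hook uses the ARM sched_clock scaffolding of this period: DEFINE_CLOCK_DATA() declares the state, init_sched_clock() registers a notrace callback along with the counter width and rate, and the callback feeds raw cycles into update_sched_clock(). Sketch against a hypothetical free-running 32-bit counter:

#include <linux/init.h>
#include <linux/types.h>
#include <asm/sched_clock.h>

static DEFINE_CLOCK_DATA(cd_sketch);

/* Placeholder for the hardware counter read. */
static u32 read_cycles_sketch(void)
{
	return 0;
}

static void notrace sketch_update_sched_clock(void)
{
	update_sched_clock(&cd_sketch, read_cycles_sketch(), (u32)~0);
}

static void __init sketch_sched_clock_init(unsigned long rate)
{
	init_sched_clock(&cd_sketch, sketch_update_sched_clock, 32, rate);
}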
index fbc5b775f895bbdadef12e1933bcfbd1b631ddbf..b166b1d845d76cbc6b8039336bc955344f362f48 100644 (file)
@@ -347,6 +347,7 @@ static struct platform_device *pxa25x_devices[] __initdata = {
        &pxa25x_device_assp,
        &pxa25x_device_pwm0,
        &pxa25x_device_pwm1,
+       &pxa_device_asoc_platform,
 };
 
 static struct sys_device pxa25x_sysdev[] = {
index c31e601eb49ccb069b99dcd806d4eabd8d27a34d..b9b1e5c2b29048247910612448a5498cb4513b7f 100644 (file)
@@ -81,8 +81,6 @@ static int tosa_bt_probe(struct platform_device *dev)
                goto err_rfk_alloc;
        }
 
-       rfkill_set_led_trigger_name(rfk, "tosa-bt");
-
        rc = rfkill_register(rfk);
        if (rc)
                goto err_rfkill;
index af152e70cfcfc09d7e8c29eb39388de04b3cd9e9..f2582ec300d9ef7311b8392ba9e532628a471c6d 100644 (file)
@@ -875,6 +875,11 @@ static struct platform_device sharpsl_rom_device = {
        .dev.platform_data = &sharpsl_rom_data,
 };
 
+static struct platform_device wm9712_device = {
+       .name   = "wm9712-codec",
+       .id     = -1,
+};
+
 static struct platform_device *devices[] __initdata = {
        &tosascoop_device,
        &tosascoop_jc_device,
@@ -885,6 +890,7 @@ static struct platform_device *devices[] __initdata = {
        &tosaled_device,
        &tosa_bt_device,
        &sharpsl_rom_device,
+       &wm9712_device,
 };
 
 static void tosa_poweroff(void)
index a0cb2581894fa9fbe322347de25781e7b9a4ba81..50825a3f91cced3988a3e01e02f5f334215765bd 100644 (file)
@@ -99,6 +99,7 @@ config MACH_NEO1973_GTA02
        select POWER_SUPPLY
        select MACH_NEO1973
        select S3C2410_PWM
+       select S3C_DEV_USB_HOST
        help
           Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone
 
index 953331d8d56a4c0f1f430af3add508261c7ffc05..3a56a229cac6634e47f3910e95dd21c99b527a50 100644 (file)
 #define GTA02v3_GPIO_nUSB_FLT  S3C2410_GPG(10) /* v3 + v4 only */
 #define GTA02v3_GPIO_nGSM_OC   S3C2410_GPG(11) /* v3 + v4 only */
 
-#define GTA02_GPIO_AMP_SHUT    S3C2440_GPJ1    /* v2 + v3 + v4 only */
-#define GTA02v1_GPIO_WLAN_GPIO10       S3C2440_GPJ2
-#define GTA02_GPIO_HP_IN       S3C2440_GPJ2    /* v2 + v3 + v4 only */
-#define GTA02_GPIO_INT0                S3C2440_GPJ3    /* v2 + v3 + v4 only */
-#define GTA02_GPIO_nGSM_EN     S3C2440_GPJ4
-#define GTA02_GPIO_3D_RESET    S3C2440_GPJ5
-#define GTA02_GPIO_nDL_GSM     S3C2440_GPJ6    /* v4 + v5 only */
-#define GTA02_GPIO_WLAN_GPIO0  S3C2440_GPJ7
-#define GTA02v1_GPIO_BAT_ID    S3C2440_GPJ8
-#define GTA02_GPIO_KEEPACT     S3C2440_GPJ8
-#define GTA02v1_GPIO_HP_IN     S3C2440_GPJ10
-#define GTA02_CHIP_PWD         S3C2440_GPJ11   /* v2 + v3 + v4 only */
-#define GTA02_GPIO_nWLAN_RESET S3C2440_GPJ12   /* v2 + v3 + v4 only */
+#define GTA02_GPIO_AMP_SHUT    S3C2410_GPJ(1)  /* v2 + v3 + v4 only */
+#define GTA02v1_GPIO_WLAN_GPIO10       S3C2410_GPJ(2)
+#define GTA02_GPIO_HP_IN       S3C2410_GPJ(2)  /* v2 + v3 + v4 only */
+#define GTA02_GPIO_INT0                S3C2410_GPJ(3)  /* v2 + v3 + v4 only */
+#define GTA02_GPIO_nGSM_EN     S3C2410_GPJ(4)
+#define GTA02_GPIO_3D_RESET    S3C2410_GPJ(5)
+#define GTA02_GPIO_nDL_GSM     S3C2410_GPJ(6)  /* v4 + v5 only */
+#define GTA02_GPIO_WLAN_GPIO0  S3C2410_GPJ(7)
+#define GTA02v1_GPIO_BAT_ID    S3C2410_GPJ(8)
+#define GTA02_GPIO_KEEPACT     S3C2410_GPJ(8)
+#define GTA02v1_GPIO_HP_IN     S3C2410_GPJ(10)
+#define GTA02_CHIP_PWD         S3C2410_GPJ(11) /* v2 + v3 + v4 only */
+#define GTA02_GPIO_nWLAN_RESET S3C2410_GPJ(12) /* v2 + v3 + v4 only */
 
 #define GTA02_IRQ_GSENSOR_1    IRQ_EINT0
 #define GTA02_IRQ_MODEM                IRQ_EINT1
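
The gta02.h conversion drops the per-pin S3C2440_GPJn constants for the
parameterised bank macro. The macro itself is plain base-plus-offset
arithmetic; a sketch with a hypothetical base value (the real one lives in
the per-SoC gpio-nrs.h header):

    /* bank J starts at a fixed offset in the global GPIO number space */
    #define S3C2410_GPIO_J_START   (288)   /* hypothetical base, for illustration */
    #define S3C2410_GPJ(n)         (S3C2410_GPIO_J_START + (n))

    /* so S3C2410_GPJ(11) == 299, usable with gpio_request()/gpio_set_value() */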
index dd37820645087701d7245fb379b664b2189772f3..fdfc4d5e37a13ae98255593286f13979c91f52e5 100644 (file)
@@ -150,6 +150,12 @@ static struct clk init_clocks_off[] = {
                .parent         = &clk_p,
                .enable         = s3c64xx_pclk_ctrl,
                .ctrlbit        = S3C_CLKCON_PCLK_IIC,
+       }, {
+               .name           = "i2c",
+               .id             = 1,
+               .parent         = &clk_p,
+               .enable         = s3c64xx_pclk_ctrl,
+               .ctrlbit        = S3C6410_CLKCON_PCLK_I2C1,
        }, {
                .name           = "iis",
                .id             = 0,
index 135db1b41252718d1f38db1d0b9d0af8f2f2171b..c35585cf8c4f516ef234651c304ea508bfa0e464 100644 (file)
@@ -690,12 +690,12 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
 
        regptr = regs + PL080_Cx_BASE(0);
 
-       for (ch = 0; ch < 8; ch++, chno++, chptr++) {
-               printk(KERN_INFO "%s: registering DMA %d (%p)\n",
-                      __func__, chno, regptr);
+       for (ch = 0; ch < 8; ch++, chptr++) {
+               pr_debug("%s: registering DMA %d (%p)\n",
+                        __func__, chno + ch, regptr);
 
                chptr->bit = 1 << ch;
-               chptr->number = chno;
+               chptr->number = chno + ch;
                chptr->dmac = dmac;
                chptr->regs = regptr;
                regptr += PL080_Cx_STRIDE;
@@ -704,7 +704,8 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
        /* for the moment, permanently enable the controller */
        writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);
 
-       printk(KERN_INFO "PL080: IRQ %d, at %p\n", irq, regs);
+       printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
+              irq, regs, chno, chno+8);
 
        return 0;
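
The dma.c fix stops advancing chno in the loop header, so the base channel
number survives the loop and the final banner can report the whole range. A
reduced illustration; set_channel_number() is a stand-in:

    #include <stdio.h>

    static void set_channel_number(int slot, int number)
    {
            printf("DMA %d (slot %d)\n", number, slot);
    }

    static void number_channels(int base, int nr)
    {
            for (int ch = 0; ch < nr; ch++)
                    set_channel_number(ch, base + ch);

            /* base was never mutated, so the summary can still use it */
            printf("channels %d..%d\n", base, base + nr);
    }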
 
index fd99a82e82c486e13a92f5d2ec35570b326db08d..92b09085caaa0e69bd698af7b5a2b61c02427df1 100644 (file)
@@ -72,7 +72,7 @@ static struct s3c_gpio_cfg gpio_4bit_cfg_eint0011 = {
        .get_pull       = s3c_gpio_getpull_updown,
 };
 
-int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
+static int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
 {
        return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO;
 }
@@ -138,7 +138,7 @@ static struct s3c_gpio_chip gpio_4bit[] = {
        },
 };
 
-int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
+static int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
 {
        return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO;
 }
index e85192a86fbe1a9f51bb9da15f20899bf0b4ab3e..a80a3163dd3034bbd55136432aca0d2372f77421 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/delay.h>
 #include <linux/smsc911x.h>
 #include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
 
 #ifdef CONFIG_SMDK6410_WM1190_EV1
 #include <linux/mfd/wm8350/core.h>
@@ -351,7 +352,7 @@ static struct regulator_init_data smdk6410_vddpll = {
 /* VDD_UH_MMC, LDO5 on J5 */
 static struct regulator_init_data smdk6410_vdduh_mmc = {
        .constraints = {
-               .name = "PVDD_UH/PVDD_MMC",
+               .name = "PVDD_UH+PVDD_MMC",
                .always_on = 1,
        },
 };
@@ -417,7 +418,7 @@ static struct regulator_init_data smdk6410_vddaudio = {
 /* S3C64xx internal logic & PLL */
 static struct regulator_init_data wm8350_dcdc1_data = {
        .constraints = {
-               .name = "PVDD_INT/PVDD_PLL",
+               .name = "PVDD_INT+PVDD_PLL",
                .min_uV = 1200000,
                .max_uV = 1200000,
                .always_on = 1,
@@ -452,7 +453,7 @@ static struct regulator_consumer_supply wm8350_dcdc4_consumers[] = {
 
 static struct regulator_init_data wm8350_dcdc4_data = {
        .constraints = {
-               .name = "PVDD_HI/PVDD_EXT/PVDD_SYS/PVCCM2MTV",
+               .name = "PVDD_HI+PVDD_EXT+PVDD_SYS+PVCCM2MTV",
                .min_uV = 3000000,
                .max_uV = 3000000,
                .always_on = 1,
@@ -464,7 +465,7 @@ static struct regulator_init_data wm8350_dcdc4_data = {
 /* OTGi/1190-EV1 HPVDD & AVDD */
 static struct regulator_init_data wm8350_ldo4_data = {
        .constraints = {
-               .name = "PVDD_OTGI/HPVDD/AVDD",
+               .name = "PVDD_OTGI+HPVDD+AVDD",
                .min_uV = 1200000,
                .max_uV = 1200000,
                .apply_uV = 1,
@@ -552,7 +553,7 @@ static struct wm831x_backlight_pdata wm1192_backlight_pdata = {
 
 static struct regulator_init_data wm1192_dcdc3 = {
        .constraints = {
-               .name = "PVDD_MEM/PVDD_GPS",
+               .name = "PVDD_MEM+PVDD_GPS",
                .always_on = 1,
        },
 };
@@ -563,7 +564,7 @@ static struct regulator_consumer_supply wm1192_ldo1_consumers[] = {
 
 static struct regulator_init_data wm1192_ldo1 = {
        .constraints = {
-               .name = "PVDD_LCD/PVDD_EXT",
+               .name = "PVDD_LCD+PVDD_EXT",
                .always_on = 1,
        },
        .consumer_supplies = wm1192_ldo1_consumers,
index f8ed0d22db70c0a1140940e7d505c393a87e7999..1d4d0ee9e8704160dd99e452438cefcaba8b23e5 100644 (file)
@@ -17,7 +17,7 @@
 void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
 {
        /* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */
-       s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), 8 + rows, S3C_GPIO_SFN(3));
+       s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), rows, S3C_GPIO_SFN(3));
 
        /* Set all the necessary GPL pins to special-function 3: KP_COL[x] */
        s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3));
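
The keypad fix hinges on s3c_gpio_cfgrange_nopull() taking (first pin, pin
count, function): passing 8 + rows configured up to eight pins beyond the
actual row lines. A model of the helper's contract, with a hypothetical
per-pin setter:

    #include <stdio.h>

    static void cfg_pin(unsigned int pin, unsigned int cfg)
    {
            printf("pin %u -> cfg %u\n", pin, cfg);  /* hypothetical setter */
    }

    /* model only: configures "count" consecutive pins starting at "start" */
    static void cfgrange(unsigned int start, unsigned int count,
                         unsigned int cfg)
    {
            for (unsigned int pin = start; pin < start + count; pin++)
                    cfg_pin(pin, cfg);
    }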
index 1a942037c4eff85966f4b72bbe88d34afb363467..f344a222bc84965b1ddc000ff4eb931af71886fe 100644 (file)
@@ -56,7 +56,7 @@ void s3c6400_setup_sdhci_cfg_card(struct platform_device *dev,
        else
                ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
 
-       printk(KERN_INFO "%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
+       pr_debug("%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
        writel(ctrl2, r + S3C_SDHCI_CONTROL2);
        writel(ctrl3, r + S3C_SDHCI_CONTROL3);
 }
index 5486c8f01f1d30f5b466de94dfd5aa02c4c50a99..adb5f298ead895a65db824705c7c8a7f38459ea6 100644 (file)
@@ -23,7 +23,7 @@
 #define S5P6440_GPIO_A_NR      (6)
 #define S5P6440_GPIO_B_NR      (7)
 #define S5P6440_GPIO_C_NR      (8)
-#define S5P6440_GPIO_F_NR      (2)
+#define S5P6440_GPIO_F_NR      (16)
 #define S5P6440_GPIO_G_NR      (7)
 #define S5P6440_GPIO_H_NR      (10)
 #define S5P6440_GPIO_I_NR      (16)
@@ -36,7 +36,7 @@
 #define S5P6450_GPIO_B_NR      (7)
 #define S5P6450_GPIO_C_NR      (8)
 #define S5P6450_GPIO_D_NR      (8)
-#define S5P6450_GPIO_F_NR      (2)
+#define S5P6450_GPIO_F_NR      (16)
 #define S5P6450_GPIO_G_NR      (14)
 #define S5P6450_GPIO_H_NR      (10)
 #define S5P6450_GPIO_I_NR      (16)
index 2123b96b563822abe183140aba3de1f385993d05..4303a86e6e3848624a9ac1f6261a037c7021c6d9 100644 (file)
@@ -454,6 +454,7 @@ static void __init ag5evm_init(void)
        gpio_direction_output(GPIO_PORT217, 0);
        mdelay(1);
        gpio_set_value(GPIO_PORT217, 1);
+       mdelay(100);
 
        /* LCD backlight controller */
        gpio_request(GPIO_PORT235, NULL); /* RESET */
index 3cf0951caa2dd73d164f04a5ad1ef1f9ba356c1a..81d6536552a97c61eec6976fe19c53b47432c20c 100644 (file)
@@ -1303,7 +1303,7 @@ static void __init ap4evb_init(void)
 
        lcdc_info.clock_source                  = LCDC_CLK_BUS;
        lcdc_info.ch[0].interface_type          = RGB18;
-       lcdc_info.ch[0].clock_divider           = 2;
+       lcdc_info.ch[0].clock_divider           = 3;
        lcdc_info.ch[0].flags                   = 0;
        lcdc_info.ch[0].lcd_size_cfg.width      = 152;
        lcdc_info.ch[0].lcd_size_cfg.height     = 91;
index fb4213a4e15a66ac06ff09070ec330d9463f0787..1657eac5dde2c1bfee1a3a4bd799e47a884d0c76 100644 (file)
@@ -303,7 +303,7 @@ static struct sh_mobile_lcdc_info lcdc_info = {
                .lcd_cfg = mackerel_lcdc_modes,
                .num_cfg = ARRAY_SIZE(mackerel_lcdc_modes),
                .interface_type         = RGB24,
-               .clock_divider          = 2,
+               .clock_divider          = 3,
                .flags                  = 0,
                .lcd_size_cfg.width     = 152,
                .lcd_size_cfg.height    = 91,
index ddd4a1b775f030a0b7635fe666aa31d5867aae63..7e58904c1c8c506aa63789d80cf52674b209afd8 100644 (file)
@@ -263,7 +263,7 @@ static struct clk div6_clks[DIV6_NR] = {
 };
 
 enum { MSTP001,
-       MSTP125, MSTP118, MSTP116, MSTP100,
+       MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
        MSTP219,
        MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
        MSTP331, MSTP329, MSTP325, MSTP323, MSTP312,
@@ -275,6 +275,10 @@ enum { MSTP001,
 
 static struct clk mstp_clks[MSTP_NR] = {
        [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */
+       [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */
+       [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */
+       [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */
+       [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */
        [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
        [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */
        [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
@@ -306,6 +310,9 @@ static struct clk_lookup lookups[] = {
        CLKDEV_CON_ID("r_clk", &r_clk),
 
        /* DIV6 clocks */
+       CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
+       CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
+       CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
        CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
        CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
        CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
@@ -313,11 +320,15 @@ static struct clk_lookup lookups[] = {
 
        /* MSTP32 clocks */
        CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
-       CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
+       CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */
+       CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */
+       CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */
+       CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */
        CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
        CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
-       CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
        CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
+       CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
+       CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
        CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
        CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
        CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
index efd3687ba19080ca303dec38801cbcf5c629eeac..3029aba38688e846ade96eefda7ecbceeb4f6732 100644 (file)
@@ -6,13 +6,10 @@ LIST "RWT Setting"
 EW 0xE6020004, 0xA500
 EW 0xE6030004, 0xA500
 
-DD 0x01001000, 0x01001000
-
 LIST "GPIO Setting"
 EB 0xE6051013, 0xA2
 
 LIST "CPG"
-ED 0xE6150080, 0x00000180
 ED 0xE61500C0, 0x00000002
 
 WAIT 1, 0xFE40009C
@@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040
 
 WAIT 1, 0xFE40009C
 
+LIST "SUB/USBClk"
+ED 0xE6150080, 0x00000180
+
 LIST "BSC"
 ED 0xFEC10000, 0x00E0001B
 
@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
 ED 0xFE40004C, 0x00110209
 ED 0xFE400010, 0x00000087
 
-WAIT 10, 0xFE40009C
+WAIT 30, 0xFE40009C
 
 ED 0xFE400084, 0x0000003F
 EB 0xFE500000, 0x00
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050
 
 WAIT 1, 0xFE40009C
 
-ED 0xE6150354, 0x00000002
+ED 0xFE400354, 0x01AD8002
 
 LIST "SCIF0 - Serial port for earlyprintk"
 EB 0xE6053098, 0x11
index efd3687ba19080ca303dec38801cbcf5c629eeac..3029aba38688e846ade96eefda7ecbceeb4f6732 100644 (file)
@@ -6,13 +6,10 @@ LIST "RWT Setting"
 EW 0xE6020004, 0xA500
 EW 0xE6030004, 0xA500
 
-DD 0x01001000, 0x01001000
-
 LIST "GPIO Setting"
 EB 0xE6051013, 0xA2
 
 LIST "CPG"
-ED 0xE6150080, 0x00000180
 ED 0xE61500C0, 0x00000002
 
 WAIT 1, 0xFE40009C
@@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040
 
 WAIT 1, 0xFE40009C
 
+LIST "SUB/USBClk"
+ED 0xE6150080, 0x00000180
+
 LIST "BSC"
 ED 0xFEC10000, 0x00E0001B
 
@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
 ED 0xFE40004C, 0x00110209
 ED 0xFE400010, 0x00000087
 
-WAIT 10, 0xFE40009C
+WAIT 30, 0xFE40009C
 
 ED 0xFE400084, 0x0000003F
 EB 0xFE500000, 0x00
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050
 
 WAIT 1, 0xFE40009C
 
-ED 0xE6150354, 0x00000002
+ED 0xFE400354, 0x01AD8002
 
 LIST "SCIF0 - Serial port for earlyprintk"
 EB 0xE6053098, 0x11
index 66ad2760c621461d8ade165f82d9817ed4b1e535..04c779832c78efb86eb590cd063171925af91a25 100644 (file)
@@ -57,5 +57,6 @@ struct tegra_kbc_platform_data {
        const struct matrix_keymap_data *keymap_data;
 
        bool wakeup;
+       bool use_fn_map;
 };
 #endif
index 459b319a9faddc43ee46e3baf4eb2f4cd1367cfc..49d3208793e5308e16ee0c0e4ae4f7fdd08d5f46 100644 (file)
@@ -322,15 +322,18 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
 
 struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
 {
-       struct omap_mbox *mbox;
-       int ret;
+       struct omap_mbox *_mbox, *mbox = NULL;
+       int i, ret;
 
        if (!mboxes)
                return ERR_PTR(-EINVAL);
 
-       for (mbox = *mboxes; mbox; mbox++)
-               if (!strcmp(mbox->name, name))
+       for (i = 0; (_mbox = mboxes[i]); i++) {
+               if (!strcmp(_mbox->name, name)) {
+                       mbox = _mbox;
                        break;
+               }
+       }
 
        if (!mbox)
                return ERR_PTR(-ENOENT);
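
The mailbox bug: mboxes is a NULL-terminated array of pointers, but the old
loop dereferenced it once and then incremented a struct omap_mbox pointer,
striding through the first mailbox's body instead of the array. Generic
shape of the corrected scan:

    #include <stddef.h>
    #include <string.h>

    struct box {
            const char *name;
    };

    /* walk a NULL-terminated array of pointers by index, rather than by
     * incrementing a pointer to the pointed-to type */
    static struct box *find_box(struct box **boxes, const char *name)
    {
            for (int i = 0; boxes[i]; i++)
                    if (!strcmp(boxes[i]->name, name))
                            return boxes[i];
            return NULL;
    }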
index 3776cd9524501c74784c9e1040b9e1643aae99b1..5928105490fac41e3705abeed4567579f724e7a8 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 
+#include <plat/devs.h>
+
 /* uart devices */
 
 static struct platform_device s3c24xx_uart_device0 = {
index c9113619029f67f4a7353b923cdd42483ffb894c..8d73724c0092a79f14555b8f8d0fdb54069b9425 100644 (file)
@@ -114,16 +114,14 @@ u32 arch_gettimeoffset(void)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 #ifdef CONFIG_CORE_TIMER_IRQ_L1
 __attribute__((l1_text))
 #endif
 irqreturn_t timer_interrupt(int irq, void *dummy)
 {
-       write_seqlock(&xtime_lock);
-       do_timer(1);
-       write_sequnlock(&xtime_lock);
+       xtime_update(1);
 
 #ifdef CONFIG_IPIPE
        update_root_process_times(get_irq_regs());
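
This is the first of many hunks in this merge converting tick handlers to
xtime_update(). The helper is essentially the open-coded sequence being
deleted (reconstructed here from the 2.6.38 kernel/time/timekeeping.c), so
the xtime_lock seqlock no longer leaks into every architecture's timer code:

    void xtime_update(unsigned long ticks)
    {
            write_seqlock(&xtime_lock);
            do_timer(ticks);
            write_sequnlock(&xtime_lock);
    }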
index 250f4d4b94368c6158917d701dabf12026f9eb0d..06a5e674401f02d64d1ccc6ae200c6e4156969fd 100644 (file)
@@ -13,6 +13,8 @@
 .align 2
 
 ENTRY(_outsl)
+       CC = R2 == 0;
+       IF CC JUMP 1f;
        P0 = R0;        /* P0 = port */
        P1 = R1;        /* P1 = address */
        P2 = R2;        /* P2 = count */
@@ -20,10 +22,12 @@ ENTRY(_outsl)
        LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2;
 .Llong_loop_s: R0 = [P1++];
 .Llong_loop_e: [P0] = R0;
-       RTS;
+1:     RTS;
 ENDPROC(_outsl)
 
 ENTRY(_outsw)
+       CC = R2 == 0;
+       IF CC JUMP 1f;
        P0 = R0;        /* P0 = port */
        P1 = R1;        /* P1 = address */
        P2 = R2;        /* P2 = count */
@@ -31,10 +35,12 @@ ENTRY(_outsw)
        LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2;
 .Lword_loop_s: R0 = W[P1++];
 .Lword_loop_e: W[P0] = R0;
-       RTS;
+1:     RTS;
 ENDPROC(_outsw)
 
 ENTRY(_outsb)
+       CC = R2 == 0;
+       IF CC JUMP 1f;
        P0 = R0;        /* P0 = port */
        P1 = R1;        /* P1 = address */
        P2 = R2;        /* P2 = count */
@@ -42,10 +48,12 @@ ENTRY(_outsb)
        LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2;
 .Lbyte_loop_s: R0 = B[P1++];
 .Lbyte_loop_e: B[P0] = R0;
-       RTS;
+1:     RTS;
 ENDPROC(_outsb)
 
 ENTRY(_outsw_8)
+       CC = R2 == 0;
+       IF CC JUMP 1f;
        P0 = R0;        /* P0 = port */
        P1 = R1;        /* P1 = address */
        P2 = R2;        /* P2 = count */
@@ -56,5 +64,5 @@ ENTRY(_outsw_8)
                R0 = R0 << 8;
                R0 = R0 + R1;
 .Lword8_loop_e: W[P0] = R0;
-       RTS;
+1:     RTS;
 ENDPROC(_outsw_8)
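
Each out-string routine above gains an up-front count test. A C analogue of
the guard, under the assumption that the Blackfin LSETUP hardware loop
executes its body at least once and therefore must not be entered with a
zero count:

    /* analogue of "CC = R2 == 0; IF CC JUMP 1f;" before the hardware loop */
    static void outsw_c(volatile unsigned short *port,
                        const unsigned short *buf, unsigned int count)
    {
            if (count == 0)
                    return;
            do {
                    *port = *buf++;         /* body runs at least once */
            } while (--count);
    }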
index 790c767ca95af8dc82ffc3fc56e7f69c5fb9df81..ab4a925a443e4e82001df0beb146b7b7b8e3b40a 100644 (file)
@@ -58,6 +58,8 @@
 1:
 .ifeqs "\flushins", BROK_FLUSH_INST
        \flushins [P0++];
+       nop;
+       nop;
 2:     nop;
 .else
 2:     \flushins [P0++];
index 00eb36f8debf10484912f35fb12af2452eb947fe..20c85b5dc7d0950e860193def61e5ffa5e14ffee 100644 (file)
@@ -140,7 +140,7 @@ stop_watchdog(void)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 
 //static unsigned short myjiff; /* used by our debug routine print_timestamp */
@@ -176,7 +176,7 @@ timer_interrupt(int irq, void *dev_id)
 
        /* call the real timer interrupt handler */
 
-       do_timer(1);
+       xtime_update(1);
        
         cris_do_profile(regs); /* Save profiling information */
         return IRQ_HANDLED;
index 84fed3b4b0799dd35991e57eaa22aca18767c6a3..4c9e3e1ba5d12ccb08424e6ad04ccda7341f576c 100644 (file)
@@ -26,7 +26,9 @@
 #define FLUSH_ALL (void*)0xffffffff
 
 /* Vector of locks used for various atomic operations */
-spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
+spinlock_t cris_atomic_locks[] = {
+       [0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks)
+};
 
 /* CPU masks */
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
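
SPIN_LOCK_UNLOCKED cannot carry a lockdep class, which is why this hunk
switches to __SPIN_LOCK_UNLOCKED(name). The GCC range-designator idiom
initialises every element of a static lock array in one line; the count
below is illustrative:

    #include <linux/spinlock.h>

    #define LOCK_COUNT 16   /* illustrative */

    static spinlock_t my_locks[LOCK_COUNT] = {
            [0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(my_locks)
    };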
index a545211e999d374dc49b9bc9ac04202c14f8f2ba..bb978ede89852ef06bc7d41f6520cb9bb20769b5 100644 (file)
@@ -183,7 +183,7 @@ void handle_watchdog_bite(struct pt_regs *regs)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick.
+ * as well as call the "xtime_update()" routine every clocktick.
  */
 extern void cris_do_profile(struct pt_regs *regs);
 
@@ -216,9 +216,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
                return IRQ_HANDLED;
 
        /* Call the real timer interrupt handler */
-       write_seqlock(&xtime_lock);
-       do_timer(1);
-       write_sequnlock(&xtime_lock);
+       xtime_update(1);
         return IRQ_HANDLED;
 }
 
index 442218980db02e716b3be48e1071661598ab7c10..c49be845f96a1ae848a96fd9bf834737f6081541 100644 (file)
@@ -72,11 +72,6 @@ SECTIONS
        INIT_TEXT_SECTION(PAGE_SIZE)
        .init.data : { INIT_DATA }
        .init.setup : { INIT_SETUP(16) }
-#ifdef CONFIG_ETRAX_ARCH_V32
-       __start___param = .;
-       __param : { *(__param) }
-       __stop___param = .;
-#endif
        .initcall.init : {
                INIT_CALLS
        }
index 08b3d1da358398e111ccf01f50227ef8f60a7ffc..4bea27f50a7ab4f72d8cbb28cecf181c89e9cf28 100644 (file)
@@ -7,10 +7,11 @@
 #include <asm/errno.h>
 #include <asm/uaccess.h>
 
-extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr);
+extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr);
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
 {
        return -ENOSYS;
 }
index 14f64b054c7ebe726ee0388f5ee5fe068dc7669c..d155ca9e5098c8963f6ea5b0d8c987a6963f0538 100644 (file)
@@ -18,7 +18,7 @@
  * the various futex operations; MMU fault checking is ignored under no-MMU
  * conditions
  */
-static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval)
 {
        int oldval, ret;
 
@@ -50,7 +50,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_o
        return ret;
 }
 
-static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval)
 {
        int oldval, ret;
 
@@ -83,7 +83,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_o
        return ret;
 }
 
-static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval)
 {
        int oldval, ret;
 
@@ -116,7 +116,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_ol
        return ret;
 }
 
-static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval)
 {
        int oldval, ret;
 
@@ -149,7 +149,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_o
        return ret;
 }
 
-static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval)
 {
        int oldval, ret;
 
@@ -186,7 +186,7 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_o
 /*
  * do the futex operations
  */
-int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -197,7 +197,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        pagefault_disable();
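
The encoded_op word decoded at the top of futex_atomic_op_inuser() packs
four fields: a 4-bit op, a 4-bit cmp, and two signed 12-bit arguments
recovered by shift pairs. A stand-alone decoder of the same layout (this
relies on arithmetic right shift of signed ints, as the kernel code does):

    #include <stdio.h>

    #define FUTEX_OP_OPARG_SHIFT 8  /* "oparg is a shift count" flag */

    static void decode(int encoded_op)
    {
            int op     = (encoded_op >> 28) & 7;
            int cmp    = (encoded_op >> 24) & 15;
            int oparg  = (encoded_op << 8) >> 20;   /* sign-extended 12 bits */
            int cmparg = (encoded_op << 20) >> 20;  /* sign-extended 12 bits */

            if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                    oparg = 1 << oparg;

            printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
    }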
index 0ddbbae83cb2997e891999f7d3610d71e3b14a74..b457de496b7052272e54857b6687f15d190365ad 100644 (file)
@@ -50,21 +50,13 @@ static struct irqaction timer_irq  = {
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 static irqreturn_t timer_interrupt(int irq, void *dummy)
 {
        profile_tick(CPU_PROFILING);
-       /*
-        * Here we are in the timer irq handler. We just have irqs locally
-        * disabled but we don't know if the timer_bh is running on the other
-        * CPU. We need to avoid to SMP race with it. NOTE: we don't need
-        * the irq version of write_lock because as just said we have irq
-        * locally disabled. -arca
-        */
-       write_seqlock(&xtime_lock);
 
-       do_timer(1);
+       xtime_update(1);
 
 #ifdef CONFIG_HEARTBEAT
        static unsigned short n;
@@ -72,8 +64,6 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
        __set_LEDS(n);
 #endif /* CONFIG_HEARTBEAT */
 
-       write_sequnlock(&xtime_lock);
-
        update_process_times(user_mode(get_irq_regs()));
 
        return IRQ_HANDLED;
index 165005aff9dfbc52d291f27bfd59ef99653b0ee3..32263a138aa6ccf9a2a871a859254f2a18df03d9 100644 (file)
@@ -35,9 +35,7 @@ void h8300_timer_tick(void)
 {
        if (current->pid)
                profile_tick(CPU_PROFILING);
-       write_seqlock(&xtime_lock);
-       do_timer(1);
-       write_sequnlock(&xtime_lock);
+       xtime_update(1);
        update_process_times(user_mode(get_irq_regs()));
 }
 
index 3946c0fa8374d40113f3c2be685994c2378b69aa..7a1533fad47d93773dc191981f19f76233f30c98 100644 (file)
@@ -61,7 +61,7 @@
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
index c7f0f062239cd541112ecbe10cdd34dc54672eec..8428525ddb225de4cf1ea49eb783b3f64d34e4a0 100644 (file)
@@ -46,7 +46,7 @@ do {                                                                  \
 } while (0)
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        pagefault_disable();
@@ -100,23 +100,26 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
 {
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        {
-               register unsigned long r8 __asm ("r8");
+               register unsigned long r8 __asm ("r8") = 0;
+               unsigned long prev;
                __asm__ __volatile__(
                        "       mf;;                                    \n"
                        "       mov ar.ccv=%3;;                         \n"
                        "[1:]   cmpxchg4.acq %0=[%1],%2,ar.ccv          \n"
                        "       .xdata4 \"__ex_table\", 1b-., 2f-.      \n"
                        "[2:]"
-                       : "=r" (r8)
+                       : "=r" (prev)
                        : "r" (uaddr), "r" (newval),
                          "rO" ((long) (unsigned) oldval)
                        : "memory");
+               *uval = prev;
                return r8;
        }
 }
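
All of the futex conversions in this merge move to the same calling
convention: the previous value travels out through *uval, and the return
value carries only 0 or -EFAULT, so callers can distinguish "compare failed"
from "fault". A hedged generic sketch of that shape; arch_cmpxchg_user() is
hypothetical and stands in for the per-arch ll/sc or cmpxchg sequence with
its exception-table fixups:

    static inline int
    futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                                  u32 oldval, u32 newval)
    {
            u32 prev;

            if (arch_cmpxchg_user(&prev, uaddr, oldval, newval))
                    return -EFAULT;         /* user access faulted */

            *uval = prev;                   /* caller checks prev == oldval */
            return 0;
    }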
index 215d5454c7d30596f16dbe7266038633c40484f7..3027e7516d8502ea352afa38c6515925b5fe2d77 100644 (file)
 #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
 #endif
 
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
 #include <asm/intrinsics.h>
 
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-       signed long             count;
-       spinlock_t              wait_lock;
-       struct list_head        wait_list;
-};
-
 #define RWSEM_UNLOCKED_VALUE           __IA64_UL_CONST(0x0000000000000000)
 #define RWSEM_ACTIVE_BIAS              (1L)
 #define RWSEM_ACTIVE_MASK              (0xffffffffL)
@@ -46,26 +34,6 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-#define __RWSEM_INITIALIZER(name) \
-       { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-         LIST_HEAD_INIT((name).wait_list) }
-
-#define DECLARE_RWSEM(name) \
-       struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-static inline void
-init_rwsem (struct rw_semaphore *sem)
-{
-       sem->count = RWSEM_UNLOCKED_VALUE;
-       spin_lock_init(&sem->wait_lock);
-       INIT_LIST_HEAD(&sem->wait_list);
-}
-
 /*
  * lock for reading
  */
@@ -174,9 +142,4 @@ __downgrade_write (struct rw_semaphore *sem)
 #define rwsem_atomic_add(delta, sem)   atomic64_add(delta, (atomic64_t *)(&(sem)->count))
 #define rwsem_atomic_update(delta, sem)        atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-       return (sem->count != 0);
-}
-
 #endif /* _ASM_IA64_RWSEM_H */
index 96fc62366aa44bfe83191fc9310e2f43f0182a1c..ed28bcd5bb85c1973535384127763f800340975c 100644 (file)
@@ -107,7 +107,7 @@ extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
 static inline int
 xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
 {
-       return _hypercall2(int, sched_op_new, cmd, arg);
+       return _hypercall2(int, sched_op, cmd, arg);
 }
 
 static inline long
index 9702fa92489edb3f7aa04d21ad1a801afc8a1807..156ad803d5b7aedce2b059545cf1bde0cd44d32e 100644 (file)
@@ -190,19 +190,10 @@ timer_interrupt (int irq, void *dev_id)
 
                new_itm += local_cpu_data->itm_delta;
 
-               if (smp_processor_id() == time_keeper_id) {
-                       /*
-                        * Here we are in the timer irq handler. We have irqs locally
-                        * disabled, but we don't know if the timer_bh is running on
-                        * another CPU. We need to avoid to SMP race by acquiring the
-                        * xtime_lock.
-                        */
-                       write_seqlock(&xtime_lock);
-                       do_timer(1);
-                       local_cpu_data->itm_next = new_itm;
-                       write_sequnlock(&xtime_lock);
-               } else
-                       local_cpu_data->itm_next = new_itm;
+               if (smp_processor_id() == time_keeper_id)
+                       xtime_update(1);
+
+               local_cpu_data->itm_next = new_itm;
 
                if (time_after(new_itm, ia64_get_itc()))
                        break;
@@ -222,7 +213,7 @@ skip_process_time_accounting:
                 * comfort, we increase the safety margin by
                 * intentionally dropping the next tick(s).  We do NOT
                 * update itm.next because that would force us to call
-                * do_timer() which in turn would let our clock run
+                * xtime_update() which in turn would let our clock run
                 * too fast (with the potentially devastating effect
                 * of losing monotony of time).
                 */
index fd66b048c6fa2b719de1f983779aaf91175b7942..419c8620945ae2dd4d8e710a947470986b025e86 100644 (file)
@@ -37,19 +37,14 @@ xen_mm_unpin_all(void)
        /* nothing */
 }
 
-void xen_pre_device_suspend(void)
-{
-       /* nothing */
-}
-
 void
-xen_pre_suspend()
+xen_arch_pre_suspend()
 {
        /* nothing */
 }
 
 void
-xen_post_suspend(int suspend_cancelled)
+xen_arch_post_suspend(int suspend_cancelled)
 {
        if (suspend_cancelled)
                return;
index c1c544513e8d0926a8a4bd879f0056240f067bb0..1f8244a78bee026948376340024c50c29f922c59 100644 (file)
@@ -139,14 +139,11 @@ consider_steal_time(unsigned long new_itm)
                run_posix_cpu_timers(p);
                delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
 
-               if (cpu == time_keeper_id) {
-                       write_seqlock(&xtime_lock);
-                       do_timer(stolen + blocked);
-                       local_cpu_data->itm_next = delta_itm + new_itm;
-                       write_sequnlock(&xtime_lock);
-               } else {
-                       local_cpu_data->itm_next = delta_itm + new_itm;
-               }
+               if (cpu == time_keeper_id)
+                       xtime_update(stolen + blocked);
+
+               local_cpu_data->itm_next = delta_itm + new_itm;
+
                per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
                per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
        }
index bda86820bffdc19e078c5e23aeccc48c5e9581d2..84dd04048db9dc182e8b7ebab31eb44894c3726d 100644 (file)
@@ -107,15 +107,14 @@ u32 arch_gettimeoffset(void)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
 #ifndef CONFIG_SMP
        profile_tick(CPU_PROFILING);
 #endif
-       /* XXX FIXME. Uh, the xtime_lock should be held here, no? */
-       do_timer(1);
+       xtime_update(1);
 
 #ifndef CONFIG_SMP
        update_process_times(user_mode(get_irq_regs()));
index 9fe6fefb5e142f5c238441d885fe6ddc076fe19e..1edd95095cb4f6c9b1bc59a6fd219693f19454e6 100644 (file)
@@ -45,8 +45,8 @@ extern int bvme6000_set_clock_mmss (unsigned long);
 extern void bvme6000_reset (void);
 void bvme6000_set_vectors (void);
 
-/* Save tick handler routine pointer, will point to do_timer() in
- * kernel/sched.c, called via bvme6000_process_int() */
+/* Save tick handler routine pointer, will point to xtime_update() in
+ * kernel/time/timekeeping.c, called via bvme6000_process_int() */
 
 static irq_handler_t tick_handler;
 
index 06438dac08ff64172ac577ee8336973c2c946b75..18b34ee5db3bb94f9fadd5d5e2935a3056dc39e5 100644 (file)
@@ -37,11 +37,11 @@ static inline int set_rtc_mmss(unsigned long nowtime)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 static irqreturn_t timer_interrupt(int irq, void *dummy)
 {
-       do_timer(1);
+       xtime_update(1);
        update_process_times(user_mode(get_irq_regs()));
        profile_tick(CPU_PROFILING);
 
index 100baaa692a14fecc4d1cf74c995d3b43f210e1c..6cb9c3a9b6c955c013ee0185c23138ac3346f745 100644 (file)
@@ -46,8 +46,8 @@ extern void mvme147_reset (void);
 
 static int bcd2int (unsigned char b);
 
-/* Save tick handler routine pointer, will point to do_timer() in
- * kernel/sched.c, called via mvme147_process_int() */
+/* Save tick handler routine pointer, will point to xtime_update() in
+ * kernel/time/timekeeping.c, called via mvme147_process_int() */
 
 irq_handler_t tick_handler;
 
index 11edf61cc2c4cb864b5a092bf9e1b2ba018f15ee..0b28e26216535c2212b4bd03610d503a1b82b275 100644 (file)
@@ -51,8 +51,8 @@ extern void mvme16x_reset (void);
 
 int bcd2int (unsigned char b);
 
-/* Save tick handler routine pointer, will point to do_timer() in
- * kernel/sched.c, called via mvme16x_process_int() */
+/* Save tick handler routine pointer, will point to xtime_update() in
+ * kernel/time/timekeeping.c, called via mvme16x_process_int() */
 
 static irq_handler_t tick_handler;
 
index 2d9e21bd313a821725457a0751e5411a67246d01..6464ad3ae3e6fe303732a8ab83a6eacf96473c20 100644 (file)
@@ -66,7 +66,7 @@ static irqreturn_t sun3_int5(int irq, void *dev_id)
 #ifdef CONFIG_SUN3
        intersil_clear();
 #endif
-        do_timer(1);
+       xtime_update(1);
        update_process_times(user_mode(get_irq_regs()));
         if (!(kstat_cpu(0).irqs[irq] % 20))
                 sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]);
index d6ac2a43453ce5be5808a0fa6aeacd4666cf8da6..6623909f70e6f1d00facb0047865c12aa0510509 100644 (file)
@@ -36,7 +36,7 @@ static inline int set_rtc_mmss(unsigned long nowtime)
 #ifndef CONFIG_GENERIC_CLOCKEVENTS
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 irqreturn_t arch_timer_interrupt(int irq, void *dummy)
 {
@@ -44,11 +44,7 @@ irqreturn_t arch_timer_interrupt(int irq, void *dummy)
        if (current->pid)
                profile_tick(CPU_PROFILING);
 
-       write_seqlock(&xtime_lock);
-
-       do_timer(1);
-
-       write_sequnlock(&xtime_lock);
+       xtime_update(1);
 
        update_process_times(user_mode(get_irq_regs()));
 
index ad3fd61b2fe7eff6689e72a9e6936093e226740d..b0526d2716fa7defea7c036c750284f5a7496443 100644 (file)
@@ -29,7 +29,7 @@
 })
 
 static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -39,7 +39,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        pagefault_disable();
@@ -94,31 +94,34 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
 {
-       int prev, cmp;
+       int ret = 0, cmp;
+       u32 prev;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
-       __asm__ __volatile__ ("1:       lwx     %0, %2, r0;             \
-                                       cmp     %1, %0, %3;             \
-                                       beqi    %1, 3f;                 \
-                               2:      swx     %4, %2, r0;             \
-                                       addic   %1, r0, 0;              \
-                                       bnei    %1, 1b;                 \
+       __asm__ __volatile__ ("1:       lwx     %1, %3, r0;             \
+                                       cmp     %2, %1, %4;             \
+                                       beqi    %2, 3f;                 \
+                               2:      swx     %5, %3, r0;             \
+                                       addic   %2, r0, 0;              \
+                                       bnei    %2, 1b;                 \
                                3:                                      \
                                .section .fixup,\"ax\";                 \
                                4:      brid    3b;                     \
-                                       addik   %0, r0, %5;             \
+                                       addik   %0, r0, %6;             \
                                .previous;                              \
                                .section __ex_table,\"a\";              \
                                .word   1b,4b,2b,4b;                    \
                                .previous;"                             \
-               : "=&r" (prev), "=&r"(cmp)                              \
+               : "+r" (ret), "=&r" (prev), "=&r"(cmp)  \
                : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT));
 
-       return prev;
+       *uval = prev;
+       return ret;
 }
 
 #endif /* __KERNEL__ */
index f5ecc0566bc292f72b0020ac043bbfa7fd5a3300..d88983516e26118bff775f8d9c87f58e8163a88f 100644 (file)
@@ -4,6 +4,7 @@ config MIPS
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_IDE
        select HAVE_OPROFILE
+       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select PERF_USE_VMALLOC
        select HAVE_ARCH_KGDB
@@ -208,6 +209,7 @@ config MACH_JZ4740
        select ARCH_REQUIRE_GPIOLIB
        select SYS_HAS_EARLY_PRINTK
        select HAVE_PWM
+       select HAVE_CLK
 
 config LASAT
        bool "LASAT Networks platforms"
@@ -333,6 +335,8 @@ config PNX8550_STB810
 config PMC_MSP
        bool "PMC-Sierra MSP chipsets"
        depends on EXPERIMENTAL
+       select CEVT_R4K
+       select CSRC_R4K
        select DMA_NONCOHERENT
        select SWAP_IO_SPACE
        select NO_EXCEPT_FILL
index 6398fa95905c9036518aa728292b16fe925eee39..40b84b99119133e2bce153532c9db5fcca2d1700 100644 (file)
@@ -54,8 +54,8 @@ int mtx1_pci_idsel(unsigned int devsel, int assert);
 
 static void mtx1_reset(char *c)
 {
-       /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
-       au_writel(0x00000000, 0xAE00001C);
+       /* Jump to the reset vector */
+       __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
 }
 
 static void mtx1_power_off(void)
index e30e42add697ef728d0180cd0ea25d9414ee53d2..956f946218c519eb3b1c86cf76540150eac244e7 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/mtd/physmap.h>
 #include <mtd/mtd-abi.h>
 
+#include <asm/mach-au1x00/au1xxx_eth.h>
+
 static struct gpio_keys_button mtx1_gpio_button[] = {
        {
                .gpio = 207,
@@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = {
        &mtx1_mtd,
 };
 
+static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
+       .phy_search_highest_addr        = 1,
+       .phy1_search_mac0               = 1,
+};
+
 static int __init mtx1_register_devices(void)
 {
        int rc;
 
+       au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
+
        rc = gpio_request(mtx1_gpio_button[0].gpio,
                                        mtx1_gpio_button[0].desc);
        if (rc < 0) {
index b43c918925d36406cd0903afb95f6c2d6abe546f..80c521e5290d630b0c0ec69023aaa6c9d4a00bff 100644 (file)
@@ -36,8 +36,8 @@
 
 static void xxs1500_reset(char *c)
 {
-       /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
-       au_writel(0x00000000, 0xAE00001C);
+       /* Jump to the reset vector */
+       __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
 }
 
 static void xxs1500_power_off(void)
index b9cce90346cfc334819f44e95c4308f96b8f9533..6ebf1734b411b5150412b21a6744ec7c177c2088 100644 (file)
@@ -75,7 +75,7 @@
 }
 
 static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -85,7 +85,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        pagefault_disable();
@@ -132,11 +132,13 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
 {
-       int retval;
+       int ret = 0;
+       u32 val;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        if (cpu_has_llsc && R10000_LLSC_WAR) {
@@ -145,25 +147,25 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
-               "1:     ll      %0, %2                                  \n"
-               "       bne     %0, %z3, 3f                             \n"
+               "1:     ll      %1, %3                                  \n"
+               "       bne     %1, %z4, 3f                             \n"
                "       .set    mips0                                   \n"
-               "       move    $1, %z4                                 \n"
+               "       move    $1, %z5                                 \n"
                "       .set    mips3                                   \n"
-               "2:     sc      $1, %1                                  \n"
+               "2:     sc      $1, %2                                  \n"
                "       beqzl   $1, 1b                                  \n"
                __WEAK_LLSC_MB
                "3:                                                     \n"
                "       .set    pop                                     \n"
                "       .section .fixup,\"ax\"                          \n"
-               "4:     li      %0, %5                                  \n"
+               "4:     li      %0, %6                                  \n"
                "       j       3b                                      \n"
                "       .previous                                       \n"
                "       .section __ex_table,\"a\"                       \n"
                "       "__UA_ADDR "\t1b, 4b                            \n"
                "       "__UA_ADDR "\t2b, 4b                            \n"
                "       .previous                                       \n"
-               : "=&r" (retval), "=R" (*uaddr)
+               : "+r" (ret), "=&r" (val), "=R" (*uaddr)
                : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
                : "memory");
        } else if (cpu_has_llsc) {
@@ -172,31 +174,32 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
-               "1:     ll      %0, %2                                  \n"
-               "       bne     %0, %z3, 3f                             \n"
+               "1:     ll      %1, %3                                  \n"
+               "       bne     %1, %z4, 3f                             \n"
                "       .set    mips0                                   \n"
-               "       move    $1, %z4                                 \n"
+               "       move    $1, %z5                                 \n"
                "       .set    mips3                                   \n"
-               "2:     sc      $1, %1                                  \n"
+               "2:     sc      $1, %2                                  \n"
                "       beqz    $1, 1b                                  \n"
                __WEAK_LLSC_MB
                "3:                                                     \n"
                "       .set    pop                                     \n"
                "       .section .fixup,\"ax\"                          \n"
-               "4:     li      %0, %5                                  \n"
+               "4:     li      %0, %6                                  \n"
                "       j       3b                                      \n"
                "       .previous                                       \n"
                "       .section __ex_table,\"a\"                       \n"
                "       "__UA_ADDR "\t1b, 4b                            \n"
                "       "__UA_ADDR "\t2b, 4b                            \n"
                "       .previous                                       \n"
-               : "=&r" (retval), "=R" (*uaddr)
+               : "+r" (ret), "=&r" (val), "=R" (*uaddr)
                : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
                : "memory");
        } else
                return -ENOSYS;
 
-       return retval;
+       *uval = val;
+       return ret;
 }
 
 #endif
index e00007cf816220ed3f62c9269372d1f050714027..d0c77496c7281fdca202a46ef8d453aa04557f7b 100644 (file)
 
 #ifndef __MIPS_PERF_EVENT_H__
 #define __MIPS_PERF_EVENT_H__
-
-/*
- * MIPS performance counters do not raise NMI upon overflow, a regular
- * interrupt will be signaled. Hence we can do the pending perf event
- * work at the tail of the irq handler.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
+/* Leave it empty here. The file is required by linux/perf_event.h */
 #endif /* __MIPS_PERF_EVENT_H__ */
index 5a84a1f11231fa000b71525842fc2cd6c1ec4d71..94ca2b018af70ff3191d4e44055311efc09e800d 100644 (file)
 #include <asm/cacheflush.h>
 #include <asm/uasm.h>
 
-/*
- * If the Instruction Pointer is in module space (0xc0000000), return true;
- * otherwise, it is in kernel space (0x80000000), return false.
- *
- * FIXME: This will not work when the kernel space and module space are the
- * same. If they are the same, we need to modify scripts/recordmcount.pl,
- * ftrace_make_nop/call() and the other related parts to ensure the
- * enabling/disabling of the calling site to _mcount is right for both kernel
- * and module.
- */
-
-static inline int in_module(unsigned long ip)
-{
-       return ip & 0x40000000;
-}
+#include <asm-generic/sections.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000         /* jump & link: ip --> ra, jump to target */
 #define ADDR_MASK 0x03ffffff   /*  op_code|addr : 31...26|25 ....0 */
 
-#define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */
-#define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */
 #define INSN_NOP 0x00000000    /* nop */
 #define INSN_JAL(addr) \
        ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
@@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void)
 #endif
 }
 
+/*
+ * Check if the address is in kernel space
+ *
+ * Clone of core_kernel_text() from kernel/extable.c, minus the
+ * init_kernel_text() check: ftrace does not trace init-section functions.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+       if (ip >= (unsigned long)_stext &&
+           ip <= (unsigned long)_etext)
+               return 1;
+       return 0;
+}
+
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
        int faulted;
@@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
        return 0;
 }
 
+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount         --> nop
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ *  sub sp, sp, 8
+ *                                  1: offset = 5 instructions
+ * 2.2 For the other situations
+ *
+ * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ *  nop | move $12, ra_address | sub sp, sp, 8
+ *                                  1: offset = 4 instructions
+ */
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
 int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod,
        unsigned long ip = rec->ip;
 
        /*
-        * We have compiled module with -mlong-calls, but compiled the kernel
-        * without it, we need to cope with them respectively.
+        * If ip is in kernel space, no long call is needed; otherwise, the
+        * module's long-call sequence is what must be patched.
         */
-       if (in_module(ip)) {
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-               /*
-                * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
-                * addiu v1, v1, low_16bit_of_mcount
-                * move at, ra
-                * move $12, ra_address
-                * jalr v1
-                *  sub sp, sp, 8
-                *                                  1: offset = 5 instructions
-                */
-               new = INSN_B_1F_5;
-#else
-               /*
-                * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
-                * addiu v1, v1, low_16bit_of_mcount
-                * move at, ra
-                * jalr v1
-                *  nop | move $12, ra_address | sub sp, sp, 8
-                *                                  1: offset = 4 instructions
-                */
-               new = INSN_B_1F_4;
-#endif
-       } else {
-               /*
-                * move at, ra
-                * jal _mcount          --> nop
-                */
-               new = INSN_NOP;
-       }
+       new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+
        return ftrace_modify_code(ip, new);
 }
 
@@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
        unsigned int new;
        unsigned long ip = rec->ip;
 
-       /* ip, module: 0xc0000000, kernel: 0x80000000 */
-       new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
+       new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
+               insn_lui_v1_hi16_mcount;
 
        return ftrace_modify_code(ip, new);
 }
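
INSN_B_1F replaces the two hard-coded INSN_B_1F_4/INSN_B_1F_5 constants: a
MIPS unconditional b is beq $0, $0 (opcode 0x10000000) with the branch
distance, counted in instructions past the delay slot, in the low 16 bits.
A quick check that the OR reproduces the old values:

    #include <stdio.h>

    int main(void)
    {
            unsigned int b_over_4 = 0x10000000 | 4; /* old INSN_B_1F_4 */
            unsigned int b_over_5 = 0x10000000 | 5; /* old INSN_B_1F_5 */

            printf("%#010x %#010x\n", b_over_4, b_over_5);
            /* prints 0x10000004 0x10000005 */
            return 0;
    }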
@@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void)
 #define S_R_SP (0xafb0 << 16)  /* s{d,w} R, offset(sp) */
 #define OFFSET_MASK    0xffff  /* stack offset range: 0 ~ PT_SIZE */
 
-unsigned long ftrace_get_parent_addr(unsigned long self_addr,
-                                    unsigned long parent,
-                                    unsigned long parent_addr,
-                                    unsigned long fp)
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+               old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
 {
-       unsigned long sp, ip, ra;
+       unsigned long sp, ip, tmp;
        unsigned int code;
        int faulted;
 
        /*
-        * For module, move the ip from calling site of mcount to the
-        * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
-        * kernel, move to the instruction "move ra, at"(offset is 12)
+        * Move ip back from the return address: for a module, to just after
+        * the instruction "lui v1, hi_16bit_of_mcount" (offset is 24); for
+        * the kernel, to just after the instruction "move ra, at" (offset is 16)
         */
-       ip = self_addr - (in_module(self_addr) ? 20 : 12);
+       ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
 
        /*
         * search the text backwards until finding a non-store instruction or
         * the "s{d,w} ra, offset(sp)" instruction
         */
        do {
-               ip -= 4;
-
                /* get the code at "ip": code = *(unsigned int *)ip; */
                safe_load_code(code, ip, faulted);
 
@@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
                 * store the ra on the stack
                 */
                if ((code & S_R_SP) != S_R_SP)
-                       return parent_addr;
+                       return parent_ra_addr;
 
-       } while (((code & S_RA_SP) != S_RA_SP));
+               /* Move backwards to the previous instruction */
+               ip -= 4;
+       } while ((code & S_RA_SP) != S_RA_SP);
 
        sp = fp + (code & OFFSET_MASK);
 
-       /* ra = *(unsigned long *)sp; */
-       safe_load_stack(ra, sp, faulted);
+       /* tmp = *(unsigned long *)sp; */
+       safe_load_stack(tmp, sp, faulted);
        if (unlikely(faulted))
                return 0;
 
-       if (ra == parent)
+       if (tmp == old_parent_ra)
                return sp;
        return 0;
 }
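
The scan above relies on fixed bit-fields of MIPS I-type stores: under a
"(code & mask) == mask" test, 0xafbf0000 (S_RA_SP) matches "s{d,w} ra,
offset(sp)" and 0xafb00000 (S_R_SP) matches a store of any of $16..$31 to
the stack. A minimal restatement of those predicates, assuming the
standard I-type layout (a sketch, not part of the patch):

	/* Sketch: the instruction predicates behind the backwards scan. */
	#define S_R_SP	(0xafb0u << 16)	/* s{d,w} $16..$31, offset(sp) */
	#define S_RA_SP	(0xafbfu << 16)	/* s{d,w} ra, offset(sp)       */

	static int stores_reg_to_stack(unsigned int code)
	{
		return (code & S_R_SP) == S_R_SP;
	}

	static int stores_ra_to_stack(unsigned int code)
	{
		return (code & S_RA_SP) == S_RA_SP;
	}
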
@@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
                           unsigned long fp)
 {
-       unsigned long old;
+       unsigned long old_parent_ra;
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)
            &return_to_handler;
-       int faulted;
+       int faulted, insns;
 
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;
 
        /*
-        * "parent" is the stack address saved the return address of the caller
-        * of _mcount.
+        * "parent_ra_addr" is the stack address at which the return address
+        * of the caller of _mcount is saved.
         *
         * If gcc < 4.5, a leaf function does not save the return address
         * on the stack, so we "emulate" one in _mcount's stack space,
@@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
         * do it in ftrace_graph_caller of mcount.S.
         */
 
-       /* old = *parent; */
-       safe_load_stack(old, parent, faulted);
+       /* old_parent_ra = *parent_ra_addr; */
+       safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
        if (unlikely(faulted))
                goto out;
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
-       parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
-                       (unsigned long)parent, fp);
+       parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+                       old_parent_ra, (unsigned long)parent_ra_addr, fp);
        /*
         * If we fail to get the stack address of the non-leaf function's
         * ra, stop the function graph tracer and return
         */
-       if (parent == 0)
+       if (parent_ra_addr == 0)
                goto out;
 #endif
-       /* *parent = return_hooker; */
-       safe_store_stack(return_hooker, parent, faulted);
+       /* *parent_ra_addr = return_hooker; */
+       safe_store_stack(return_hooker, parent_ra_addr, faulted);
        if (unlikely(faulted))
                goto out;
 
-       if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
-           -EBUSY) {
-               *parent = old;
+       if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
+           == -EBUSY) {
+               *parent_ra_addr = old_parent_ra;
                return;
        }
 
-       trace.func = self_addr;
+       /*
+        * Get the recorded ip of the current mcount calling site in the
+        * __mcount_loc section, which will be used to filter the function
+        * entries configured through the tracing/set_graph_function interface.
+        */
+
+       insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+       trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
 
        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                current->curr_ret_stack--;
-               *parent = old;
+               *parent_ra_addr = old_parent_ra;
        }
        return;
 out:
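
The insns arithmetic above recovers the address that dynamic ftrace
recorded in the __mcount_loc section: that entry names the first
instruction of the mcount sequence, while self_ra points past the jump and
its delay slot, so the code steps back 2 instructions for an in-kernel
site (to the "jal _mcount") and MCOUNT_OFFSET_INSNS + 1 for a module (to
the leading "lui"). As a sketch, with MCOUNT_INSN_SIZE being 4 on MIPS:

	/* Sketch: recover the __mcount_loc ip from _mcount's return address. */
	#define MCOUNT_INSN_SIZE 4	/* bytes per MIPS instruction */

	static unsigned long mcount_loc_ip(unsigned long self_ra, int in_kernel,
					   int mcount_offset_insns)
	{
		int insns = in_kernel ? 2 : mcount_offset_insns + 1;

		return self_ra - MCOUNT_INSN_SIZE * insns;
	}
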
index 2b7f3f703b83cbeb86f48926517f1e0887f07496..a8244854d3dc623f99a4a4d1a2de0d0950153459 100644 (file)
@@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event,
        return ret;
 }
 
-static int mipspmu_enable(struct perf_event *event)
-{
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       struct hw_perf_event *hwc = &event->hw;
-       int idx;
-       int err = 0;
-
-       /* To look for a free counter for this event. */
-       idx = mipspmu->alloc_counter(cpuc, hwc);
-       if (idx < 0) {
-               err = idx;
-               goto out;
-       }
-
-       /*
-        * If there is an event in the counter we are going to use then
-        * make sure it is disabled.
-        */
-       event->hw.idx = idx;
-       mipspmu->disable_event(idx);
-       cpuc->events[idx] = event;
-
-       /* Set the period for the event. */
-       mipspmu_event_set_period(event, hwc, idx);
-
-       /* Enable the event. */
-       mipspmu->enable_event(hwc, idx);
-
-       /* Propagate our changes to the userspace mapping. */
-       perf_event_update_userpage(event);
-
-out:
-       return err;
-}
-
 static void mipspmu_event_update(struct perf_event *event,
                        struct hw_perf_event *hwc,
                        int idx)
@@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event,
        unsigned long flags;
        int shift = 64 - TOTAL_BITS;
        s64 prev_raw_count, new_raw_count;
-       s64 delta;
+       u64 delta;
 
 again:
        prev_raw_count = local64_read(&hwc->prev_count);
@@ -231,32 +196,90 @@ again:
        return;
 }
 
-static void mipspmu_disable(struct perf_event *event)
+static void mipspmu_start(struct perf_event *event, int flags)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (!mipspmu)
+               return;
+
+       if (flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+       hwc->state = 0;
+
+       /* Set the period for the event. */
+       mipspmu_event_set_period(event, hwc, hwc->idx);
+
+       /* Enable the event. */
+       mipspmu->enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_stop(struct perf_event *event, int flags)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (!mipspmu)
+               return;
+
+       if (!(hwc->state & PERF_HES_STOPPED)) {
+               /* We are working on a local event. */
+               mipspmu->disable_event(hwc->idx);
+               barrier();
+               mipspmu_event_update(event, hwc, hwc->idx);
+               hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       }
+}
+
+static int mipspmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
-       int idx = hwc->idx;
+       int idx;
+       int err = 0;
 
+       perf_pmu_disable(event->pmu);
 
-       WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+       /* Look for a free counter for this event. */
+       idx = mipspmu->alloc_counter(cpuc, hwc);
+       if (idx < 0) {
+               err = idx;
+               goto out;
+       }
 
-       /* We are working on a local event. */
+       /*
+        * If there is an event in the counter we are going to use then
+        * make sure it is disabled.
+        */
+       event->hw.idx = idx;
        mipspmu->disable_event(idx);
+       cpuc->events[idx] = event;
 
-       barrier();
-
-       mipspmu_event_update(event, hwc, idx);
-       cpuc->events[idx] = NULL;
-       clear_bit(idx, cpuc->used_mask);
+       hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       if (flags & PERF_EF_START)
+               mipspmu_start(event, PERF_EF_RELOAD);
 
+       /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);
+
+out:
+       perf_pmu_enable(event->pmu);
+       return err;
 }
 
-static void mipspmu_unthrottle(struct perf_event *event)
+static void mipspmu_del(struct perf_event *event, int flags)
 {
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
 
-       mipspmu->enable_event(hwc, hwc->idx);
+       WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+       mipspmu_stop(event, PERF_EF_UPDATE);
+       cpuc->events[idx] = NULL;
+       clear_bit(idx, cpuc->used_mask);
+
+       perf_event_update_userpage(event);
 }
 
 static void mipspmu_read(struct perf_event *event)
@@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event)
        mipspmu_event_update(event, hwc, hwc->idx);
 }
 
-static struct pmu pmu = {
-       .enable         = mipspmu_enable,
-       .disable        = mipspmu_disable,
-       .unthrottle     = mipspmu_unthrottle,
-       .read           = mipspmu_read,
-};
+static void mipspmu_enable(struct pmu *pmu)
+{
+       if (mipspmu)
+               mipspmu->start();
+}
+
+static void mipspmu_disable(struct pmu *pmu)
+{
+       if (mipspmu)
+               mipspmu->stop();
+}
 
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmu_reserve_mutex);
@@ -318,6 +346,82 @@ static void mipspmu_free_irq(void)
                perf_irq = save_perf_irq;
 }
 
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters; they have
+ * specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+       if (atomic_dec_and_mutex_lock(&active_events,
+                               &pmu_reserve_mutex)) {
+               /*
+                * We must not call the destroy function with interrupts
+                * disabled.
+                */
+               on_each_cpu(reset_counters,
+                       (void *)(long)mipspmu->num_counters, 1);
+               mipspmu_free_irq();
+               mutex_unlock(&pmu_reserve_mutex);
+       }
+}
+
+static int mipspmu_event_init(struct perf_event *event)
+{
+       int err = 0;
+
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HARDWARE:
+       case PERF_TYPE_HW_CACHE:
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
+       if (!mipspmu || event->cpu >= nr_cpumask_bits ||
+               (event->cpu >= 0 && !cpu_online(event->cpu)))
+               return -ENODEV;
+
+       if (!atomic_inc_not_zero(&active_events)) {
+               if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
+                       atomic_dec(&active_events);
+                       return -ENOSPC;
+               }
+
+               mutex_lock(&pmu_reserve_mutex);
+               if (atomic_read(&active_events) == 0)
+                       err = mipspmu_get_irq();
+
+               if (!err)
+                       atomic_inc(&active_events);
+               mutex_unlock(&pmu_reserve_mutex);
+       }
+
+       if (err)
+               return err;
+
+       err = __hw_perf_event_init(event);
+       if (err)
+               hw_perf_event_destroy(event);
+
+       return err;
+}
+
+static struct pmu pmu = {
+       .pmu_enable     = mipspmu_enable,
+       .pmu_disable    = mipspmu_disable,
+       .event_init     = mipspmu_event_init,
+       .add            = mipspmu_add,
+       .del            = mipspmu_del,
+       .start          = mipspmu_start,
+       .stop           = mipspmu_stop,
+       .read           = mipspmu_read,
+};
+
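
The conversion above follows this cycle's perf-core interface change: the
old per-event enable/disable/unthrottle hooks become the add/del/start/stop
quartet (with pmu_enable/pmu_disable batching whole-PMU reprogramming), and
initialisation moves from a global hw_perf_event_init() into the registered
.event_init callback. The PERF_HES_* state flags keep start/stop idempotent;
a sketch of that contract, with hypothetical helpers standing in for the
hardware accessors:

	/* Sketch: the hw.state contract behind mipspmu_start/stop. */
	static void sketch_stop(struct perf_event *event)
	{
		struct hw_perf_event *hwc = &event->hw;

		if (hwc->state & PERF_HES_STOPPED)
			return;				/* never fold the count twice */
		hw_disable_counter(hwc->idx);		/* hypothetical helper        */
		hw_fold_count(event, hwc);		/* hypothetical helper        */
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
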
 static inline unsigned int
 mipspmu_perf_event_encode(const struct mips_perf_event *pev)
 {
@@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc,
 {
        struct hw_perf_event fake_hwc = event->hw;
 
-       if (event->pmu && event->pmu != &pmu)
-               return 0;
+       /* Allow mixed event groups, so return 1 to pass validation. */
+       if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+               return 1;
 
        return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
 }
@@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event)
        return 0;
 }
 
-/*
- * mipsxx/rm9000/loongson2 have different performance counters, they have
- * specific low-level init routines.
- */
-static void reset_counters(void *arg);
-static int __hw_perf_event_init(struct perf_event *event);
-
-static void hw_perf_event_destroy(struct perf_event *event)
-{
-       if (atomic_dec_and_mutex_lock(&active_events,
-                               &pmu_reserve_mutex)) {
-               /*
-                * We must not call the destroy function with interrupts
-                * disabled.
-                */
-               on_each_cpu(reset_counters,
-                       (void *)(long)mipspmu->num_counters, 1);
-               mipspmu_free_irq();
-               mutex_unlock(&pmu_reserve_mutex);
-       }
-}
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-       int err = 0;
-
-       if (!mipspmu || event->cpu >= nr_cpumask_bits ||
-               (event->cpu >= 0 && !cpu_online(event->cpu)))
-               return ERR_PTR(-ENODEV);
-
-       if (!atomic_inc_not_zero(&active_events)) {
-               if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
-                       atomic_dec(&active_events);
-                       return ERR_PTR(-ENOSPC);
-               }
-
-               mutex_lock(&pmu_reserve_mutex);
-               if (atomic_read(&active_events) == 0)
-                       err = mipspmu_get_irq();
-
-               if (!err)
-                       atomic_inc(&active_events);
-               mutex_unlock(&pmu_reserve_mutex);
-       }
-
-       if (err)
-               return ERR_PTR(err);
-
-       err = __hw_perf_event_init(event);
-       if (err)
-               hw_perf_event_destroy(event);
-
-       return err ? ERR_PTR(err) : &pmu;
-}
-
-void hw_perf_enable(void)
-{
-       if (mipspmu)
-               mipspmu->start();
-}
-
-void hw_perf_disable(void)
-{
-       if (mipspmu)
-               mipspmu->stop();
-}
-
 /* This is needed by specific irq handlers in perf_event_*.c */
 static void
 handle_associated_event(struct cpu_hw_events *cpuc,
@@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc,
 #include "perf_event_mipsxx.c"
 
 /* Callchain handling code. */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-               u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
 
 /*
  * Leave userspace callchain empty for now. When we find a way to trace
  * the user stack callchains, we add here.
  */
-static void
-perf_callchain_user(struct pt_regs *regs,
-                   struct perf_callchain_entry *entry)
+void perf_callchain_user(struct perf_callchain_entry *entry,
+                   struct pt_regs *regs)
 {
 }
 
@@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
        while (!kstack_end(sp)) {
                addr = *sp++;
                if (__kernel_text_address(addr)) {
-                       callchain_store(entry, addr);
+                       perf_callchain_store(entry, addr);
                        if (entry->nr >= PERF_MAX_STACK_DEPTH)
                                break;
                }
        }
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-                     struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                     struct pt_regs *regs)
 {
        unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
        unsigned long ra = regs->regs[31];
        unsigned long pc = regs->cp0_epc;
 
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
        if (raw_show_trace || !__kernel_text_address(pc)) {
                unsigned long stack_page =
                        (unsigned long)task_stack_page(current);
@@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs,
                return;
        }
        do {
-               callchain_store(entry, pc);
+               perf_callchain_store(entry, pc);
                if (entry->nr >= PERF_MAX_STACK_DEPTH)
                        break;
                pc = unwind_stack(current, &sp, pc, &ra);
        } while (pc);
 #else
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
        save_raw_perf_callchain(entry, sp);
 #endif
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-                 struct perf_callchain_entry *entry)
-{
-       int is_user;
-
-       if (!regs)
-               return;
-
-       is_user = user_mode(regs);
-
-       if (!current || !current->pid)
-               return;
-
-       if (is_user && current->state != TASK_RUNNING)
-               return;
-
-       if (!is_user) {
-               perf_callchain_kernel(regs, entry);
-               if (current->mm)
-                       regs = task_pt_regs(current);
-               else
-                       regs = NULL;
-       }
-       if (regs)
-               perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-       struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-       entry->nr = 0;
-       perf_do_callchain(regs, entry);
-       return entry;
-}
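
The callchain half mirrors another core change: perf_callchain_store()
replaces the local callchain_store(), and the generic code now emits the
PERF_CONTEXT_KERNEL/USER markers and owns the per-cpu entry buffer, so the
perf_do_callchain()/perf_callchain() glue removed above needs no arch-side
replacement. What the arch still provides is just the two walkers, roughly
(sketch):

	/* Sketch: the arch-side shape under the new callchain interface. */
	void perf_callchain_kernel(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
	{
		/* no PERF_CONTEXT_KERNEL store here; the core did it */
		perf_callchain_store(entry, regs->cp0_epc);
		/* ... unwind the stack, storing each caller's pc ... */
	}
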
index 183e0d226669193700c72f405f1e4e43303612a4..d9a7db78ed62bd1b38bf9401eacdb3c7332ae6f2 100644 (file)
@@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
         * interrupt, not NMI.
         */
        if (handled == IRQ_HANDLED)
-               perf_event_do_pending();
+               irq_work_run();
 
 #ifdef CONFIG_MIPS_MT_SMP
        read_unlock(&pmuint_rwlock);
@@ -1045,6 +1045,8 @@ init_hw_perf_events(void)
                        "CPU, irq %d%s\n", mipspmu->name, counters, irq,
                        irq < 0 ? " (share with timer interrupt)" : "");
 
+       perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
        return 0;
 }
 early_initcall(init_hw_perf_events);
index 5922342bca3991d4b7ab4a9d6f8483fb3e416779..dbbe0ce48d89a4957a349febda33d0934c97d9af 100644 (file)
@@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
 
 static int protected_restore_fp_context(struct sigcontext __user *sc)
 {
-       int err, tmp;
+       int err, tmp __maybe_unused;
        while (1) {
                lock_fpu_owner();
                own_fpu_inatomic(0);
index a0ed0e052b2e8f630c34900d5b6e5958f945bbcc..aae986613795d928fa1228331c1526f8ed9e2a56 100644 (file)
@@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
 
 static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
 {
-       int err, tmp;
+       int err, tmp __maybe_unused;
        while (1) {
                lock_fpu_owner();
                own_fpu_inatomic(0);
index 383aeb95cb49ee2a8e27f489826c66b6d0f6663f..32a2561010823253c24b9d9cffdc1801d380f9a9 100644 (file)
@@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void)
  */
 static struct task_struct *cpu_idle_thread[NR_CPUS];
 
+struct create_idle {
+       struct work_struct work;
+       struct task_struct *idle;
+       struct completion done;
+       int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
+{
+       struct create_idle *c_idle =
+               container_of(work, struct create_idle, work);
+
+       c_idle->idle = fork_idle(c_idle->cpu);
+       complete(&c_idle->done);
+}
+
 int __cpuinit __cpu_up(unsigned int cpu)
 {
        struct task_struct *idle;
@@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
         * Linux can schedule processes on this slave.
         */
        if (!cpu_idle_thread[cpu]) {
-               idle = fork_idle(cpu);
-               cpu_idle_thread[cpu] = idle;
+               /*
+                * Schedule a work item so that fork_idle() runs from a
+                * kernel thread instead of cloning the current user task.
+                * Ported from arch/x86/kernel/smpboot.c
+                */
+               struct create_idle c_idle = {
+                       .cpu    = cpu,
+                       .done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+               };
+
+               INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+               schedule_work(&c_idle.work);
+               wait_for_completion(&c_idle.done);
+               idle = cpu_idle_thread[cpu] = c_idle.idle;
 
                if (IS_ERR(idle))
                        panic(KERN_ERR "Fork failed for CPU %d", cpu);
index 1dc6edff45e08a604377862ec3d8980c6b333cf0..58beabf50b3c34a022ec49b9a3c3a245aa130666 100644 (file)
@@ -383,12 +383,11 @@ save_static_function(sys_sysmips);
 static int __used noinline
 _sys_sysmips(nabi_no_regargs struct pt_regs regs)
 {
-       long cmd, arg1, arg2, arg3;
+       long cmd, arg1, arg2;
 
        cmd = regs.regs[4];
        arg1 = regs.regs[5];
        arg2 = regs.regs[6];
-       arg3 = regs.regs[7];
 
        switch (cmd) {
        case MIPS_ATOMIC_SET:
@@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs)
                if (arg1 & 2)
                        set_thread_flag(TIF_LOGADE);
                else
-                       clear_thread_flag(TIF_FIXADE);
+                       clear_thread_flag(TIF_LOGADE);
 
                return 0;
 
index 6a1fdfef8fded5763a457ec026467b356717fc78..ab52b7cf3b6bcc007cfda63bce5c028be665bbfa 100644 (file)
@@ -148,9 +148,9 @@ struct {
        spinlock_t tc_list_lock;
        struct list_head tc_list;       /* Thread contexts */
 } vpecontrol = {
-       .vpe_list_lock  = SPIN_LOCK_UNLOCKED,
+       .vpe_list_lock  = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
        .vpe_list       = LIST_HEAD_INIT(vpecontrol.vpe_list),
-       .tc_list_lock   = SPIN_LOCK_UNLOCKED,
+       .tc_list_lock   = __SPIN_LOCK_UNLOCKED(tc_list_lock),
        .tc_list        = LIST_HEAD_INIT(vpecontrol.tc_list)
 };
 
index 6e1b77fec7ea6da1c6236b67dcce54db06a54342..aca93eed8779b7756f32d0fb69bf0b32b9227019 100644 (file)
@@ -1,6 +1,7 @@
+if MACH_LOONGSON
+
 choice
        prompt "Machine Type"
-       depends on MACH_LOONGSON
 
 config LEMOTE_FULOONG2E
        bool "Lemote Fuloong(2e) mini-PC"
@@ -87,3 +88,5 @@ config LOONGSON_UART_BASE
 config LOONGSON_MC146818
        bool
        default n
+
+endif # MACH_LOONGSON
index 1a06defc4f7f565fd45a0c33d8e24a17bdd22091..353e1d2e41a525660be6be28338de00875e98938 100644 (file)
@@ -44,10 +44,5 @@ void __init prom_init_cmdline(void)
                strcat(arcs_cmdline, " ");
        }
 
-       if ((strstr(arcs_cmdline, "console=")) == NULL)
-               strcat(arcs_cmdline, " console=ttyS0,115200");
-       if ((strstr(arcs_cmdline, "root=")) == NULL)
-               strcat(arcs_cmdline, " root=/dev/hda1");
-
        prom_init_machtype();
 }
index 81fbe6b73f91f71b896292cbf55b7b1c67990f80..2efd5d9dee27192f8a0da79698eb79b5c6e9828c 100644 (file)
@@ -41,7 +41,7 @@ void __weak __init mach_prom_init_machtype(void)
 
 void __init prom_init_machtype(void)
 {
-       char *p, str[MACHTYPE_LEN];
+       char *p, str[MACHTYPE_LEN + 1];
        int machtype = MACH_LEMOTE_FL2E;
 
        mips_machtype = LOONGSON_MACHTYPE;
@@ -53,6 +53,7 @@ void __init prom_init_machtype(void)
        }
        p += strlen("machtype=");
        strncpy(str, p, MACHTYPE_LEN);
+       str[MACHTYPE_LEN] = '\0';
        p = strstr(str, " ");
        if (p)
                *p = '\0';
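
The extra byte matters because strncpy() does not NUL-terminate the
destination when the source is at least MACHTYPE_LEN characters long, so
the old str[MACHTYPE_LEN] could be scanned past its end by the strstr()
that follows. The pitfall in isolation (standalone, illustrative):

	/* Standalone illustration of the strncpy() pitfall fixed above. */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char dst[4 + 1];
		const char *src = "abcdefgh";	/* longer than the buffer */

		strncpy(dst, src, 4);		/* copies "abcd", no NUL! */
		dst[4] = '\0';			/* must terminate by hand */
		printf("%s\n", dst);		/* prints "abcd"          */
		return 0;
	}
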
index 2701d9500959700451e48a0c37d5ce4d6551c96b..2a7d43f4f161c07b4a9f6d85cca363d545fe7d85 100644 (file)
@@ -70,7 +70,7 @@
 
 
 #define COMPXSP \
-  unsigned xm; int xe; int xs; int xc
+  unsigned xm; int xe; int xs __maybe_unused; int xc
 
 #define COMPYSP \
   unsigned ym; int ye; int ys; int yc
 
 
 #define COMPXDP \
-u64 xm; int xe; int xs; int xc
+u64 xm; int xe; int xs __maybe_unused; int xc
 
 #define COMPYDP \
 u64 ym; int ye; int ys; int yc
index 2efcbd24c82fcfa8d2741243101eb5df116e2d85..279599e9a779fa80fe4e237d353aa5fe916dedab 100644 (file)
@@ -324,7 +324,7 @@ int page_is_ram(unsigned long pagenr)
 void __init paging_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES];
-       unsigned long lastpfn;
+       unsigned long lastpfn __maybe_unused;
 
        pagetable_init();
 
index 083d3412d0bccc7744ec151cd493de614d0375b8..04f9e17db9d0dfbadda95b9f9751c798667529a6 100644 (file)
@@ -109,6 +109,8 @@ static bool scratchpad_available(void)
 static int scratchpad_offset(int i)
 {
        BUG();
+       /* Really unreachable, but evidently some GCC versions want this. */
+       return 0;
 }
 #endif
 /*
index b7c03d80c88c80e1a54af4254d42522b95e9f76a..68798f869c0f7dda2236988ac517949c25c60bc8 100644 (file)
@@ -308,7 +308,7 @@ static struct resource pci_mem_resource = {
  *  RETURNS:     PCIBIOS_SUCCESSFUL  - success
  *
  ****************************************************************************/
-static int bpci_interrupt(int irq, void *dev_id)
+static irqreturn_t bpci_interrupt(int irq, void *dev_id)
 {
        struct msp_pci_regs *preg = (void *)PCI_BASE_REG;
        unsigned int stat = preg->if_status;
@@ -326,7 +326,7 @@ static int bpci_interrupt(int irq, void *dev_id)
        /* write to clear all asserted interrupts */
        preg->if_status = stat;
 
-       return PCIBIOS_SUCCESSFUL;
+       return IRQ_HANDLED;
 }
 
 /*****************************************************************************
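
The return-type change is not cosmetic: genirq interprets the handler's
result, and 0 (which is what PCIBIOS_SUCCESSFUL expands to) reads as
IRQ_NONE, i.e. "not my interrupt", feeding the spurious-IRQ detector until
the line gets disabled. The contract a handler must honour, sketched with
hypothetical hardware accessors:

	/* Sketch: the irqreturn_t contract. */
	static irqreturn_t example_interrupt(int irq, void *dev_id)
	{
		unsigned int stat = hw_read_status(dev_id);	/* hypothetical */

		if (!stat)
			return IRQ_NONE;	/* not ours; tell genirq so */

		hw_ack_status(dev_id, stat);			/* hypothetical */
		return IRQ_HANDLED;
	}
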
index c139988bb85d242befe8038e217f8ad40d4dc708..8d798497c614fa0e79070f3be0a3981eb3e094d9 100644 (file)
@@ -4,15 +4,11 @@ choice
 
 config PMC_MSP4200_EVAL
        bool "PMC-Sierra MSP4200 Eval Board"
-       select CEVT_R4K
-       select CSRC_R4K
        select IRQ_MSP_SLP
        select HW_HAS_PCI
 
 config PMC_MSP4200_GW
        bool "PMC-Sierra MSP4200 VoIP Gateway"
-       select CEVT_R4K
-       select CSRC_R4K
        select IRQ_MSP_SLP
        select HW_HAS_PCI
 
index cca64e15f57f57d2efcaf59e270d111b9a30df8a..01df84ce31e209cd3d05e0e316b68ea78d74798e 100644 (file)
@@ -81,7 +81,7 @@ void __init plat_time_init(void)
        mips_hpt_frequency = cpu_rate/2;
 }
 
-unsigned int __init get_c0_compare_int(void)
+unsigned int __cpuinit get_c0_compare_int(void)
 {
        return MSP_INT_VPE0_TIMER;
 }
index 92d2f9298e3832155b14bf21d1eb964e015b87f5..9d773a639513abd3a35311a117a563912d52b236 100644 (file)
@@ -139,7 +139,7 @@ static inline unsigned long __cmpxchg(volatile unsigned long *m,
  * Atomically reads the value of @v.  Note that the guaranteed
  * useful range of an atomic_t is only 24 bits.
  */
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (ACCESS_ONCE((v)->counter))
 
 /**
  * atomic_set - set atomic variable
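
The ACCESS_ONCE() wrapper keeps the compiler from caching v->counter in a
register across, say, a polling loop, which a plain read would permit. Its
definition in <linux/compiler.h> of this era is a volatile-qualified access:

	/* ACCESS_ONCE() as defined in <linux/compiler.h> around this release: */
	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

	/*
	 * Without it, a wait loop such as
	 *	while (atomic_read(&v) != 0)
	 *		cpu_relax();
	 * may legally be compiled to test the counter only once.
	 */
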
index 679dee0bbd089dddabdbe230abf3874cebb174aa..3d6e60dad9d98a2b99a44029658638c48f300d50 100644 (file)
@@ -160,9 +160,10 @@ struct __large_struct { unsigned long buf[100]; };
 
 #define __get_user_check(x, ptr, size)                                 \
 ({                                                                     \
+       const __typeof__(ptr) __guc_ptr = (ptr);                        \
        int _e;                                                         \
-       if (likely(__access_ok((unsigned long) (ptr), (size))))         \
-               _e = __get_user_nocheck((x), (ptr), (size));            \
+       if (likely(__access_ok((unsigned long) __guc_ptr, (size))))     \
+               _e = __get_user_nocheck((x), __guc_ptr, (size));        \
        else {                                                          \
                _e = -EFAULT;                                           \
                (x) = (__typeof__(x))0;                                 \
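
Snapshotting ptr into a __typeof__ local cures a classic macro bug: the old
body expanded ptr twice, so an argument with side effects was evaluated
twice and access_ok() could check a different pointer than the one actually
dereferenced. A standalone illustration of the double-evaluation hazard:

	#include <stdio.h>

	/* Deliberately buggy: expands p twice, like the old macro did. */
	#define CHECK_THEN_READ(x, p)	do {		\
		if ((p) != NULL)	/* 1st eval */	\
			(x) = *(p);	/* 2nd eval */	\
	} while (0)

	int main(void)
	{
		int a[2] = { 10, 20 }, *p = a, x = 0;

		CHECK_THEN_READ(x, p++);	/* p advances twice! */
		printf("x=%d, p-a=%td\n", x, p - a);	/* x=20, p-a=2 */
		return 0;
	}
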
index 75da468090b90342ef5ff2566389b6efa1b28487..5b955000626d9f79fc3abe126dcc117c0076bc1e 100644 (file)
@@ -104,8 +104,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
        unsigned tsc, elapse;
        irqreturn_t ret;
 
-       write_seqlock(&xtime_lock);
-
        while (tsc = get_cycles(),
               elapse = tsc - mn10300_last_tsc, /* time elapsed since last
                                                 * tick */
@@ -114,11 +112,9 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
                mn10300_last_tsc += MN10300_TSC_PER_HZ;
 
                /* advance the kernel's time tracking system */
-               do_timer(1);
+               xtime_update(1);
        }
 
-       write_sequnlock(&xtime_lock);
-
        ret = local_timer_interrupt();
 #ifdef CONFIG_SMP
        send_IPI_allbutself(LOCAL_TIMER_IPI);
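
xtime_update() is the new core helper this hunk switches to; it lets timer
interrupts drop their open-coded xtime_lock handling by taking the seqlock
around do_timer() itself. Per the timekeeping code of this release it is
equivalent to (sketch):

	/* What xtime_update() amounts to in kernel/time of this era. */
	void xtime_update(unsigned long ticks)
	{
		write_seqlock(&xtime_lock);
		do_timer(ticks);
		write_sequnlock(&xtime_lock);
	}
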
index a8933a60b2d44956e4518931e6f7395839b784d2..a6b63dde603d50ebcd42e0da51cd7efbb983d6de 100644 (file)
@@ -69,7 +69,7 @@ static void flush_icache_page_range(unsigned long start, unsigned long end)
 
        /* invalidate the icache coverage on that region */
        mn10300_local_icache_inv_range2(addr + off, size);
-       smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+       smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
 }
 
 /**
@@ -101,7 +101,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
                 * directly */
                start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
                mn10300_icache_inv_range(start_page, end);
-               smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+               smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
                if (start_page == start)
                        goto done;
                end = start_page;
index 30394081d9b6d51ab7486c70054b4bea7beaec89..6ab9580b0b0091ae2fd888998754475bef7d52f5 100644 (file)
@@ -185,26 +185,21 @@ struct hpux_statfs {
      int16_t f_pad;
 };
 
-static int do_statfs_hpux(struct path *path, struct hpux_statfs *buf)
+static int do_statfs_hpux(struct kstatfs *st, struct hpux_statfs __user *p)
 {
-       struct kstatfs st;
-       int retval;
-       
-       retval = vfs_statfs(path, &st);
-       if (retval)
-               return retval;
-
-       memset(buf, 0, sizeof(*buf));
-       buf->f_type = st.f_type;
-       buf->f_bsize = st.f_bsize;
-       buf->f_blocks = st.f_blocks;
-       buf->f_bfree = st.f_bfree;
-       buf->f_bavail = st.f_bavail;
-       buf->f_files = st.f_files;
-       buf->f_ffree = st.f_ffree;
-       buf->f_fsid[0] = st.f_fsid.val[0];
-       buf->f_fsid[1] = st.f_fsid.val[1];
-
+       struct hpux_statfs buf;
+       memset(&buf, 0, sizeof(buf));
+       buf.f_type = st->f_type;
+       buf.f_bsize = st->f_bsize;
+       buf.f_blocks = st->f_blocks;
+       buf.f_bfree = st->f_bfree;
+       buf.f_bavail = st->f_bavail;
+       buf.f_files = st->f_files;
+       buf.f_ffree = st->f_ffree;
+       buf.f_fsid[0] = st->f_fsid.val[0];
+       buf.f_fsid[1] = st->f_fsid.val[1];
+       if (copy_to_user(p, &buf, sizeof(buf)))
+               return -EFAULT;
        return 0;
 }
 
@@ -212,35 +207,19 @@ static int do_statfs_hpux(struct path *path, struct hpux_statfs *buf)
 asmlinkage long hpux_statfs(const char __user *pathname,
                                                struct hpux_statfs __user *buf)
 {
-       struct path path;
-       int error;
-
-       error = user_path(pathname, &path);
-       if (!error) {
-               struct hpux_statfs tmp;
-               error = do_statfs_hpux(&path, &tmp);
-               if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
-                       error = -EFAULT;
-               path_put(&path);
-       }
+       struct kstatfs st;
+       int error = user_statfs(pathname, &st);
+       if (!error)
+               error = do_statfs_hpux(&st, buf);
        return error;
 }
 
 asmlinkage long hpux_fstatfs(unsigned int fd, struct hpux_statfs __user * buf)
 {
-       struct file *file;
-       struct hpux_statfs tmp;
-       int error;
-
-       error = -EBADF;
-       file = fget(fd);
-       if (!file)
-               goto out;
-       error = do_statfs_hpux(&file->f_path, &tmp);
-       if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
-               error = -EFAULT;
-       fput(file);
- out:
+       struct kstatfs st;
+       int error = fd_statfs(fd, &st);
+       if (!error)
+               error = do_statfs_hpux(&st, buf);
        return error;
 }
 
index 0c705c3a55efc01305dbea4e14eecf63c0f3f05e..67a33cc27ef2741c73c598ec6ddbcce26ce70d21 100644 (file)
@@ -8,7 +8,7 @@
 #include <asm/errno.h>
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -18,7 +18,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        pagefault_disable();
@@ -51,10 +51,10 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 
 /* Non-atomic version */
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
 {
-       int err = 0;
-       int uval;
+       u32 val;
 
        /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
         * our gateway page, and causes no end of trouble...
@@ -62,15 +62,15 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
        if (segment_eq(KERNEL_DS, get_fs()) && !uaddr)
                return -EFAULT;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
-       err = get_user(uval, uaddr);
-       if (err) return -EFAULT;
-       if (uval == oldval)
-               err = put_user(newval, uaddr);
-       if (err) return -EFAULT;
-       return uval;
+       if (get_user(val, uaddr))
+               return -EFAULT;
+       if (val == oldval && put_user(newval, uaddr))
+               return -EFAULT;
+       *uval = val;
+       return 0;
 }
 
 #endif /*__KERNEL__*/
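
This parisc hunk is the first of several below (powerpc, s390, sh) making
one and the same interface change: futex_atomic_cmpxchg_inatomic() used to
return the old value directly, which cannot distinguish a legitimate old
value of -EFAULT from a fault, and the futex word is now consistently
typed u32 __user *. The new form returns 0 or -EFAULT and reports the
observed value through *uval, so a caller goes roughly like this (sketch):

	/* Sketch: consuming the new futex cmpxchg signature. */
	u32 curval;

	if (futex_atomic_cmpxchg_inatomic(&curval, uaddr, expected, newval))
		return -EFAULT;		/* the user access faulted         */
	if (curval != expected)
		return -EAGAIN;		/* raced; curval holds what we saw */
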
index 05511ccb61d24a9bc8735f9897537e3bd520b825..45b7389d77aa5a18e1ce4adf883ff7c985b320cc 100644 (file)
@@ -162,11 +162,8 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
                update_process_times(user_mode(get_irq_regs()));
        }
 
-       if (cpu == 0) {
-               write_seqlock(&xtime_lock);
-               do_timer(ticks_elapsed);
-               write_sequnlock(&xtime_lock);
-       }
+       if (cpu == 0)
+               xtime_update(ticks_elapsed);
 
        return IRQ_HANDLED;
 }
index 7c589ef81fb0eec42eee5f17cfbfc25563bb66fa..c94e4a3fe2ef3de09decfd1aa47bda3b77342f53 100644 (file)
@@ -30,7 +30,7 @@
        : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
        : "cr0", "memory")
 
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -40,7 +40,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        pagefault_disable();
@@ -82,35 +82,38 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
 {
-       int prev;
+       int ret = 0;
+       u32 prev;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
         __asm__ __volatile__ (
         PPC_RELEASE_BARRIER
-"1:     lwarx   %0,0,%2         # futex_atomic_cmpxchg_inatomic\n\
-        cmpw    0,%0,%3\n\
+"1:     lwarx   %1,0,%3         # futex_atomic_cmpxchg_inatomic\n\
+        cmpw    0,%1,%4\n\
         bne-    3f\n"
-        PPC405_ERR77(0,%2)
-"2:     stwcx.  %4,0,%2\n\
+        PPC405_ERR77(0,%3)
+"2:     stwcx.  %5,0,%3\n\
         bne-    1b\n"
         PPC_ACQUIRE_BARRIER
 "3:    .section .fixup,\"ax\"\n\
-4:     li      %0,%5\n\
+4:     li      %0,%6\n\
        b       3b\n\
        .previous\n\
        .section __ex_table,\"a\"\n\
        .align 3\n\
        " PPC_LONG "1b,4b,2b,4b\n\
        .previous" \
-        : "=&r" (prev), "+m" (*uaddr)
+        : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
         : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
         : "cc", "memory");
 
-        return prev;
+       *uval = prev;
+        return ret;
 }
 
 #endif /* __KERNEL__ */
index 380d48bacd16d0047cd1cb5006853ff4adf9506c..26b8c807f8f12d5ee5e3b06f7f2ec0a1523d559c 100644 (file)
 //
 //----------------------------------------------------------------------------
 #include <linux/cache.h>
+#include <linux/threads.h>
 #include <asm/types.h>
 #include <asm/mmu.h>
 
+/*
+ * We only have to have statically allocated lppaca structs on
+ * legacy iSeries, which supports at most 64 cpus.
+ */
+#ifdef CONFIG_PPC_ISERIES
+#if NR_CPUS < 64
+#define NR_LPPACAS     NR_CPUS
+#else
+#define NR_LPPACAS     64
+#endif
+#else /* not iSeries */
+#define NR_LPPACAS     1
+#endif
+
+
 /* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
  * alignment is sufficient to prevent this */
 struct lppaca {
index 991d5998d6be6711a68e15ab8347b110ade13dd3..fe56a23e1ff0c1b79d3d66f2c2cf89b08b5eb9d0 100644 (file)
@@ -240,6 +240,12 @@ struct machdep_calls {
         * claims to support kexec.
         */
        int (*machine_kexec_prepare)(struct kimage *image);
+
+       /* Called to perform the _real_ kexec.
+        * Do NOT allocate memory or fail here. We are past the point of
+        * no return.
+        */
+       void (*machine_kexec)(struct kimage *image);
 #endif /* CONFIG_KEXEC */
 
 #ifdef CONFIG_SUSPEND
index 8447d89fbe72639a6a0ce2db68694554bd7c61e8..bb1e2cdeb9bff0ec6322680196c11e5e676b2ca6 100644 (file)
  * by Paul Mackerras <paulus@samba.org>.
  */
 
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
 /*
  * the semaphore definition
  */
 #define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-struct rw_semaphore {
-       long                    count;
-       spinlock_t              wait_lock;
-       struct list_head        wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       struct lockdep_map      dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name)                              \
-{                                                              \
-       RWSEM_UNLOCKED_VALUE,                                   \
-       __SPIN_LOCK_UNLOCKED((name).wait_lock),                 \
-       LIST_HEAD_INIT((name).wait_list)                        \
-       __RWSEM_DEP_MAP_INIT(name)                              \
-}
-
-#define DECLARE_RWSEM(name)            \
-       struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-                        struct lock_class_key *key);
-
-#define init_rwsem(sem)                                        \
-       do {                                            \
-               static struct lock_class_key __key;     \
-                                                       \
-               __init_rwsem((sem), #sem, &__key);      \
-       } while (0)
-
 /*
  * lock for reading
  */
@@ -174,10 +128,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
        return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-       return sem->count != 0;
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_RWSEM_H */
index 49a170af81456ce722fa58cd70e5980370f5f70b..a5f8672eeff37d593c219ac7ac68a6296587b7f4 100644 (file)
@@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
 
        save_ftrace_enabled = __ftrace_enabled_save();
 
-       default_machine_kexec(image);
+       if (ppc_md.machine_kexec)
+               ppc_md.machine_kexec(image);
+       else
+               default_machine_kexec(image);
 
        __ftrace_enabled_restore(save_ftrace_enabled);
 
index ebf9846f3c3b30b7cf2d61d48a8a96c37b702c1c..f4adf89d7614150a73c44ad8251ae13ad7934b16 100644 (file)
@@ -26,20 +26,6 @@ extern unsigned long __toc_start;
 
 #ifdef CONFIG_PPC_BOOK3S
 
-/*
- * We only have to have statically allocated lppaca structs on
- * legacy iSeries, which supports at most 64 cpus.
- */
-#ifdef CONFIG_PPC_ISERIES
-#if NR_CPUS < 64
-#define NR_LPPACAS     NR_CPUS
-#else
-#define NR_LPPACAS     64
-#endif
-#else /* not iSeries */
-#define NR_LPPACAS     1
-#endif
-
 /*
  * The structure which the hypervisor knows about - this structure
  * should not cross a page boundary.  The vpa_init/register_vpa call
index 7a1d5cb76932312f7e15609fe5f84c077696eda3..8303a6c65ef7e85f230bac363bb722c49e460211 100644 (file)
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
                        prime_debug_regs(new_thread);
 }
 #else  /* !CONFIG_PPC_ADV_DEBUG_REGS */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
 static void set_debug_reg_defaults(struct thread_struct *thread)
 {
        if (thread->dabr) {
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
                set_dabr(0);
        }
 }
+#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
 
 int set_dabr(unsigned long dabr)
@@ -670,11 +672,11 @@ void flush_thread(void)
 {
        discard_lazy_cpu_state();
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINTS
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
-#else /* CONFIG_HAVE_HW_BREAKPOINTS */
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
        set_debug_reg_defaults(&current->thread);
-#endif /* CONFIG_HAVE_HW_BREAKPOINTS */
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 }
 
 void
index fd4812329570d1658640c22776755dda9c1170fe..0dc95c0aa3beb01f9f46b2ebecaa551baa844e6f 100644 (file)
@@ -1516,7 +1516,8 @@ int start_topology_update(void)
 {
        int rc = 0;
 
-       if (firmware_has_feature(FW_FEATURE_VPHN) &&
+       /* Disabled until races with load balancing are fixed */
+       if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
            get_lppaca()->shared_proc) {
                vphn_enabled = 1;
                setup_cpu_associativity_change_counters();
index 1ec06576f619bc8e3e73fecc9d04cabeb7263240..c14d09f614f362ef67e04744bc5db8f62b8947cc 100644 (file)
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
  * needs to be flushed. This function will either perform the flush
  * immediately or will batch it up if the current CPU has an active
  * batch on it.
- *
- * Must be called from within some kind of spinlock/non-preempt region...
  */
 void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, unsigned long pte, int huge)
 {
-       struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+       struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
        unsigned long vsid, vaddr;
        unsigned int psize;
        int ssize;
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
         */
        if (!batch->active) {
                flush_hash_page(vaddr, rpte, psize, ssize, 0);
+               put_cpu_var(ppc64_tlb_batch);
                return;
        }
 
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
        batch->index = ++i;
        if (i >= PPC64_TLB_BATCH_NR)
                __flush_tlb_pending(batch);
+       put_cpu_var(ppc64_tlb_batch);
 }
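
The switch from __get_cpu_var() to get_cpu_var() is the functional part of
this hunk: the function no longer requires callers to hold off preemption,
so it disables preemption itself and must drop it on every exit path,
including the early flush_hash_page() return. The general shape:

	/* Sketch: get_cpu_var()/put_cpu_var() must bracket all exit paths. */
	void batch_or_flush(void)
	{
		struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

		if (!batch->active) {
			flush_now();			/* hypothetical helper */
			put_cpu_var(ppc64_tlb_batch);	/* early path, too     */
			return;
		}
		add_to_batch(batch);			/* hypothetical helper */
		put_cpu_var(ppc64_tlb_batch);
	}
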
 
 /*
index 187a7d32f86a25ea2ff19e1fab99f7c551d0d59e..a3d2ce54ea2eb0f1f22ba3cb7d9973eecaf06453 100644 (file)
@@ -70,7 +70,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
        if (!IS_ERR(tmp)) {
                struct nameidata nd;
 
-               ret = path_lookup(tmp, LOOKUP_PARENT, &nd);
+               ret = kern_path_parent(tmp, &nd);
                if (!ret) {
                        nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE;
                        ret = spufs_create(&nd, flags, mode, neighbor);
index fdb7384c0c4f3fe372207d4ad8bdc48a364a97c0..f0491cc2890004167a58ebefcdf24a37dd87534b 100644 (file)
@@ -242,8 +242,8 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
        pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA  */
        pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
 
-       for (i = 0; i < NR_CPUS; i++) {
-               if (lppaca_of(i).dyn_proc_status >= 2)
+       for (i = 0; i < NR_LPPACAS; i++) {
+               if (lppaca[i].dyn_proc_status >= 2)
                        continue;
 
                snprintf(p, 32 - (p - buf), "@%d", i);
@@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
 
                dt_prop_str(dt, "device_type", device_type_cpu);
 
-               index = lppaca_of(i).dyn_hv_phys_proc_index;
+               index = lppaca[i].dyn_hv_phys_proc_index;
                d = &xIoHriProcessorVpd[index];
 
                dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
index b0863410517f517129d111c4891cec990736abb4..2946ae10fbfdba2ea2eb2cc1e730eedf1cc4ad8b 100644 (file)
@@ -680,6 +680,7 @@ void * __init iSeries_early_setup(void)
         * on but calling this function multiple times is fine.
         */
        identify_cpu(0, mfspr(SPRN_PVR));
+       initialise_paca(&boot_paca, 0);
 
        powerpc_firmware_features |= FW_FEATURE_ISERIES;
        powerpc_firmware_features |= FW_FEATURE_LPAR;
index 5c5d02de49e9ae53fe5728fef181f1657fd4c3dc..81cf36b691f1dfd42c2ed4f5a48f6bc42a0a7a0e 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        pagefault_disable();
@@ -39,13 +39,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr,
-                                               int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                                               u32 oldval, u32 newval)
 {
-       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
-       return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval);
+       return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
 }
 
 #endif /* __KERNEL__ */
index 423fdda2322dd6ee3f51add1cea21ce96f0b63f0..d0eb4653cebdb0d7bf0eab014cfb142904ad89b7 100644 (file)
 
 #ifdef __KERNEL__
 
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-struct rwsem_waiter;
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-       signed long             count;
-       spinlock_t              wait_lock;
-       struct list_head        wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       struct lockdep_map      dep_map;
-#endif
-};
-
 #ifndef __s390x__
 #define RWSEM_UNLOCKED_VALUE   0x00000000
 #define RWSEM_ACTIVE_BIAS      0x00000001
@@ -80,41 +57,6 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS        (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-/*
- * initialisation
- */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
-   LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(name) \
-       struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-       sem->count = RWSEM_UNLOCKED_VALUE;
-       spin_lock_init(&sem->wait_lock);
-       INIT_LIST_HEAD(&sem->wait_list);
-}
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-                        struct lock_class_key *key);
-
-#define init_rwsem(sem)                                \
-do {                                           \
-       static struct lock_class_key __key;     \
-                                               \
-       __init_rwsem((sem), #sem, &__key);      \
-} while (0)
-
-
 /*
  * lock for reading
  */
@@ -377,10 +319,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
        return new;
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-       return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _S390_RWSEM_H */
index d6b1ed0ec52b3b8bb5a27537a2fa378cfddce34b..2d9ea11f919ad2a1565091d3b861da2d2da09de5 100644 (file)
@@ -83,8 +83,8 @@ struct uaccess_ops {
        size_t (*clear_user)(size_t, void __user *);
        size_t (*strnlen_user)(size_t, const char __user *);
        size_t (*strncpy_from_user)(size_t, const char __user *, char *);
-       int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
-       int (*futex_atomic_cmpxchg)(int __user *, int old, int new);
+       int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
+       int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
 };
 
 extern struct uaccess_ops uaccess;
index 126011df14f1e7ad1a582e90de001177330c66d8..1d2536cb630bc5a99ce5f88a3cba871cc096b519 100644 (file)
@@ -12,12 +12,12 @@ extern size_t copy_from_user_std(size_t, const void __user *, void *);
 extern size_t copy_to_user_std(size_t, void __user *, const void *);
 extern size_t strnlen_user_std(size_t, const char __user *);
 extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
-extern int futex_atomic_cmpxchg_std(int __user *, int, int);
-extern int futex_atomic_op_std(int, int __user *, int, int *);
+extern int futex_atomic_cmpxchg_std(u32 *, u32 __user *, u32, u32);
+extern int futex_atomic_op_std(int, u32 __user *, int, int *);
 
 extern size_t copy_from_user_pt(size_t, const void __user *, void *);
 extern size_t copy_to_user_pt(size_t, void __user *, const void *);
-extern int futex_atomic_op_pt(int, int __user *, int, int *);
-extern int futex_atomic_cmpxchg_pt(int __user *, int, int);
+extern int futex_atomic_op_pt(int, u32 __user *, int, int *);
+extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32);
 
 #endif /* __ARCH_S390_LIB_UACCESS_H */
index 404f2de296dcad3c6017488363dc183986ad0bf1..74833831417fcb3585831e52a000b5fded8d8217 100644 (file)
@@ -302,7 +302,7 @@ fault:
                     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),         \
                       "m" (*uaddr) : "cc" );
 
-static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
+static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 {
        int oldval = 0, newval, ret;
 
@@ -335,7 +335,7 @@ static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
        return ret;
 }
 
-int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
+int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 {
        int ret;
 
@@ -354,26 +354,29 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
        return ret;
 }
 
-static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
+static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
+                                    u32 oldval, u32 newval)
 {
        int ret;
 
        asm volatile("0: cs   %1,%4,0(%5)\n"
-                    "1: lr   %0,%1\n"
+                    "1: la   %0,0\n"
                     "2:\n"
                     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
                     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
                     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
                     : "cc", "memory" );
+       *uval = oldval;
        return ret;
 }
 
-int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
+int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
+                           u32 oldval, u32 newval)
 {
        int ret;
 
        if (segment_eq(get_fs(), KERNEL_DS))
-               return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
+               return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
        spin_lock(&current->mm->page_table_lock);
        uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
        if (!uaddr) {
@@ -382,7 +385,7 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
-       ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
+       ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
        put_page(virt_to_page(uaddr));
        return ret;
 }
index a6c4f7ed24a493d1e0ea5fde43b19dc5b0fe37f2..bb1a7eed42ce4cbef8350dca7a8eb85269fb1966 100644 (file)
@@ -255,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
                : "0" (-EFAULT), "d" (oparg), "a" (uaddr),              \
                  "m" (*uaddr) : "cc");
 
-int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
+int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
 {
        int oldval = 0, newval, ret;
 
@@ -287,19 +287,21 @@ int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
        return ret;
 }
 
-int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval)
+int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
+                            u32 oldval, u32 newval)
 {
        int ret;
 
        asm volatile(
                "   sacf 256\n"
                "0: cs   %1,%4,0(%5)\n"
-               "1: lr   %0,%1\n"
+               "1: la   %0,0\n"
                "2: sacf 0\n"
                EX_TABLE(0b,2b) EX_TABLE(1b,2b)
                : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
                : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
                : "cc", "memory" );
+       *uval = oldval;
        return ret;
 }
 
index a9f16a7f9aeaf5bf1a1482c8e38355b1c35c6f2d..6cb9f193a95ea5cc7c7b3471a92758206a394527 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <asm/system.h>
 
-static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr,
                                           int *oldval)
 {
        unsigned long flags;
@@ -20,7 +20,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr,
        return ret;
 }
 
-static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr,
                                           int *oldval)
 {
        unsigned long flags;
@@ -37,7 +37,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr,
        return ret;
 }
 
-static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr,
                                          int *oldval)
 {
        unsigned long flags;
@@ -54,7 +54,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr,
        return ret;
 }
 
-static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr,
                                           int *oldval)
 {
        unsigned long flags;
@@ -71,7 +71,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr,
        return ret;
 }
 
-static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr,
                                           int *oldval)
 {
        unsigned long flags;
@@ -88,11 +88,13 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr,
        return ret;
 }
 
-static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr,
-                                                  int oldval, int newval)
+static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval,
+                                                  u32 __user *uaddr,
+                                                  u32 oldval, u32 newval)
 {
        unsigned long flags;
-       int ret, prev = 0;
+       int ret;
+       u32 prev = 0;
 
        local_irq_save(flags);
 
@@ -102,10 +104,8 @@ static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr,
 
        local_irq_restore(flags);
 
-       if (ret)
-               return ret;
-
-       return prev;
+       *uval = prev;
+       return ret;
 }
 
 #endif /* __ASM_SH_FUTEX_IRQ_H */
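
The SH helpers in this header rely on one uniprocessor trick: with local interrupts disabled, a plain get_user/put_user pair cannot be interleaved with anything else, so it is effectively atomic. A sketch of the cmpxchg body being adapted here, reconstructed from the visible variables rather than quoted from the file:

	local_irq_save(flags);
	ret = get_user(prev, uaddr);		/* read the current value */
	if (!ret && prev == oldval)
		ret = put_user(newval, uaddr);	/* store only if unchanged */
	local_irq_restore(flags);

	*uval = prev;				/* report what was seen */
	return ret;
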
index 68256ec5fa35b3e2c46ead9e2566810583351dcd..7be39a646fbd0e42ffa14ca0be8387bdebde1cbb 100644 (file)
@@ -10,7 +10,7 @@
 /* XXX: UP variants, fix for SH-4A and SMP.. */
 #include <asm/futex-irq.h>
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -21,7 +21,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        pagefault_disable();
@@ -65,12 +65,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
 {
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
-       return atomic_futex_op_cmpxchg_inatomic(uaddr, oldval, newval);
+       return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
 }
 
 #endif /* __KERNEL__ */
index 06e2251a5e483ebfd57594f55a02cbe69bfd1b59..edab57265293936cc3b6ecaed0cfc8fa609eb983 100644 (file)
 #endif
 
 #ifdef __KERNEL__
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
 
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-       long            count;
 #define RWSEM_UNLOCKED_VALUE           0x00000000
 #define RWSEM_ACTIVE_BIAS              0x00000001
 #define RWSEM_ACTIVE_MASK              0x0000ffff
 #define RWSEM_WAITING_BIAS             (-0x00010000)
 #define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-       spinlock_t              wait_lock;
-       struct list_head        wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       struct lockdep_map      dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
-       { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-         LIST_HEAD_INIT((name).wait_list) \
-         __RWSEM_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(name)            \
-       struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-                        struct lock_class_key *key);
-
-#define init_rwsem(sem)                                \
-do {                                           \
-       static struct lock_class_key __key;     \
-                                               \
-       __init_rwsem((sem), #sem, &__key);      \
-} while (0)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-       sem->count = RWSEM_UNLOCKED_VALUE;
-       spin_lock_init(&sem->wait_lock);
-       INIT_LIST_HEAD(&sem->wait_list);
-}
 
 /*
  * lock for reading
@@ -179,10 +128,5 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-       return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_SH_RWSEM_H */
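
This hunk, and the matching sparc64 and x86 rwsem hunks further down, are one cleanup: the per-arch copies of struct rw_semaphore, its initializers, and rwsem_is_locked() move into the generic <linux/rwsem.h>, so each arch header keeps only its bias constants and atomic primitives. For reference, the structure every architecture had been duplicating, copied from the lines removed here:

	struct rw_semaphore {
		long			count;
		spinlock_t		wait_lock;
		struct list_head	wait_list;
	#ifdef CONFIG_DEBUG_LOCK_ALLOC
		struct lockdep_map	dep_map;
	#endif
	};
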
index a78701da775b9d6dd2e246c80983562095bf6e54..4a5350037c8f59217a06e6e6ea1c429594d51073 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <asm-generic/sections.h>
 
-extern void __nosave_begin, __nosave_end;
+extern long __nosave_begin, __nosave_end;
 extern long __machvec_start, __machvec_end;
 extern char __uncached_start, __uncached_end;
 extern char _ebss[];
index 672944f5b19c63031f128232345360869ff6a558..e53b4b38bd11f954fd3d23b9e72e74d1540ea78e 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/io.h>
 #include <linux/sh_timer.h>
 #include <linux/serial_sci.h>
-#include <asm/machtypes.h>
+#include <generated/machtypes.h>
 
 static struct resource rtc_resources[] = {
        [0] = {
@@ -255,12 +255,17 @@ static struct platform_device *sh7750_early_devices[] __initdata = {
 
 void __init plat_early_device_setup(void)
 {
+       struct platform_device *dev[1];
+
        if (mach_is_rts7751r2d()) {
                scif_platform_data.scscr |= SCSCR_CKE1;
-               early_platform_add_devices(&scif_device, 1);
+               dev[0] = &scif_device;
+               early_platform_add_devices(dev, 1);
        } else {
-               early_platform_add_devices(&sci_device, 1);
-               early_platform_add_devices(&scif_device, 1);
+               dev[0] = &sci_device;
+               early_platform_add_devices(dev, 1);
+               dev[0] = &scif_device;
+               early_platform_add_devices(dev, 1);
        }
 
        early_platform_add_devices(sh7750_early_devices,
index faa8f86c0db490718d5dbb2c6948f11d3fff28d1..0901b2f14e15e9b2b396e923641de5c2a28eb90d 100644 (file)
 void __delay(unsigned long loops)
 {
        __asm__ __volatile__(
+               /*
+                * The ST40-300 appears to have an issue with this code:
+                * each loop normally takes two cycles, as on all other
+                * SH variants, but if the branch and its delay slot
+                * straddle an 8-byte boundary this rises to 3 cycles.
+                * This align directive ensures that cannot happen.
+                */
+               ".balign 8\n\t"
+
                "tst    %0, %0\n\t"
                "1:\t"
                "bf/s   1b\n\t"
index 88d3dc3d30d50aabcbdfcff04ac197cf19470bb7..5a580ea04429801eef9bda7d3301c8311492989e 100644 (file)
@@ -108,7 +108,8 @@ void copy_user_highpage(struct page *to, struct page *from,
                kunmap_atomic(vfrom, KM_USER0);
        }
 
-       if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+       if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
+           (vma->vm_flags & VM_EXEC))
                __flush_purge_region(vto, PAGE_SIZE);
 
        kunmap_atomic(vto, KM_USER1);
index 47f95839dc6956e190cd1175cb50b74491dcab00..444e7bea23bcbe81ac5005177f13e12e27192caf 100644 (file)
@@ -30,7 +30,7 @@
        : "r" (uaddr), "r" (oparg), "i" (-EFAULT)       \
        : "memory")
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -38,7 +38,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
        int cmparg = (encoded_op << 20) >> 20;
        int oldval = 0, ret, tem;
 
-       if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int))))
+       if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
                return -EFAULT;
        if (unlikely((((unsigned long) uaddr) & 0x3UL)))
                return -EINVAL;
@@ -85,26 +85,30 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
 {
+       int ret = 0;
+
        __asm__ __volatile__(
-       "\n1:   casa    [%3] %%asi, %2, %0\n"
+       "\n1:   casa    [%4] %%asi, %3, %1\n"
        "2:\n"
        "       .section .fixup,#alloc,#execinstr\n"
        "       .align  4\n"
        "3:     sethi   %%hi(2b), %0\n"
        "       jmpl    %0 + %%lo(2b), %%g0\n"
-       "        mov    %4, %0\n"
+       "       mov     %5, %0\n"
        "       .previous\n"
        "       .section __ex_table,\"a\"\n"
        "       .align  4\n"
        "       .word   1b, 3b\n"
        "       .previous\n"
-       : "=r" (newval)
-       : "0" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT)
+       : "+r" (ret), "=r" (newval)
+       : "1" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT)
        : "memory");
 
-       return newval;
+       *uval = newval;
+       return ret;
 }
 
 #endif /* !(_SPARC64_FUTEX_H) */
index a2b4302869bcfb8ffe1d54ee3282293a18aeae83..069bf4d663a119f90aedeb484a3603e2a8b6f2b7 100644 (file)
 
 #ifdef __KERNEL__
 
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-struct rwsem_waiter;
-
-struct rw_semaphore {
-       signed long                     count;
 #define RWSEM_UNLOCKED_VALUE           0x00000000L
 #define RWSEM_ACTIVE_BIAS              0x00000001L
 #define RWSEM_ACTIVE_MASK              0xffffffffL
 #define RWSEM_WAITING_BIAS             (-RWSEM_ACTIVE_MASK-1)
 #define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-       spinlock_t                      wait_lock;
-       struct list_head                wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       struct lockdep_map              dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(name) \
-       struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-                        struct lock_class_key *key);
-
-#define init_rwsem(sem)                                                \
-do {                                                           \
-       static struct lock_class_key __key;                     \
-                                                               \
-       __init_rwsem((sem), #sem, &__key);                      \
-} while (0)
 
 /*
  * lock for reading
@@ -160,11 +119,6 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
        return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-       return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _SPARC64_RWSEM_H */
index aeaa09a3c655606fa01bdb90be3491983b6ecf98..2cdc131b50acd04040762e9c814a1035d95087f1 100644 (file)
@@ -700,10 +700,8 @@ static void pcic_clear_clock_irq(void)
 
 static irqreturn_t pcic_timer_handler (int irq, void *h)
 {
-       write_seqlock(&xtime_lock);     /* Dummy, to show that we remember */
        pcic_clear_clock_irq();
-       do_timer(1);
-       write_sequnlock(&xtime_lock);
+       xtime_update(1);
 #ifndef CONFIG_SMP
        update_process_times(user_mode(get_irq_regs()));
 #endif
index 9c743b1886fff4314caf7487065aa8852f8db1e7..4211bfc9bcadc0249d1ba2f83a0e2837bd1ac243 100644 (file)
@@ -85,7 +85,7 @@ int update_persistent_clock(struct timespec now)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 
 #define TICK_SIZE (tick_nsec / 1000)
@@ -96,14 +96,9 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id)
        profile_tick(CPU_PROFILING);
 #endif
 
-       /* Protect counter clear so that do_gettimeoffset works */
-       write_seqlock(&xtime_lock);
-
        clear_clock_irq();
 
-       do_timer(1);
-
-       write_sequnlock(&xtime_lock);
+       xtime_update(1);
 
 #ifndef CONFIG_SMP
        update_process_times(user_mode(get_irq_regs()));
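
Both sparc timer handlers are instances of a tree-wide conversion: xtime_update(ticks) acquires xtime_lock and calls do_timer(ticks) internally, so interrupt handlers no longer open-code the seqlock. The shape of the change, as a sketch:

	/* before: each handler wrapped do_timer() itself */
	write_seqlock(&xtime_lock);
	do_timer(1);
	write_sequnlock(&xtime_lock);

	/* after: locking is centralized in the timekeeping core */
	xtime_update(1);
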
index cbddeb38ffdab2bf5f25562190dd833bf2f9ded4..d3c7a12ad879a4ed7dba7202417f7dfa1644a64a 100644 (file)
@@ -16,7 +16,7 @@
 #define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
 
 spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
-       [0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
+       [0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
 };
 
 #else /* SMP */
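
SPIN_LOCK_UNLOCKED had long been deprecated because it gives lockdep no lock class to key on; __SPIN_LOCK_UNLOCKED(name) supplies one. A short sketch of the initializer styles involved:

	/* old, now-removed style: anonymous lock class */
	spinlock_t lock = SPIN_LOCK_UNLOCKED;

	/* preferred for a standalone lock */
	DEFINE_SPINLOCK(lock);

	/* inside a larger initializer, as in the hash table above */
	spinlock_t table[4] = {
		[0 ... 3] = __SPIN_LOCK_UNLOCKED(table)
	};
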
index fe0d10dcae57a140d6fa083b8a815b65d89d965b..d03ec124a598bc4b8d287ee9ed028289f07d3cc9 100644 (file)
 #include <linux/uaccess.h>
 #include <linux/errno.h>
 
-extern struct __get_user futex_set(int __user *v, int i);
-extern struct __get_user futex_add(int __user *v, int n);
-extern struct __get_user futex_or(int __user *v, int n);
-extern struct __get_user futex_andn(int __user *v, int n);
-extern struct __get_user futex_cmpxchg(int __user *v, int o, int n);
+extern struct __get_user futex_set(u32 __user *v, int i);
+extern struct __get_user futex_add(u32 __user *v, int n);
+extern struct __get_user futex_or(u32 __user *v, int n);
+extern struct __get_user futex_andn(u32 __user *v, int n);
+extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
 
 #ifndef __tilegx__
-extern struct __get_user futex_xor(int __user *v, int n);
+extern struct __get_user futex_xor(u32 __user *v, int n);
 #else
-static inline struct __get_user futex_xor(int __user *uaddr, int n)
+static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
 {
        struct __get_user asm_ret = __get_user_4(uaddr);
        if (!asm_ret.err) {
@@ -53,7 +53,7 @@ static inline struct __get_user futex_xor(int __user *uaddr, int n)
 }
 #endif
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        pagefault_disable();
@@ -119,16 +119,17 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
        return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
-                                               int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                                               u32 oldval, u32 newval)
 {
        struct __get_user asm_ret;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        asm_ret = futex_cmpxchg(uaddr, oldval, newval);
-       return asm_ret.err ? asm_ret.err : asm_ret.val;
+       *uval = asm_ret.val;
+       return asm_ret.err;
 }
 
 #ifndef __tilegx__
index e351e14b433909e6b83e36bba70d86bf04918c3b..1e78940218c0f82767c16c72080d3d36dcd97c0f 100644 (file)
@@ -7,6 +7,7 @@ config UML
        bool
        default y
        select HAVE_GENERIC_HARDIRQS
+       select GENERIC_HARDIRQS_NO_DEPRECATED
 
 config MMU
        bool
index 5ee328099c6359ae721f919ba554ba53e7052551..02fb017fed472ed96d66aa1a4252b17b2f5bd677 100644 (file)
@@ -10,6 +10,8 @@ endmenu
 
 config UML_X86
        def_bool y
+       select GENERIC_FIND_FIRST_BIT
+       select GENERIC_FIND_NEXT_BIT
 
 config 64BIT
        bool
@@ -19,6 +21,9 @@ config X86_32
        def_bool !64BIT
        select HAVE_AOUT
 
+config X86_64
+       def_bool 64BIT
+
 config RWSEM_XCHGADD_ALGORITHM
        def_bool X86_XADD
 
index 975613b23dcfb9cd0b60d46afa05260e733d90c2..c70e047eed72e192cd9d0a8ade7cc6f8302a9d6c 100644 (file)
@@ -124,35 +124,18 @@ void mconsole_log(struct mc_request *req)
 #if 0
 void mconsole_proc(struct mc_request *req)
 {
-       struct nameidata nd;
        struct vfsmount *mnt = current->nsproxy->pid_ns->proc_mnt;
        struct file *file;
-       int n, err;
+       int n;
        char *ptr = req->request.data, *buf;
        mm_segment_t old_fs = get_fs();
 
        ptr += strlen("proc");
        ptr = skip_spaces(ptr);
 
-       err = vfs_path_lookup(mnt->mnt_root, mnt, ptr, LOOKUP_FOLLOW, &nd);
-       if (err) {
-               mconsole_reply(req, "Failed to look up file", 1, 0);
-               goto out;
-       }
-
-       err = may_open(&nd.path, MAY_READ, O_RDONLY);
-       if (result) {
-               mconsole_reply(req, "Failed to open file", 1, 0);
-               path_put(&nd.path);
-               goto out;
-       }
-
-       file = dentry_open(nd.path.dentry, nd.path.mnt, O_RDONLY,
-                          current_cred());
-       err = PTR_ERR(file);
+       file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
        if (IS_ERR(file)) {
                mconsole_reply(req, "Failed to open file", 1, 0);
-               path_put(&nd.path);
                goto out;
        }
 
index ba4a98ba39c0322989f32deaf36f7df990cd61f9..620f5b70957d55c4001a31fc9b9682a2e4feb687 100644 (file)
@@ -185,7 +185,7 @@ struct ubd {
        .no_cow =               0, \
        .shared =               0, \
        .cow =                  DEFAULT_COW, \
-       .lock =                 SPIN_LOCK_UNLOCKED,     \
+       .lock =                 __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \
        .request =              NULL, \
        .start_sg =             0, \
        .end_sg =               0, \
index 3f0ac9e0c96639b1d1a346be1686d3f6374e5479..64cfea80cfe2345937e0070779444b06accbfec7 100644 (file)
@@ -35,8 +35,10 @@ int show_interrupts(struct seq_file *p, void *v)
        }
 
        if (i < NR_IRQS) {
-               raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-               action = irq_desc[i].action;
+               struct irq_desc *desc = irq_to_desc(i);
+
+               raw_spin_lock_irqsave(&desc->lock, flags);
+               action = desc->action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ",i);
@@ -46,7 +48,7 @@ int show_interrupts(struct seq_file *p, void *v)
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-               seq_printf(p, " %14s", irq_desc[i].chip->name);
+               seq_printf(p, " %14s", get_irq_desc_chip(desc)->name);
                seq_printf(p, "  %s", action->name);
 
                for (action=action->next; action; action = action->next)
@@ -54,7 +56,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
                seq_putc(p, '\n');
 skip:
-               raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+               raw_spin_unlock_irqrestore(&desc->lock, flags);
        } else if (i == NR_IRQS)
                seq_putc(p, '\n');
 
@@ -360,10 +362,10 @@ EXPORT_SYMBOL(um_request_irq);
 EXPORT_SYMBOL(reactivate_fd);
 
 /*
- * irq_chip must define (startup || enable) &&
- * (shutdown || disable) && end
+ * irq_chip must define at least enable/disable and ack when
+ * the edge handler is used.
  */
-static void dummy(unsigned int irq)
+static void dummy(struct irq_data *d)
 {
 }
 
@@ -371,20 +373,17 @@ static void dummy(unsigned int irq)
 static struct irq_chip normal_irq_type = {
        .name = "SIGIO",
        .release = free_irq_by_irq_and_dev,
-       .disable = dummy,
-       .enable = dummy,
-       .ack = dummy,
-       .end = dummy
+       .irq_disable = dummy,
+       .irq_enable = dummy,
+       .irq_ack = dummy,
 };
 
 static struct irq_chip SIGVTALRM_irq_type = {
        .name = "SIGVTALRM",
        .release = free_irq_by_irq_and_dev,
-       .shutdown = dummy, /* never called */
-       .disable = dummy,
-       .enable = dummy,
-       .ack = dummy,
-       .end = dummy
+       .irq_disable = dummy,
+       .irq_enable = dummy,
+       .irq_ack = dummy,
 };
 
 void __init init_IRQ(void)
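
The UML change follows this cycle's genirq migration: chip callbacks take a struct irq_data instead of a raw irq number and gain an irq_ prefix; the number remains reachable as d->irq. A minimal sketch with a hypothetical chip, not taken from the patch:

	/* old-style callback: keyed by irq number */
	static void example_enable(unsigned int irq) { }

	/* new-style callback: state arrives as struct irq_data */
	static void example_irq_enable(struct irq_data *d) { }

	static struct irq_chip example_chip = {
		.name		= "example",
		.irq_enable	= example_irq_enable,
		.irq_disable	= example_irq_enable,	/* sketch: shared no-op */
		.irq_ack	= example_irq_enable,
	};
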
index b4c2e9c676232e85b6664462cfc90a22cffa6454..f8958b01b97549bc4ab9f311025a6c73aeddadcb 100644 (file)
@@ -64,8 +64,12 @@ config X86
        select HAVE_TEXT_POKE_SMP
        select HAVE_GENERIC_HARDIRQS
        select HAVE_SPARSE_IRQ
+       select GENERIC_FIND_FIRST_BIT
+       select GENERIC_FIND_NEXT_BIT
        select GENERIC_IRQ_PROBE
        select GENERIC_PENDING_IRQ if SMP
+       select GENERIC_IRQ_SHOW
+       select IRQ_FORCED_THREADING
        select USE_GENERIC_SMP_HELPERS if SMP
 
 config INSTRUCTION_DECODER
@@ -1707,7 +1711,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
        depends on NUMA
 
 config USE_PERCPU_NUMA_NODE_ID
-       def_bool X86_64
+       def_bool y
        depends on NUMA
 
 menu "Power management and ACPI options"
index 283c5a6a03a6536d5b171830d0474ecc9849aff0..ed47e6e1747f1af78549296e5fa85c862bcb1e7e 100644 (file)
@@ -294,11 +294,6 @@ config X86_GENERIC
 
 endif
 
-config X86_CPU
-       def_bool y
-       select GENERIC_FIND_FIRST_BIT
-       select GENERIC_FIND_NEXT_BIT
-
 #
 # Define implied options from the CPU selection here
 config X86_INTERNODE_CACHE_SHIFT
index 646aa78ba5fdb2fe89e54d52b88a2ce5f7c6e20f..46a82388243785a4c98dbb4641765549dc47d12b 100644 (file)
@@ -62,7 +62,12 @@ int main(int argc, char *argv[])
        if (fseek(f, -4L, SEEK_END)) {
                perror(argv[1]);
        }
-       fread(&olen, sizeof olen, 1, f);
+
+       if (fread(&olen, sizeof(olen), 1, f) != 1) {
+               perror(argv[1]);
+               return 1;
+       }
+
        ilen = ftell(f);
        olen = getle32(&olen);
        fclose(f);
index 518bb99c339480820fc3995b1456d29704d67f07..430312ba6e3f3b0b9cee93427feb69b18c9a5ab2 100644 (file)
@@ -25,6 +25,8 @@
 #define sysretl_audit ia32_ret_from_sys_call
 #endif
 
+       .section .entry.text, "ax"
+
 #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
 
        .macro IA32_ARG_FIXUP noebp=0
@@ -126,26 +128,20 @@ ENTRY(ia32_sysenter_target)
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
        movl    %ebp,%ebp               /* zero extension */
-       pushq   $__USER32_DS
-       CFI_ADJUST_CFA_OFFSET 8
+       pushq_cfi $__USER32_DS
        /*CFI_REL_OFFSET ss,0*/
-       pushq   %rbp
-       CFI_ADJUST_CFA_OFFSET 8
+       pushq_cfi %rbp
        CFI_REL_OFFSET rsp,0
-       pushfq
-       CFI_ADJUST_CFA_OFFSET 8
+       pushfq_cfi
        /*CFI_REL_OFFSET rflags,0*/
        movl    8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
        CFI_REGISTER rip,r10
-       pushq   $__USER32_CS
-       CFI_ADJUST_CFA_OFFSET 8
+       pushq_cfi $__USER32_CS
        /*CFI_REL_OFFSET cs,0*/
        movl    %eax, %eax
-       pushq   %r10
-       CFI_ADJUST_CFA_OFFSET 8
+       pushq_cfi %r10
        CFI_REL_OFFSET rip,0
-       pushq   %rax
-       CFI_ADJUST_CFA_OFFSET 8
+       pushq_cfi %rax
        cld
        SAVE_ARGS 0,0,1
        /* no need to do an access_ok check here because rbp has been
@@ -182,11 +178,9 @@ sysexit_from_sys_call:
        xorq    %r9,%r9
        xorq    %r10,%r10
        xorq    %r11,%r11
-       popfq
-       CFI_ADJUST_CFA_OFFSET -8
+       popfq_cfi
        /*CFI_RESTORE rflags*/
-       popq    %rcx                            /* User %esp */
-       CFI_ADJUST_CFA_OFFSET -8
+       popq_cfi %rcx                           /* User %esp */
        CFI_REGISTER rsp,rcx
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS_SYSEXIT32
@@ -421,8 +415,7 @@ ENTRY(ia32_syscall)
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
        movl %eax,%eax
-       pushq %rax
-       CFI_ADJUST_CFA_OFFSET 8
+       pushq_cfi %rax
        cld
        /* note the registers are not zero extended to the sf.
           this could be a problem. */
@@ -851,4 +844,7 @@ ia32_sys_call_table:
        .quad sys_fanotify_init
        .quad sys32_fanotify_mark
        .quad sys_prlimit64             /* 340 */
+       .quad sys_name_to_handle_at
+       .quad compat_sys_open_by_handle_at
+       .quad compat_sys_clock_adjtime
 ia32_syscall_end:
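
pushq_cfi and friends collapse the push/pop plus CFI_ADJUST_CFA_OFFSET pairs that dominated this file. Their definitions live in arch/x86/include/asm/dwarf2.h and look roughly like the following sketch, reconstructed from the usage above:

	.macro pushq_cfi reg
	pushq \reg
	CFI_ADJUST_CFA_OFFSET 8
	.endm

	.macro popq_cfi reg
	popq \reg
	CFI_ADJUST_CFA_OFFSET -8
	.endm
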
index 211ca3f7fd16f07a313fc4333b563888765519a4..b964ec45754691807f6adc25f23ea072e7b5070e 100644 (file)
@@ -88,6 +88,7 @@ extern int acpi_disabled;
 extern int acpi_pci_disabled;
 extern int acpi_skip_timer_override;
 extern int acpi_use_timer_override;
+extern int acpi_fix_pin2_polarity;
 
 extern u8 acpi_sci_flags;
 extern int acpi_sci_override_gsi;
@@ -185,15 +186,7 @@ struct bootnode;
 
 #ifdef CONFIG_ACPI_NUMA
 extern int acpi_numa;
-extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
-                               unsigned long end);
-extern int acpi_scan_nodes(unsigned long start, unsigned long end);
-#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-
-#ifdef CONFIG_NUMA_EMU
-extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
-                                  int num_nodes);
-#endif
+extern int x86_acpi_numa_init(void);
 #endif /* CONFIG_ACPI_NUMA */
 
 #define acpi_unlazy_tlb(x)     leave_mm(x)
index 64dc82ee19f00da98e796c81e767e677c56cf796..e264ae5a144347c30aadac9561cd38af1fa418ab 100644 (file)
@@ -9,23 +9,20 @@ struct amd_nb_bus_dev_range {
        u8 dev_limit;
 };
 
-extern struct pci_device_id amd_nb_misc_ids[];
+extern const struct pci_device_id amd_nb_misc_ids[];
 extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
 struct bootnode;
 
 extern int early_is_amd_nb(u32 value);
 extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
-extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
-extern int amd_scan_nodes(void);
-
-#ifdef CONFIG_NUMA_EMU
-extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
-extern void amd_get_nodes(struct bootnode *nodes);
-#endif
+extern int amd_numa_init(void);
+extern int amd_get_subcaches(int);
+extern int amd_set_subcaches(int, int);
 
 struct amd_northbridge {
        struct pci_dev *misc;
+       struct pci_dev *link;
 };
 
 struct amd_northbridge_info {
@@ -37,6 +34,7 @@ extern struct amd_northbridge_info amd_northbridges;
 
 #define AMD_NB_GART                    0x1
 #define AMD_NB_L3_INDEX_DISABLE                0x2
+#define AMD_NB_L3_PARTITIONING         0x4
 
 #ifdef CONFIG_AMD_NB
 
index 5b7d5137e167cc7fb8d2770ad93a5447abccb0fa..a279d98ea95e9880d93fee697b3efd30646f9b5b 100644 (file)
@@ -303,8 +303,6 @@ struct apic {
 
        void (*setup_apic_routing)(void);
        int (*multi_timer_check)(int apic, int irq);
-       int (*apicid_to_node)(int logical_apicid);
-       int (*cpu_to_logical_apicid)(int cpu);
        int (*cpu_present_to_apicid)(int mps_cpu);
        void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
        void (*setup_portio_remap)(void);
@@ -352,6 +350,23 @@ struct apic {
        void (*icr_write)(u32 low, u32 high);
        void (*wait_icr_idle)(void);
        u32 (*safe_wait_icr_idle)(void);
+
+#ifdef CONFIG_X86_32
+       /*
+        * Called very early during boot from get_smp_config().  It should
+        * return the logical apicid.  x86_[bios]_cpu_to_apicid is
+        * initialized before this function is called.
+        *
+        * If the logical apicid can't be determined that early, the function
+        * may return BAD_APICID.  The logical apicid will be configured after
+        * init_apic_ldr() while bringing up CPUs.  Note that NUMA affinity
+        * won't be applied properly during early boot in this case.
+        */
+       int (*x86_32_early_logical_apicid)(int cpu);
+
+       /* determine CPU -> NUMA node mapping */
+       int (*x86_32_numa_cpu_node)(int cpu);
+#endif
 };
 
 /*
@@ -499,6 +514,11 @@ extern struct apic apic_noop;
 
 extern struct apic apic_default;
 
+static inline int noop_x86_32_early_logical_apicid(int cpu)
+{
+       return BAD_APICID;
+}
+
 /*
  * Set up the logical destination ID.
  *
@@ -518,7 +538,7 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
        return cpuid_apic >> index_msb;
 }
 
-extern int default_apicid_to_node(int logical_apicid);
+extern int default_x86_32_numa_cpu_node(int cpu);
 
 #endif
 
@@ -554,12 +574,6 @@ static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_ma
        *retmap = *phys_map;
 }
 
-/* Mapping from cpu number to logical apicid */
-static inline int default_cpu_to_logical_apicid(int cpu)
-{
-       return 1 << cpu;
-}
-
 static inline int __default_cpu_present_to_apicid(int mps_cpu)
 {
        if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
@@ -592,8 +606,4 @@ extern int default_check_phys_apicid_present(int phys_apicid);
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-#ifdef CONFIG_X86_32
-extern u8 cpu_2_logical_apicid[NR_CPUS];
-#endif
-
 #endif /* _ASM_X86_APIC_H */
diff --git a/arch/x86/include/asm/ce4100.h b/arch/x86/include/asm/ce4100.h
new file mode 100644 (file)
index 0000000..e656ad8
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _ASM_CE4100_H_
+#define _ASM_CE4100_H_
+
+int ce4100_pci_init(void);
+
+#endif
index 220e2ea08e80b3b2f40b33771913c4d80e2d815e..91f3e087cf21817704c68f24379edc4cf31a6885 100644 (file)
 #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
 #define X86_FEATURE_TBM                (6*32+21) /* trailing bit manipulations */
 #define X86_FEATURE_TOPOEXT    (6*32+22) /* topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
@@ -279,6 +280,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xsave          boot_cpu_has(X86_FEATURE_XSAVE)
 #define cpu_has_hypervisor     boot_cpu_has(X86_FEATURE_HYPERVISOR)
 #define cpu_has_pclmulqdq      boot_cpu_has(X86_FEATURE_PCLMULQDQ)
+#define cpu_has_perfctr_core   boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg                1
index 57650ab4a5f593a99821d54a1f78b691fc2ab77a..1cd6d26a0a8da1594f45b9869f69fbfe57aacb56 100644 (file)
@@ -16,10 +16,13 @@ BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
 BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
 
-.irpc idx, "01234567"
+.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
+       16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+.if NUM_INVALIDATE_TLB_VECTORS > \idx
 BUILD_INTERRUPT3(invalidate_interrupt\idx,
                 (INVALIDATE_TLB_VECTOR_START)+\idx,
                 smp_invalidate_interrupt)
+.endif
 .endr
 #endif
 
index 06850a7194e159561e8c3d7d6c6e6f2f78485a9a..2c6fc9e6281252d669fc3efbbc1ebed704e9a6ba 100644 (file)
@@ -7,14 +7,12 @@
    frame pointer later */
 #ifdef CONFIG_FRAME_POINTER
        .macro FRAME
-       pushl %ebp
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ebp
        CFI_REL_OFFSET ebp,0
        movl %esp,%ebp
        .endm
        .macro ENDFRAME
-       popl %ebp
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %ebp
        CFI_RESTORE ebp
        .endm
 #else
index 1f11ce44e956dc41d3e33812821aa8249455a269..d09bb03653f028e86db69270b9cbb0959adba914 100644 (file)
@@ -37,7 +37,7 @@
                       "+m" (*uaddr), "=&r" (tem)               \
                     : "r" (oparg), "i" (-EFAULT), "1" (0))
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -48,7 +48,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
@@ -109,9 +109,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
        return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
-                                               int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                                               u32 oldval, u32 newval)
 {
+       int ret = 0;
 
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
        /* Real i386 machines have no cmpxchg instruction */
@@ -119,21 +120,22 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
                return -ENOSYS;
 #endif
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
-       asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
+       asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
                     "2:\t.section .fixup, \"ax\"\n"
-                    "3:\tmov     %2, %0\n"
+                    "3:\tmov     %3, %0\n"
                     "\tjmp     2b\n"
                     "\t.previous\n"
                     _ASM_EXTABLE(1b, 3b)
-                    : "=a" (oldval), "+m" (*uaddr)
-                    : "i" (-EFAULT), "r" (newval), "0" (oldval)
+                    : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+                    : "i" (-EFAULT), "r" (newval), "1" (oldval)
                     : "memory"
        );
 
-       return oldval;
+       *uval = oldval;
+       return ret;
 }
 
 #endif
index 0274ec5a7e6285c18bbdd9908bc04d4b829fd9cc..bb9efe8706e2bf7b1c1343d153ab2fdf968e601a 100644 (file)
@@ -45,6 +45,30 @@ extern void invalidate_interrupt4(void);
 extern void invalidate_interrupt5(void);
 extern void invalidate_interrupt6(void);
 extern void invalidate_interrupt7(void);
+extern void invalidate_interrupt8(void);
+extern void invalidate_interrupt9(void);
+extern void invalidate_interrupt10(void);
+extern void invalidate_interrupt11(void);
+extern void invalidate_interrupt12(void);
+extern void invalidate_interrupt13(void);
+extern void invalidate_interrupt14(void);
+extern void invalidate_interrupt15(void);
+extern void invalidate_interrupt16(void);
+extern void invalidate_interrupt17(void);
+extern void invalidate_interrupt18(void);
+extern void invalidate_interrupt19(void);
+extern void invalidate_interrupt20(void);
+extern void invalidate_interrupt21(void);
+extern void invalidate_interrupt22(void);
+extern void invalidate_interrupt23(void);
+extern void invalidate_interrupt24(void);
+extern void invalidate_interrupt25(void);
+extern void invalidate_interrupt26(void);
+extern void invalidate_interrupt27(void);
+extern void invalidate_interrupt28(void);
+extern void invalidate_interrupt29(void);
+extern void invalidate_interrupt30(void);
+extern void invalidate_interrupt31(void);
 
 extern void irq_move_cleanup_interrupt(void);
 extern void reboot_interrupt(void);
index 36fb1a6a510908553465fbef97785464b83c0879..8dbe353e41e160a49da8b370f2f489e161aa8e11 100644 (file)
@@ -11,8 +11,8 @@ kernel_physical_mapping_init(unsigned long start,
                             unsigned long page_size_mask);
 
 
-extern unsigned long __initdata e820_table_start;
-extern unsigned long __meminitdata e820_table_end;
-extern unsigned long __meminitdata e820_table_top;
+extern unsigned long __initdata pgt_buf_start;
+extern unsigned long __meminitdata pgt_buf_end;
+extern unsigned long __meminitdata pgt_buf_top;
 
 #endif /* _ASM_X86_INIT_32_H */
index 0b7228268a63e1d9bfacc13aca04c664e8297b1d..615fa9061b57cde4f8cdb10ac574904eec6e1147 100644 (file)
@@ -123,10 +123,6 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
                                                 int vector);
 extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
                                                         int vector);
-extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
-                                                        int vector);
-extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
-                                                        int vector);
 
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
@@ -150,6 +146,10 @@ static inline void __default_local_send_IPI_all(int vector)
 }
 
 #ifdef CONFIG_X86_32
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+                                                        int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+                                                        int vector);
 extern void default_send_IPI_mask_logical(const struct cpumask *mask,
                                                 int vector);
 extern void default_send_IPI_allbutself(int vector);
index 6af0894dafb445cdaedceb1630249c47e955633f..6e976ee3b3ef7cf3aeb2522cecf153b6931c7d7d 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_IRQ_VECTORS_H
 #define _ASM_X86_IRQ_VECTORS_H
 
+#include <linux/threads.h>
 /*
  * Linux IRQ vector layout.
  *
@@ -16,8 +17,8 @@
  *  Vectors   0 ...  31 : system traps and exceptions - hardcoded events
  *  Vectors  32 ... 127 : device interrupts
  *  Vector  128         : legacy int80 syscall interface
- *  Vectors 129 ... 237 : device interrupts
- *  Vectors 238 ... 255 : special interrupts
+ *  Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 : device interrupts
+ *  Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts
  *
  * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
  *
 #define THRESHOLD_APIC_VECTOR          0xf9
 #define REBOOT_VECTOR                  0xf8
 
-/* f0-f7 used for spreading out TLB flushes: */
-#define INVALIDATE_TLB_VECTOR_END      0xf7
-#define INVALIDATE_TLB_VECTOR_START    0xf0
-#define NUM_INVALIDATE_TLB_VECTORS        8
-
-/*
- * Local APIC timer IRQ vector is on a different priority level,
- * to work around the 'lost local interrupt if more than 2 IRQ
- * sources per level' errata.
- */
-#define LOCAL_TIMER_VECTOR             0xef
-
 /*
  * Generic system vector for platform specific use
  */
-#define X86_PLATFORM_IPI_VECTOR                0xed
+#define X86_PLATFORM_IPI_VECTOR                0xf7
 
 /*
  * IRQ work vector:
  */
-#define IRQ_WORK_VECTOR                        0xec
+#define IRQ_WORK_VECTOR                        0xf6
 
-#define UV_BAU_MESSAGE                 0xea
+#define UV_BAU_MESSAGE                 0xf5
 
 /*
  * Self IPI vector for machine checks
  */
-#define MCE_SELF_VECTOR                        0xeb
+#define MCE_SELF_VECTOR                        0xf4
 
 /* Xen vector callback to receive events in a HVM domain */
-#define XEN_HVM_EVTCHN_CALLBACK                0xe9
+#define XEN_HVM_EVTCHN_CALLBACK                0xf3
+
+/*
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
+ */
+#define LOCAL_TIMER_VECTOR             0xef
+
+/* up to 32 vectors used for spreading out TLB flushes: */
+#if NR_CPUS <= 32
+# define NUM_INVALIDATE_TLB_VECTORS    (NR_CPUS)
+#else
+# define NUM_INVALIDATE_TLB_VECTORS    (32)
+#endif
+
+#define INVALIDATE_TLB_VECTOR_END      (0xee)
+#define INVALIDATE_TLB_VECTOR_START    \
+       (INVALIDATE_TLB_VECTOR_END-NUM_INVALIDATE_TLB_VECTORS+1)
 
 #define NR_VECTORS                      256
 
index ca242d35e8733ee8c23a7a960d28c933b5d055f4..518bbbb9ee59135bd6e3213dc3550803c9df2b47 100644 (file)
@@ -13,7 +13,6 @@ enum die_val {
        DIE_PANIC,
        DIE_NMI,
        DIE_DIE,
-       DIE_NMIWATCHDOG,
        DIE_KERNELDEBUG,
        DIE_TRAP,
        DIE_GPF,
index 0c90dd9f05053c83591df6e04ec5d7979fee779f..9c7d95f6174bfa1b01b14e121017b6383a87c16f 100644 (file)
@@ -25,7 +25,6 @@ extern int pic_mode;
 #define MAX_IRQ_SOURCES                256
 
 extern unsigned int def_to_bigsmp;
-extern u8 apicid_2_node[];
 
 #ifdef CONFIG_X86_NUMAQ
 extern int mp_bus_id_to_node[MAX_MP_BUSSES];
@@ -33,8 +32,6 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
 extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
 #endif
 
-#define MAX_APICID             256
-
 #else /* CONFIG_X86_64: */
 
 #define MAX_MP_BUSSES          256
index 4d0dfa0d998e9f80ce244d86e1fd583513aaaaca..823d482234008790894f75fe384f0b677d2b701c 100644 (file)
 #define MSR_IA32_PERFCTR1              0x000000c2
 #define MSR_FSB_FREQ                   0x000000cd
 
+#define MSR_NHM_SNB_PKG_CST_CFG_CTL    0x000000e2
+#define NHM_C3_AUTO_DEMOTE             (1UL << 25)
+#define NHM_C1_AUTO_DEMOTE             (1UL << 26)
+#define ATM_LNC_C6_AUTO_DEMOTE         (1UL << 25)
+
 #define MSR_MTRRcap                    0x000000fe
 #define MSR_IA32_BBL_CR_CTL            0x00000119
 
@@ -47,6 +52,9 @@
 #define MSR_IA32_MCG_STATUS            0x0000017a
 #define MSR_IA32_MCG_CTL               0x0000017b
 
+#define MSR_OFFCORE_RSP_0              0x000001a6
+#define MSR_OFFCORE_RSP_1              0x000001a7
+
 #define MSR_IA32_PEBS_ENABLE           0x000003f1
 #define MSR_IA32_DS_AREA               0x00000600
 #define MSR_IA32_PERF_CAPABILITIES     0x00000345
index c76f5b92b840f8042e43d4f614ec2170b0d7a933..07f46016d3ff30cb7dc86ba202cabaeab2ecc3e1 100644 (file)
@@ -7,7 +7,6 @@
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
-extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
 extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
 extern int reserve_perfctr_nmi(unsigned int);
 extern void release_perfctr_nmi(unsigned int);
index 27da400d3138cd63906b6df5e3c74d5e90048c81..3d4dab43c99469b6d917f0ba4f4bebd7cdc25bb6 100644 (file)
@@ -1,5 +1,57 @@
+#ifndef _ASM_X86_NUMA_H
+#define _ASM_X86_NUMA_H
+
+#include <asm/topology.h>
+#include <asm/apicdef.h>
+
+#ifdef CONFIG_NUMA
+
+#define NR_NODE_MEMBLKS                (MAX_NUMNODES*2)
+
+/*
+ * __apicid_to_node[] stores the raw mapping between physical apicid and
+ * node and is used to initialize cpu_to_node mapping.
+ *
+ * The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus
+ * should be accessed by the accessors - set_apicid_to_node() and
+ * numa_cpu_node().
+ */
+extern s16 __apicid_to_node[MAX_LOCAL_APIC];
+
+static inline void set_apicid_to_node(int apicid, s16 node)
+{
+       __apicid_to_node[apicid] = node;
+}
+#else  /* CONFIG_NUMA */
+static inline void set_apicid_to_node(int apicid, s16 node)
+{
+}
+#endif /* CONFIG_NUMA */
+
 #ifdef CONFIG_X86_32
 # include "numa_32.h"
 #else
 # include "numa_64.h"
 #endif
+
+#ifdef CONFIG_NUMA
+extern void __cpuinit numa_set_node(int cpu, int node);
+extern void __cpuinit numa_clear_node(int cpu);
+extern void __init numa_init_array(void);
+extern void __init init_cpu_to_node(void);
+extern void __cpuinit numa_add_cpu(int cpu);
+extern void __cpuinit numa_remove_cpu(int cpu);
+#else  /* CONFIG_NUMA */
+static inline void numa_set_node(int cpu, int node)    { }
+static inline void numa_clear_node(int cpu)            { }
+static inline void numa_init_array(void)               { }
+static inline void init_cpu_to_node(void)              { }
+static inline void numa_add_cpu(int cpu)               { }
+static inline void numa_remove_cpu(int cpu)            { }
+#endif /* CONFIG_NUMA */
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable);
+#endif
+
+#endif /* _ASM_X86_NUMA_H */
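
The accessors replace direct stores into the old apicid_to_node[] array. A hedged sketch of how SRAT parsing might record a mapping with the new helper; acpi_map_pxm_to_node() is the existing ACPI routine, while the wrapper name is made up for illustration:

	static void __init record_affinity(int apicid, int pxm)
	{
		int node = acpi_map_pxm_to_node(pxm);	/* proximity domain -> node */

		if (node >= 0 && apicid < MAX_LOCAL_APIC)
			set_apicid_to_node(apicid, node);
	}
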
index b0ef2b449a9d6cb360c65104c6a207f4bdad505a..c6beed1ef10373f9be333a01fa8648cb499a1836 100644 (file)
@@ -4,7 +4,12 @@
 extern int numa_off;
 
 extern int pxm_to_nid(int pxm);
-extern void numa_remove_cpu(int cpu);
+
+#ifdef CONFIG_NUMA
+extern int __cpuinit numa_cpu_node(int cpu);
+#else  /* CONFIG_NUMA */
+static inline int numa_cpu_node(int cpu)               { return NUMA_NO_NODE; }
+#endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_HIGHMEM
 extern void set_highmem_pages_init(void);
index 0493be39607cfac8dd2dad1715aa3d06832a1fa5..344eb1790b4646901f842786a8df9fb21f17bd63 100644 (file)
@@ -2,23 +2,16 @@
 #define _ASM_X86_NUMA_64_H
 
 #include <linux/nodemask.h>
-#include <asm/apicdef.h>
 
 struct bootnode {
        u64 start;
        u64 end;
 };
 
-extern int compute_hash_shift(struct bootnode *nodes, int numblks,
-                             int *nodeids);
-
 #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
 
-extern void numa_init_array(void);
 extern int numa_off;
 
-extern s16 apicid_to_node[MAX_LOCAL_APIC];
-
 extern unsigned long numa_free_all_bootmem(void);
 extern void setup_node_bootmem(int nodeid, unsigned long start,
                               unsigned long end);
@@ -31,11 +24,11 @@ extern void setup_node_bootmem(int nodeid, unsigned long start,
  */
 #define NODE_MIN_SIZE (4*1024*1024)
 
-extern void __init init_cpu_to_node(void);
-extern void __cpuinit numa_set_node(int cpu, int node);
-extern void __cpuinit numa_clear_node(int cpu);
-extern void __cpuinit numa_add_cpu(int cpu);
-extern void __cpuinit numa_remove_cpu(int cpu);
+extern nodemask_t numa_nodes_parsed __initdata;
+
+extern int __cpuinit numa_cpu_node(int cpu);
+extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+extern void __init numa_set_distance(int from, int to, int distance);
 
 #ifdef CONFIG_NUMA_EMU
 #define FAKE_NODE_MIN_SIZE     ((u64)32 << 20)
@@ -43,11 +36,7 @@ extern void __cpuinit numa_remove_cpu(int cpu);
 void numa_emu_cmdline(char *);
 #endif /* CONFIG_NUMA_EMU */
 #else
-static inline void init_cpu_to_node(void)              { }
-static inline void numa_set_node(int cpu, int node)    { }
-static inline void numa_clear_node(int cpu)            { }
-static inline void numa_add_cpu(int cpu, int node)     { }
-static inline void numa_remove_cpu(int cpu)            { }
+static inline int numa_cpu_node(int cpu)               { return NUMA_NO_NODE; }
 #endif
 
 #endif /* _ASM_X86_NUMA_64_H */
index 1df66211fd1b53d4d6233cc1e481e88009622aab..bce688d54c12383e29b360bb7688b3fc9d385830 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_X86_PAGE_DEFS_H
 
 #include <linux/const.h>
+#include <linux/types.h>
 
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT     12
@@ -45,11 +46,15 @@ extern int devmem_is_allowed(unsigned long pagenr);
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
 
+static inline phys_addr_t get_max_mapped(void)
+{
+       return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
+}
+
 extern unsigned long init_memory_mapping(unsigned long start,
                                         unsigned long end);
 
-extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn,
-                               int acpi, int k8);
+extern void initmem_init(void);
 extern void free_initmem(void);
 
 #endif /* !__ASSEMBLY__ */
index e2f6a99f14ab3eb47b48da80a21ebc676e4eaa82..cc29086e30cd1b4b4d8ba17d696f24286d420db0 100644 (file)
@@ -22,6 +22,7 @@
 
 #define ARCH_P4_CNTRVAL_BITS   (40)
 #define ARCH_P4_CNTRVAL_MASK   ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
+#define ARCH_P4_UNFLAGGED_BIT  ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
 
 #define P4_ESCR_EVENT_MASK     0x7e000000U
 #define P4_ESCR_EVENT_SHIFT    25
index 45636cefa186b427f558d98b7257a0f00dd405ec..4c25ab48257bf8043639d87cc557f04ab2ad263a 100644 (file)
@@ -94,10 +94,6 @@ struct cpuinfo_x86 {
        int                     x86_cache_alignment;    /* In bytes */
        int                     x86_power;
        unsigned long           loops_per_jiffy;
-#ifdef CONFIG_SMP
-       /* cpus sharing the last level cache: */
-       cpumask_var_t           llc_shared_map;
-#endif
        /* cpuid returned max cores value: */
        u16                      x86_max_cores;
        u16                     apicid;
index d1e41b0f9b60b37eb061caec5661fc6fb9e1cbbf..df4cd32b4cc6eba5e623618577466206bd6b8de2 100644 (file)
 #endif
 
 #ifdef __KERNEL__
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/lockdep.h>
 #include <asm/asm.h>
 
-struct rwsem_waiter;
-
-extern asmregparm struct rw_semaphore *
- rwsem_down_read_failed(struct rw_semaphore *sem);
-extern asmregparm struct rw_semaphore *
- rwsem_down_write_failed(struct rw_semaphore *sem);
-extern asmregparm struct rw_semaphore *
- rwsem_wake(struct rw_semaphore *);
-extern asmregparm struct rw_semaphore *
- rwsem_downgrade_wake(struct rw_semaphore *sem);
-
 /*
- * the semaphore definition
- *
  * The bias values and the counter type limit the number of
  * potential readers/writers to 32767 for 32 bits and 2147483647
  * for 64 bits.
@@ -74,43 +57,6 @@ extern asmregparm struct rw_semaphore *
 #define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-typedef signed long rwsem_count_t;
-
-struct rw_semaphore {
-       rwsem_count_t           count;
-       spinlock_t              wait_lock;
-       struct list_head        wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       struct lockdep_map dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-
-#define __RWSEM_INITIALIZER(name)                              \
-{                                                              \
-       RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-       LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
-}
-
-#define DECLARE_RWSEM(name)                                    \
-       struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-                        struct lock_class_key *key);
-
-#define init_rwsem(sem)                                                \
-do {                                                           \
-       static struct lock_class_key __key;                     \
-                                                               \
-       __init_rwsem((sem), #sem, &__key);                      \
-} while (0)
-
 /*
  * lock for reading
  */
@@ -133,7 +79,7 @@ static inline void __down_read(struct rw_semaphore *sem)
  */
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-       rwsem_count_t result, tmp;
+       long result, tmp;
        asm volatile("# beginning __down_read_trylock\n\t"
                     "  mov          %0,%1\n\t"
                     "1:\n\t"
@@ -155,7 +101,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-       rwsem_count_t tmp;
+       long tmp;
        asm volatile("# beginning down_write\n\t"
                     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
                     /* adds 0xffff0001, returns the old value */
@@ -180,9 +126,8 @@ static inline void __down_write(struct rw_semaphore *sem)
  */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-       rwsem_count_t ret = cmpxchg(&sem->count,
-                                   RWSEM_UNLOCKED_VALUE,
-                                   RWSEM_ACTIVE_WRITE_BIAS);
+       long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+                          RWSEM_ACTIVE_WRITE_BIAS);
        if (ret == RWSEM_UNLOCKED_VALUE)
                return 1;
        return 0;
@@ -193,7 +138,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-       rwsem_count_t tmp;
+       long tmp;
        asm volatile("# beginning __up_read\n\t"
                     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
                     /* subtracts 1, returns the old value */
@@ -211,7 +156,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-       rwsem_count_t tmp;
+       long tmp;
        asm volatile("# beginning __up_write\n\t"
                     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
                     /* subtracts 0xffff0001, returns the old value */
@@ -247,8 +192,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 /*
  * implement atomic add functionality
  */
-static inline void rwsem_atomic_add(rwsem_count_t delta,
-                                   struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
 {
        asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
                     : "+m" (sem->count)
@@ -258,10 +202,9 @@ static inline void rwsem_atomic_add(rwsem_count_t delta,
 /*
  * implement exchange and add functionality
  */
-static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
-                                               struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-       rwsem_count_t tmp = delta;
+       long tmp = delta;
 
        asm volatile(LOCK_PREFIX "xadd %0,%1"
                     : "+r" (tmp), "+m" (sem->count)
@@ -270,10 +213,5 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
        return tmp + delta;
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-       return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_RWSEM_H */
index 1f46951367763471fce5819fcf54045ced1fe8e9..73b11bc0ae6faeb8102ae1303ceaa8335d89e340 100644 (file)
 #endif
 #include <asm/thread_info.h>
 #include <asm/cpumask.h>
+#include <asm/cpufeature.h>
 
 extern int smp_num_siblings;
 extern unsigned int num_processors;
 
+static inline bool cpu_has_ht_siblings(void)
+{
+       bool has_siblings = false;
+#ifdef CONFIG_SMP
+       has_siblings = cpu_has_ht && smp_num_siblings > 1;
+#endif
+       return has_siblings;
+}
+
 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+/* cpus sharing the last level cache: */
+DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
 DECLARE_PER_CPU(int, cpu_number);
 
@@ -36,8 +48,16 @@ static inline struct cpumask *cpu_core_mask(int cpu)
        return per_cpu(cpu_core_map, cpu);
 }
 
+static inline struct cpumask *cpu_llc_shared_mask(int cpu)
+{
+       return per_cpu(cpu_llc_shared_map, cpu);
+}
+
 DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
+DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
+#endif
 
 /* Static state in head.S used to set up a CPU */
 extern unsigned long stack_start; /* Initial stack pointer address */
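
cpu_has_ht_siblings() above compiles down to a constant false on !CONFIG_SMP builds, so callers need no ifdefs of their own. A standalone sketch of that guard pattern, with stand-in variables for cpu_has_ht and smp_num_siblings (build with -DCONFIG_SMP to flip the result; the values are made up):

    #include <stdbool.h>
    #include <stdio.h>

    #ifdef CONFIG_SMP
    static int smp_num_siblings = 2;    /* stand-in for the kernel variable */
    static bool cpu_has_ht = true;      /* stand-in for the cpufeature test */
    #endif

    static inline bool cpu_has_ht_siblings(void)
    {
            bool has_siblings = false;
    #ifdef CONFIG_SMP
            has_siblings = cpu_has_ht && smp_num_siblings > 1;
    #endif
            return has_siblings;
    }

    int main(void)
    {
            printf("HT siblings: %s\n", cpu_has_ht_siblings() ? "yes" : "no");
            return 0;
    }
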
index 6c22bf353f26495b1fa71dc5a92cdaa05e5b1d8e..725b7783199328c936bb10350d55b797b661315e 100644 (file)
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
         */
        CMOS_WRITE(0, 0xf);
 
-       *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
+       *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
 }
 
 static inline void __init smpboot_setup_io_apic(void)
index 33ecc3ea8782add8f88e68405d227ee4af700134..12569e691ce3aa139b745682fce86349918cee1a 100644 (file)
@@ -98,8 +98,6 @@ do {                                                                  \
  */
 #define HAVE_DISABLE_HLT
 #else
-#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
-#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
 
 /* frame pointer must be last for get_wchan */
 #define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
index 21899cc31e52110242dc6f041c08048701ecee32..910a7084f7f22dda7739d2b3d122d1c5be39ae33 100644 (file)
 
 #include <asm/mpspec.h>
 
-#ifdef CONFIG_X86_32
-
-/* Mappings between logical cpu number and node number */
-extern int cpu_to_node_map[];
-
-/* Returns the number of the node containing CPU 'cpu' */
-static inline int __cpu_to_node(int cpu)
-{
-       return cpu_to_node_map[cpu];
-}
-#define early_cpu_to_node __cpu_to_node
-#define cpu_to_node __cpu_to_node
-
-#else /* CONFIG_X86_64 */
-
 /* Mappings between logical cpu number and node number */
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 
@@ -84,8 +69,6 @@ static inline int early_cpu_to_node(int cpu)
 
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
-#endif /* CONFIG_X86_64 */
-
 /* Mappings between node number and cpus on that node. */
 extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
 
@@ -155,7 +138,7 @@ extern unsigned long node_remap_size[];
        .balance_interval       = 1,                                    \
 }
 
-#ifdef CONFIG_X86_64_ACPI_NUMA
+#ifdef CONFIG_X86_64
 extern int __node_distance(int, int);
 #define node_distance(a, b) __node_distance(a, b)
 #endif
index b766a5e8ba0e7802501b049034394a343216cf4a..ffaf183c619a21f30c24384f8aa9e106e496dfa2 100644 (file)
 #define __NR_fanotify_init     338
 #define __NR_fanotify_mark     339
 #define __NR_prlimit64         340
+#define __NR_name_to_handle_at 341
+#define __NR_open_by_handle_at  342
+#define __NR_clock_adjtime     343
 
 #ifdef __KERNEL__
 
-#define NR_syscalls 341
+#define NR_syscalls 344
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
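
The three new syscalls are wired up as numbers 341-343 on 32-bit x86. A hedged userspace demo of name_to_handle_at() via raw syscall(2), assuming a 32-bit x86 kernel with this patch applied; the path and buffer size are arbitrary, and the struct mirrors the then-new file_handle ABI:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* mirrors struct file_handle from the new ABI; 128 bytes is arbitrary */
    struct my_file_handle {
            unsigned int handle_bytes;
            int handle_type;
            unsigned char f_handle[128];
    };

    int main(void)
    {
            struct my_file_handle fh = { .handle_bytes = 128 };
            int mount_id;

            /* __NR_name_to_handle_at == 341 per the hunk above (i386 only) */
            if (syscall(341, AT_FDCWD, "/etc/hostname", &fh, &mount_id, 0) < 0) {
                    perror("name_to_handle_at");
                    return 1;
            }
            printf("handle_bytes=%u type=%d mount_id=%d\n",
                   fh.handle_bytes, fh.handle_type, mount_id);
            return 0;
    }
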
index 363e9b8a715b6d5144ebc8135d65dc75ce9e9e5c..5466bea670e7e3c14774c998fb0f90d089cb3d8a 100644 (file)
@@ -669,6 +669,12 @@ __SYSCALL(__NR_fanotify_init, sys_fanotify_init)
 __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
 #define __NR_prlimit64                         302
 __SYSCALL(__NR_prlimit64, sys_prlimit64)
+#define __NR_name_to_handle_at                 303
+__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
+#define __NR_open_by_handle_at                 304
+__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+#define __NR_clock_adjtime                     305
+__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime)
 
 #ifndef __NO_STUBS
 #define __ARCH_WANT_OLD_READDIR
index ce1d54c8a433a6b866977beeb9c6681e0db64746..3e094af443c396a6ccf9f08628ecafc2a5d974ab 100644 (file)
@@ -176,7 +176,7 @@ struct bau_msg_payload {
 struct bau_msg_header {
        unsigned int dest_subnodeid:6;  /* must be 0x10, for the LB */
        /* bits 5:0 */
-       unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */
+       unsigned int base_dest_nodeid:15; /* nasid of the */
        /* bits 20:6 */                   /* first bit in uvhub map */
        unsigned int command:8; /* message type */
        /* bits 28:21 */
index a3c28ae4025b2c9a9422fa492faac821b7f221d3..8508bfe52296b18b5b3164acff6e2cf449a31792 100644 (file)
@@ -287,7 +287,7 @@ HYPERVISOR_fpu_taskswitch(int set)
 static inline int
 HYPERVISOR_sched_op(int cmd, void *arg)
 {
-       return _hypercall2(int, sched_op_new, cmd, arg);
+       return _hypercall2(int, sched_op, cmd, arg);
 }
 
 static inline long
@@ -422,10 +422,17 @@ HYPERVISOR_set_segment_base(int reg, unsigned long value)
 #endif
 
 static inline int
-HYPERVISOR_suspend(unsigned long srec)
+HYPERVISOR_suspend(unsigned long start_info_mfn)
 {
-       return _hypercall3(int, sched_op, SCHEDOP_shutdown,
-                          SHUTDOWN_suspend, srec);
+       struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
+
+       /*
+        * For a PV guest the tools require that the start_info mfn be
+        * present in rdx/edx when the hypercall is made. Per the
+        * hypercall calling convention this is the third hypercall
+        * argument, which is start_info_mfn here.
+        */
+       return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
 }
 
 static inline int
index f25bdf238a3383c9ebceb88eb34966f7096fc0e4..c61934fbf22a4f0d1b8de6c4ff5b514f40e3f0e3 100644 (file)
@@ -29,8 +29,10 @@ typedef struct xpaddr {
 
 /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
 #define INVALID_P2M_ENTRY      (~0UL)
-#define FOREIGN_FRAME_BIT      (1UL<<31)
+#define FOREIGN_FRAME_BIT      (1UL<<(BITS_PER_LONG-1))
+#define IDENTITY_FRAME_BIT     (1UL<<(BITS_PER_LONG-2))
 #define FOREIGN_FRAME(m)       ((m) | FOREIGN_FRAME_BIT)
+#define IDENTITY_FRAME(m)      ((m) | IDENTITY_FRAME_BIT)
 
 /* Maximum amount of memory we can handle in a domain in pages */
 #define MAX_DOMAIN_PAGES                                               \
@@ -41,12 +43,18 @@ extern unsigned int   machine_to_phys_order;
 
 extern unsigned long get_phys_to_machine(unsigned long pfn);
 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern unsigned long set_phys_range_identity(unsigned long pfn_s,
+                                            unsigned long pfn_e);
 
 extern int m2p_add_override(unsigned long mfn, struct page *page);
 extern int m2p_remove_override(struct page *page);
 extern struct page *m2p_find_override(unsigned long mfn);
 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
 
+#ifdef CONFIG_XEN_DEBUG_FS
+extern int p2m_dump_show(struct seq_file *m, void *v);
+#endif
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
 {
        unsigned long mfn;
@@ -57,7 +65,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
        mfn = get_phys_to_machine(pfn);
 
        if (mfn != INVALID_P2M_ENTRY)
-               mfn &= ~FOREIGN_FRAME_BIT;
+               mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
 
        return mfn;
 }
@@ -73,25 +81,44 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
 static inline unsigned long mfn_to_pfn(unsigned long mfn)
 {
        unsigned long pfn;
+       int ret = 0;
 
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;
 
+       if (unlikely((mfn >> machine_to_phys_order) != 0)) {
+               pfn = ~0;
+               goto try_override;
+       }
        pfn = 0;
        /*
         * The array access can fail (e.g., device space beyond end of RAM).
         * In such cases it doesn't matter what we return (we return garbage),
         * but we must handle the fault without crashing!
         */
-       __get_user(pfn, &machine_to_phys_mapping[mfn]);
-
-       /*
-        * If this appears to be a foreign mfn (because the pfn
-        * doesn't map back to the mfn), then check the local override
-        * table to see if there's a better pfn to use.
+       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+try_override:
+       /* ret might be < 0 if there are no entries in the m2p for mfn */
+       if (ret < 0)
+               pfn = ~0;
+       else if (get_phys_to_machine(pfn) != mfn)
+               /*
+                * If this appears to be a foreign mfn (because the pfn
+                * doesn't map back to the mfn), then check the local override
+                * table to see if there's a better pfn to use.
+                *
+                * m2p_find_override_pfn returns ~0 if it doesn't find anything.
+                */
+               pfn = m2p_find_override_pfn(mfn, ~0);
+
+       /*

+        * pfn is ~0 if there are no entries in the m2p for mfn or if the
+        * entry doesn't map back to the mfn and m2p_override doesn't have a
+        * valid entry for it.
         */
-       if (get_phys_to_machine(pfn) != mfn)
-               pfn = m2p_find_override_pfn(mfn, pfn);
+       if (pfn == ~0 &&
+                       get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn))
+               pfn = mfn;
 
        return pfn;
 }
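
The reworked mfn_to_pfn() above tries, in order: a bounds check against machine_to_phys_order, the m2p entry itself, the m2p_override table when the p2m does not map back, and finally the new identity-frame marker. A compact userspace model of that fallback order, with tiny invented tables standing in for the Xen m2p/p2m (the override step is reduced to "give up"):

    #include <stdio.h>

    #define TBL             8
    #define INVALID         (~0UL)
    #define IDENTITY_BIT    (1UL << (sizeof(unsigned long) * 8 - 2))

    static unsigned long m2p[TBL] = { 0, 1, INVALID, 3, 4, 5, 6, 7 };
    static unsigned long p2m[TBL] = { 0, 1, 2, 3, IDENTITY_BIT | 4, 5, 6, 7 };

    static unsigned long model_mfn_to_pfn(unsigned long mfn)
    {
            unsigned long pfn = INVALID;

            if (mfn < TBL)                  /* machine_to_phys_order check */
                    pfn = m2p[mfn];
            if (pfn < TBL && p2m[pfn] != mfn)
                    pfn = INVALID;          /* foreign; m2p_override tried here */
            if (pfn == INVALID && mfn < TBL &&
                p2m[mfn] == (IDENTITY_BIT | mfn))
                    pfn = mfn;              /* 1:1 identity-mapped range */
            return pfn;
    }

    int main(void)
    {
            printf("mfn 4 -> pfn %ld (identity)\n", (long)model_mfn_to_pfn(4));
            printf("mfn 2 -> pfn %ld (no entry)\n", (long)model_mfn_to_pfn(2));
            return 0;
    }
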
index 2329b3eaf8d36e71c72de4cb5221c7442dc41473..aa86209891622f1a871970cc758efc8ea28f935a 100644 (file)
@@ -27,16 +27,16 @@ static inline void __init xen_setup_pirqs(void)
  * its own functions.
  */
 struct xen_pci_frontend_ops {
-       int (*enable_msi)(struct pci_dev *dev, int **vectors);
+       int (*enable_msi)(struct pci_dev *dev, int vectors[]);
        void (*disable_msi)(struct pci_dev *dev);
-       int (*enable_msix)(struct pci_dev *dev, int **vectors, int nvec);
+       int (*enable_msix)(struct pci_dev *dev, int vectors[], int nvec);
        void (*disable_msix)(struct pci_dev *dev);
 };
 
 extern struct xen_pci_frontend_ops *xen_pci_frontend;
 
 static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
-                                             int **vectors)
+                                             int vectors[])
 {
        if (xen_pci_frontend && xen_pci_frontend->enable_msi)
                return xen_pci_frontend->enable_msi(dev, vectors);
@@ -48,7 +48,7 @@ static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
                        xen_pci_frontend->disable_msi(dev);
 }
 static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
-                                              int **vectors, int nvec)
+                                              int vectors[], int nvec)
 {
        if (xen_pci_frontend && xen_pci_frontend->enable_msix)
                return xen_pci_frontend->enable_msix(dev, vectors, nvec);
index b3a71137983a53b29a9571697ea0b270dec7c186..9a966c579af521c5206a0bb40131b1a20489c879 100644 (file)
@@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata;
 int acpi_sci_override_gsi __initdata;
 int acpi_skip_timer_override __initdata;
 int acpi_use_timer_override __initdata;
+int acpi_fix_pin2_polarity __initdata;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -415,10 +416,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
                return 0;
        }
 
-       if (acpi_skip_timer_override &&
-           intsrc->source_irq == 0 && intsrc->global_irq == 2) {
-               printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
-               return 0;
+       if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+               if (acpi_skip_timer_override) {
+                       printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+                       return 0;
+               }
+               if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+                       intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
+                       printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to active high.\n");
+               }
        }
 
        mp_override_legacy_irq(intsrc->source_irq,
@@ -589,14 +595,8 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
        nid = acpi_get_node(handle);
        if (nid == -1 || !node_online(nid))
                return;
-#ifdef CONFIG_X86_64
-       apicid_to_node[physid] = nid;
+       set_apicid_to_node(physid, nid);
        numa_set_node(cpu, nid);
-#else /* CONFIG_X86_32 */
-       apicid_2_node[physid] = nid;
-       cpu_to_node_map[cpu] = nid;
-#endif
-
 #endif
 }
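
The acpi_fix_pin2_polarity hunk above clears any explicit polarity from the BIOS's IRQ0->pin2 override, leaving "conforms to bus" (active high for ISA). A small model of the flag surgery; the mask is the standard ACPI MADT polarity field (bits 1:0), and the sample flags are invented:

    #include <stdio.h>

    #define ACPI_MADT_POLARITY_MASK 0x3u

    static unsigned int fix_pin2_polarity(unsigned int inti_flags)
    {
            if (inti_flags & ACPI_MADT_POLARITY_MASK)
                    inti_flags &= ~ACPI_MADT_POLARITY_MASK;
            return inti_flags;
    }

    int main(void)
    {
            /* 0x000f: polarity = active low (3), trigger = level (3) */
            printf("0x%04x -> 0x%04x\n", 0x000fu, fix_pin2_polarity(0x000fu));
            return 0;
    }
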
 
index 0a99f7198bc373a8a2b45da3518234823786a590..ed3c2e5b714a8409deec1c67dd9b1e17acc952cc 100644 (file)
@@ -12,7 +12,7 @@
 
 static u32 *flush_words;
 
-struct pci_device_id amd_nb_misc_ids[] = {
+const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
@@ -20,6 +20,11 @@ struct pci_device_id amd_nb_misc_ids[] = {
 };
 EXPORT_SYMBOL(amd_nb_misc_ids);
 
+static struct pci_device_id amd_nb_link_ids[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) },
+       {}
+};
+
 const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
        { 0x00, 0x18, 0x20 },
        { 0xff, 0x00, 0x20 },
@@ -31,7 +36,7 @@ struct amd_northbridge_info amd_northbridges;
 EXPORT_SYMBOL(amd_northbridges);
 
 static struct pci_dev *next_northbridge(struct pci_dev *dev,
-                                       struct pci_device_id *ids)
+                                       const struct pci_device_id *ids)
 {
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
@@ -45,7 +50,7 @@ int amd_cache_northbridges(void)
 {
        int i = 0;
        struct amd_northbridge *nb;
-       struct pci_dev *misc;
+       struct pci_dev *misc, *link;
 
        if (amd_nb_num())
                return 0;
@@ -64,10 +69,12 @@ int amd_cache_northbridges(void)
        amd_northbridges.nb = nb;
        amd_northbridges.num = i;
 
-       misc = NULL;
+       link = misc = NULL;
        for (i = 0; i != amd_nb_num(); i++) {
                node_to_amd_nb(i)->misc = misc =
                        next_northbridge(misc, amd_nb_misc_ids);
+               node_to_amd_nb(i)->link = link =
+                       next_northbridge(link, amd_nb_link_ids);
        }
 
        /* some CPU families (e.g. family 0x11) do not support GART */
@@ -85,6 +92,13 @@ int amd_cache_northbridges(void)
             boot_cpu_data.x86_mask >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
 
+       if (boot_cpu_data.x86 == 0x15)
+               amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
+
+       /* L3 cache partitioning is supported on family 0x15 */
+       if (boot_cpu_data.x86 == 0x15)
+               amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(amd_cache_northbridges);
@@ -93,8 +107,9 @@ EXPORT_SYMBOL_GPL(amd_cache_northbridges);
    they're useless anyway */
 int __init early_is_amd_nb(u32 device)
 {
-       struct pci_device_id *id;
+       const struct pci_device_id *id;
        u32 vendor = device & 0xffff;
+
        device >>= 16;
        for (id = amd_nb_misc_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
@@ -102,6 +117,65 @@ int __init early_is_amd_nb(u32 device)
        return 0;
 }
 
+int amd_get_subcaches(int cpu)
+{
+       struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
+       unsigned int mask;
+       int cuid = 0;
+
+       if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+               return 0;
+
+       pci_read_config_dword(link, 0x1d4, &mask);
+
+#ifdef CONFIG_SMP
+       cuid = cpu_data(cpu).compute_unit_id;
+#endif
+       return (mask >> (4 * cuid)) & 0xf;
+}
+
+int amd_set_subcaches(int cpu, int mask)
+{
+       static unsigned int reset, ban;
+       struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
+       unsigned int reg;
+       int cuid = 0;
+
+       if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
+               return -EINVAL;
+
+       /* if necessary, collect reset state of L3 partitioning and BAN mode */
+       if (reset == 0) {
+               pci_read_config_dword(nb->link, 0x1d4, &reset);
+               pci_read_config_dword(nb->misc, 0x1b8, &ban);
+               ban &= 0x180000;
+       }
+
+       /* deactivate BAN mode if any subcaches are to be disabled */
+       if (mask != 0xf) {
+               pci_read_config_dword(nb->misc, 0x1b8, &reg);
+               pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
+       }
+
+#ifdef CONFIG_SMP
+       cuid = cpu_data(cpu).compute_unit_id;
+#endif
+       mask <<= 4 * cuid;
+       mask |= (0xf ^ (1 << cuid)) << 26;
+
+       pci_write_config_dword(nb->link, 0x1d4, mask);
+
+       /* reset BAN mode if L3 partitioning returned to reset state */
+       pci_read_config_dword(nb->link, 0x1d4, &reg);
+       if (reg == reset) {
+               pci_read_config_dword(nb->misc, 0x1b8, &reg);
+               reg &= ~0x180000;
+               pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
+       }
+
+       return 0;
+}
+
 int amd_cache_gart(void)
 {
        int i;
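
amd_get_subcaches()/amd_set_subcaches() above treat the 0x1d4 register as an array of 4-bit subcache masks, one per compute unit. The arithmetic, minus the PCI accesses and BAN-mode bookkeeping, reduces to the shift/mask model below; the register contents are invented:

    #include <stdio.h>

    static unsigned int l3_reg = 0xffff; /* reset: all 4 subcaches, 4 CUs */

    static int get_subcaches(int cuid)
    {
            return (l3_reg >> (4 * cuid)) & 0xf;
    }

    static void set_subcaches(int cuid, unsigned int mask)
    {
            unsigned int reg = l3_reg & ~(0xfu << (4 * cuid));

            l3_reg = reg | (mask << (4 * cuid));
    }

    int main(void)
    {
            set_subcaches(1, 0x3); /* CU 1 keeps only subcaches 0 and 1 */
            printf("CU0=%x CU1=%x\n", get_subcaches(0), get_subcaches(1));
            return 0;
    }
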
index 671d5aad7a0c9999f662d7c404e55b1aac99d017..1293c709ee856599e02dd1570bbae71f392d4d9a 100644 (file)
@@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void)
        memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
 
        if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
-               apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
+               adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
                global_clock_event = &adev->evt;
                printk(KERN_DEBUG "%s clockevent registered as global\n",
                       global_clock_event->name);
index 5955a7800a96637d2336b1a568f2f9882ad8689b..7b1e8e10b89c926dbd4d0ce988357cb685a916c5 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/mmzone.h>
 #include <linux/pci_ids.h>
 #include <linux/pci.h>
@@ -57,7 +57,7 @@ static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
 static u32 __init allocate_aperture(void)
 {
        u32 aper_size;
-       void *p;
+       unsigned long addr;
 
        /* aper_size should <= 1G */
        if (fallback_aper_order > 5)
@@ -83,27 +83,26 @@ static u32 __init allocate_aperture(void)
         * so don't place the gart iommu below 512M; leave that space for
         * kernel code, to be safe
         */
-       p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20);
+       addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20);
+       if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) {
+               printk(KERN_ERR
+                       "Cannot allocate aperture memory hole (%lx,%uK)\n",
+                               addr, aper_size>>10);
+               return 0;
+       }
+       memblock_x86_reserve_range(addr, addr + aper_size, "aperture64");
        /*
         * Kmemleak should not scan this block as it may not be mapped via the
         * kernel direct mapping.
         */
-       kmemleak_ignore(p);
-       if (!p || __pa(p)+aper_size > 0xffffffff) {
-               printk(KERN_ERR
-                       "Cannot allocate aperture memory hole (%p,%uK)\n",
-                               p, aper_size>>10);
-               if (p)
-                       free_bootmem(__pa(p), aper_size);
-               return 0;
-       }
+       kmemleak_ignore(phys_to_virt(addr));
        printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
-                       aper_size >> 10, __pa(p));
-       insert_aperture_resource((u32)__pa(p), aper_size);
-       register_nosave_region((u32)__pa(p) >> PAGE_SHIFT,
-                               (u32)__pa(p+aper_size) >> PAGE_SHIFT);
+                       aper_size >> 10, addr);
+       insert_aperture_resource((u32)addr, aper_size);
+       register_nosave_region(addr >> PAGE_SHIFT,
+                              (addr+aper_size) >> PAGE_SHIFT);
 
-       return (u32)__pa(p);
+       return (u32)addr;
 }
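
allocate_aperture() now asks memblock for an aper_size-aligned hole in [512M, 4G) instead of going through bootmem. A rough model of that "find aligned range within bounds" search over an invented free list; real memblock has its own iteration order and a separate reservation step:

    #include <stdint.h>
    #include <stdio.h>

    /* alignment must be a power of two, as aperture sizes are */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    struct range { uint64_t start, end; };

    static uint64_t find_in_range(const struct range *free, int n,
                                  uint64_t lo, uint64_t hi, uint64_t size)
    {
            for (int i = 0; i < n; i++) {
                    uint64_t s = ALIGN_UP(free[i].start < lo ? lo
                                                             : free[i].start, size);
                    uint64_t e = free[i].end < hi ? free[i].end : hi;

                    if (s + size <= e)
                            return s;       /* aligned hole that fits */
            }
            return ~0ULL;                   /* MEMBLOCK_ERROR analogue */
    }

    int main(void)
    {
            struct range free[] = { { 0x10000000, 0x30000000 },
                                    { 0x40000000, 0xc0000000 } };
            uint64_t addr = find_in_range(free, 2, 512ULL << 20,
                                          1ULL << 32, 64ULL << 20);

            printf("aperture @ %#llx\n", (unsigned long long)addr);
            return 0;
    }
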
 
 
index ffbf7c21bbc66d897a0da3d4b0bfb09de49eb270..966673f44141ef1db7d5f8e1ab198fc28b53f6df 100644 (file)
@@ -79,6 +79,15 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
 #ifdef CONFIG_X86_32
+
+/*
+ * On x86_32, the mapping between cpu and logical apicid may vary
+ * depending on apic in use.  The following early percpu variable is
+ * used for the mapping.  This is where the behaviors of x86_64 and 32
+ * actually diverge.  Let's keep it ugly for now.
+ */
+DEFINE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid, BAD_APICID);
+
 /*
  * Knob to control our willingness to enable the local APIC.
  *
@@ -1217,6 +1226,19 @@ void __cpuinit setup_local_APIC(void)
         */
        apic->init_apic_ldr();
 
+#ifdef CONFIG_X86_32
+       /*
+        * APIC LDR is initialized.  If logical_apicid mapping was
+        * initialized during get_smp_config(), make sure it matches the
+        * actual value.
+        */
+       i = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
+       WARN_ON(i != BAD_APICID && i != logical_smp_processor_id());
+       /* always use the value from LDR */
+       early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
+               logical_smp_processor_id();
+#endif
+
        /*
         * Set Task Priority to 'accept all'. We never change this
         * later on.
@@ -1910,17 +1932,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
 {
        int cpu;
 
-       /*
-        * Validate version
-        */
-       if (version == 0x0) {
-               pr_warning("BIOS bug, APIC version is 0 for CPU#%d! "
-                          "fixing up to 0x10. (tell your hw vendor)\n",
-                               version);
-               version = 0x10;
-       }
-       apic_version[apicid] = version;
-
        if (num_processors >= nr_cpu_ids) {
                int max = nr_cpu_ids;
                int thiscpu = max + disabled_cpus;
@@ -1934,22 +1945,34 @@ void __cpuinit generic_processor_info(int apicid, int version)
        }
 
        num_processors++;
-       cpu = cpumask_next_zero(-1, cpu_present_mask);
-
-       if (version != apic_version[boot_cpu_physical_apicid])
-               WARN_ONCE(1,
-                       "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
-                       apic_version[boot_cpu_physical_apicid], cpu, version);
-
-       physid_set(apicid, phys_cpu_present_map);
        if (apicid == boot_cpu_physical_apicid) {
                /*
                 * x86_bios_cpu_apicid is required to have processors listed
                 * in same order as logical cpu numbers. Hence the first
                 * entry is BSP, and so on.
+                * boot_cpu_init() already holds bit 0 in cpu_present_mask
+                * for BSP.
                 */
                cpu = 0;
+       } else
+               cpu = cpumask_next_zero(-1, cpu_present_mask);
+
+       /*
+        * Validate version
+        */
+       if (version == 0x0) {
+               pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
+                          cpu, apicid);
+               version = 0x10;
        }
+       apic_version[apicid] = version;
+
+       if (version != apic_version[boot_cpu_physical_apicid]) {
+               pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
+                       apic_version[boot_cpu_physical_apicid], cpu, version);
+       }
+
+       physid_set(apicid, phys_cpu_present_map);
        if (apicid > max_physical_apicid)
                max_physical_apicid = apicid;
 
@@ -1957,7 +1980,10 @@ void __cpuinit generic_processor_info(int apicid, int version)
        early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
        early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
 #endif
-
+#ifdef CONFIG_X86_32
+       early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
+               apic->x86_32_early_logical_apicid(cpu);
+#endif
        set_cpu_possible(cpu, true);
        set_cpu_present(cpu, true);
 }
@@ -1978,10 +2004,14 @@ void default_init_apic_ldr(void)
 }
 
 #ifdef CONFIG_X86_32
-int default_apicid_to_node(int logical_apicid)
+int default_x86_32_numa_cpu_node(int cpu)
 {
-#ifdef CONFIG_SMP
-       return apicid_2_node[hard_smp_processor_id()];
+#ifdef CONFIG_NUMA
+       int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
+
+       if (apicid != BAD_APICID)
+               return __apicid_to_node[apicid];
+       return NUMA_NO_NODE;
 #else
        return 0;
 #endif
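
default_x86_32_numa_cpu_node() above brings 32-bit in line with the 64-bit scheme: CPU -> APIC ID via the early per-cpu table, then APIC ID -> node via the shared __apicid_to_node[] array. A self-contained model with hypothetical table contents:

    #include <stdio.h>

    #define BAD_APICID      0xffff
    #define NUMA_NO_NODE    (-1)
    #define MAX_APICID      16

    static int apicid_to_node[MAX_APICID] = {
            0, 0, 1, 1, [4 ... 15] = NUMA_NO_NODE
    };
    static int cpu_to_apicid[4] = { 0, 2, BAD_APICID, 3 };

    static int numa_cpu_node(int cpu)
    {
            int apicid = cpu_to_apicid[cpu];

            if (apicid != BAD_APICID)
                    return apicid_to_node[apicid];
            return NUMA_NO_NODE;
    }

    int main(void)
    {
            for (int cpu = 0; cpu < 4; cpu++)
                    printf("cpu%d -> node %d\n", cpu, numa_cpu_node(cpu));
            return 0;
    }
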
index 09d3b17ce0c2479ad0ba388f1b2840d6beb8972e..5652d31fe108a74206c71af583b79c11d4891777 100644 (file)
@@ -185,8 +185,6 @@ struct apic apic_flat =  {
        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
-       .apicid_to_node                 = NULL,
-       .cpu_to_logical_apicid          = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
@@ -337,8 +335,6 @@ struct apic apic_physflat =  {
        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
-       .apicid_to_node                 = NULL,
-       .cpu_to_logical_apicid          = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
index e31b9ffe25f5e1f84c62593c8441f765f80c0158..f1baa2dc087ac1b6b880c60c896fb05fb099be69 100644 (file)
@@ -54,11 +54,6 @@ static u64 noop_apic_icr_read(void)
        return 0;
 }
 
-static int noop_cpu_to_logical_apicid(int cpu)
-{
-       return 0;
-}
-
 static int noop_phys_pkg_id(int cpuid_apic, int index_msb)
 {
        return 0;
@@ -113,12 +108,6 @@ static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
        cpumask_set_cpu(cpu, retmask);
 }
 
-int noop_apicid_to_node(int logical_apicid)
-{
-       /* we're always on node 0 */
-       return 0;
-}
-
 static u32 noop_apic_read(u32 reg)
 {
        WARN_ON_ONCE((cpu_has_apic && !disable_apic));
@@ -130,6 +119,14 @@ static void noop_apic_write(u32 reg, u32 v)
        WARN_ON_ONCE(cpu_has_apic && !disable_apic);
 }
 
+#ifdef CONFIG_X86_32
+static int noop_x86_32_numa_cpu_node(int cpu)
+{
+       /* we're always on node 0 */
+       return 0;
+}
+#endif
+
 struct apic apic_noop = {
        .name                           = "noop",
        .probe                          = noop_probe,
@@ -153,9 +150,7 @@ struct apic apic_noop = {
        .ioapic_phys_id_map             = default_ioapic_phys_id_map,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
-       .apicid_to_node                 = noop_apicid_to_node,
 
-       .cpu_to_logical_apicid          = noop_cpu_to_logical_apicid,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = physid_set_mask_of_physid,
 
@@ -197,4 +192,9 @@ struct apic apic_noop = {
        .icr_write                      = noop_apic_icr_write,
        .wait_icr_idle                  = noop_apic_wait_icr_idle,
        .safe_wait_icr_idle             = noop_safe_apic_wait_icr_idle,
+
+#ifdef CONFIG_X86_32
+       .x86_32_early_logical_apicid    = noop_x86_32_early_logical_apicid,
+       .x86_32_numa_cpu_node           = noop_x86_32_numa_cpu_node,
+#endif
 };
index cb804c5091b941e8b362a5abc0f2ab44fb16c943..541a2e43165942490453aa315eb6bbf9f7accbae 100644 (file)
@@ -45,6 +45,12 @@ static unsigned long bigsmp_check_apicid_present(int bit)
        return 1;
 }
 
+static int bigsmp_early_logical_apicid(int cpu)
+{
+       /* on bigsmp, logical apicid is the same as physical */
+       return early_per_cpu(x86_cpu_to_apicid, cpu);
+}
+
 static inline unsigned long calculate_ldr(int cpu)
 {
        unsigned long val, id;
@@ -80,11 +86,6 @@ static void bigsmp_setup_apic_routing(void)
                nr_ioapics);
 }
 
-static int bigsmp_apicid_to_node(int logical_apicid)
-{
-       return apicid_2_node[hard_smp_processor_id()];
-}
-
 static int bigsmp_cpu_present_to_apicid(int mps_cpu)
 {
        if (mps_cpu < nr_cpu_ids)
@@ -93,14 +94,6 @@ static int bigsmp_cpu_present_to_apicid(int mps_cpu)
        return BAD_APICID;
 }
 
-/* Mapping from cpu number to logical apicid */
-static inline int bigsmp_cpu_to_logical_apicid(int cpu)
-{
-       if (cpu >= nr_cpu_ids)
-               return BAD_APICID;
-       return cpu_physical_id(cpu);
-}
-
 static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 {
        /* For clustered we don't have a good way to do this yet - hack */
@@ -115,7 +108,11 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid)
 /* As we are using single CPU as destination, pick only one CPU here */
 static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-       return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
+       int cpu = cpumask_first(cpumask);
+
+       if (cpu < nr_cpu_ids)
+               return cpu_physical_id(cpu);
+       return BAD_APICID;
 }
 
 static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -129,9 +126,9 @@ static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
-                       break;
+                       return cpu_physical_id(cpu);
        }
-       return bigsmp_cpu_to_logical_apicid(cpu);
+       return BAD_APICID;
 }
 
 static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
@@ -219,8 +216,6 @@ struct apic apic_bigsmp = {
        .ioapic_phys_id_map             = bigsmp_ioapic_phys_id_map,
        .setup_apic_routing             = bigsmp_setup_apic_routing,
        .multi_timer_check              = NULL,
-       .apicid_to_node                 = bigsmp_apicid_to_node,
-       .cpu_to_logical_apicid          = bigsmp_cpu_to_logical_apicid,
        .cpu_present_to_apicid          = bigsmp_cpu_present_to_apicid,
        .apicid_to_cpu_present          = physid_set_mask_of_physid,
        .setup_portio_remap             = NULL,
@@ -256,4 +251,7 @@ struct apic apic_bigsmp = {
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+
+       .x86_32_early_logical_apicid    = bigsmp_early_logical_apicid,
+       .x86_32_numa_cpu_node           = default_x86_32_numa_cpu_node,
 };
index 8593582d8022bed84972fe6bde245035778d7c80..3e9de4854c5b5253bf041ef60dbc3d594b665b80 100644 (file)
@@ -460,6 +460,12 @@ static unsigned long es7000_check_apicid_present(int bit)
        return physid_isset(bit, phys_cpu_present_map);
 }
 
+static int es7000_early_logical_apicid(int cpu)
+{
+       /* on es7000, logical apicid is the same as physical */
+       return early_per_cpu(x86_bios_cpu_apicid, cpu);
+}
+
 static unsigned long calculate_ldr(int cpu)
 {
        unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu);
@@ -504,12 +510,11 @@ static void es7000_setup_apic_routing(void)
                nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
 }
 
-static int es7000_apicid_to_node(int logical_apicid)
+static int es7000_numa_cpu_node(int cpu)
 {
        return 0;
 }
 
-
 static int es7000_cpu_present_to_apicid(int mps_cpu)
 {
        if (!mps_cpu)
@@ -528,18 +533,6 @@ static void es7000_apicid_to_cpu_present(int phys_apicid, physid_mask_t *retmap)
        ++cpu_id;
 }
 
-/* Mapping from cpu number to logical apicid */
-static int es7000_cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
-       if (cpu >= nr_cpu_ids)
-               return BAD_APICID;
-       return cpu_2_logical_apicid[cpu];
-#else
-       return logical_smp_processor_id();
-#endif
-}
-
 static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 {
        /* For clustered we don't have a good way to do this yet - hack */
@@ -561,7 +554,7 @@ static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
         * The cpus in the mask must all be on the apic cluster.
         */
        for_each_cpu(cpu, cpumask) {
-               int new_apicid = es7000_cpu_to_logical_apicid(cpu);
+               int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
 
                if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
                        WARN(1, "Not a valid mask!");
@@ -578,7 +571,7 @@ static unsigned int
 es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
                              const struct cpumask *andmask)
 {
-       int apicid = es7000_cpu_to_logical_apicid(0);
+       int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
        cpumask_var_t cpumask;
 
        if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
@@ -655,8 +648,6 @@ struct apic __refdata apic_es7000_cluster = {
        .ioapic_phys_id_map             = es7000_ioapic_phys_id_map,
        .setup_apic_routing             = es7000_setup_apic_routing,
        .multi_timer_check              = NULL,
-       .apicid_to_node                 = es7000_apicid_to_node,
-       .cpu_to_logical_apicid          = es7000_cpu_to_logical_apicid,
        .cpu_present_to_apicid          = es7000_cpu_present_to_apicid,
        .apicid_to_cpu_present          = es7000_apicid_to_cpu_present,
        .setup_portio_remap             = NULL,
@@ -695,6 +686,9 @@ struct apic __refdata apic_es7000_cluster = {
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+
+       .x86_32_early_logical_apicid    = es7000_early_logical_apicid,
+       .x86_32_numa_cpu_node           = es7000_numa_cpu_node,
 };
 
 struct apic __refdata apic_es7000 = {
@@ -720,8 +714,6 @@ struct apic __refdata apic_es7000 = {
        .ioapic_phys_id_map             = es7000_ioapic_phys_id_map,
        .setup_apic_routing             = es7000_setup_apic_routing,
        .multi_timer_check              = NULL,
-       .apicid_to_node                 = es7000_apicid_to_node,
-       .cpu_to_logical_apicid          = es7000_cpu_to_logical_apicid,
        .cpu_present_to_apicid          = es7000_cpu_present_to_apicid,
        .apicid_to_cpu_present          = es7000_apicid_to_cpu_present,
        .setup_portio_remap             = NULL,
@@ -758,4 +750,7 @@ struct apic __refdata apic_es7000 = {
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+
+       .x86_32_early_logical_apicid    = es7000_early_logical_apicid,
+       .x86_32_numa_cpu_node           = es7000_numa_cpu_node,
 };
index 79fd43ca6f96103b94e5047acb7b3879ab42d4e7..c4e557a1ebb6feae34839531ee6752ec98995e4f 100644 (file)
@@ -83,7 +83,6 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
                arch_spin_lock(&lock);
                printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
                show_regs(regs);
-               dump_stack();
                arch_spin_unlock(&lock);
                cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
                return NOTIFY_STOP;
index 8d23e831a45e2413ed659c3a3cc6eb5354a096be..4b5ebd26f56547dc527192ea431a9cf378439ad7 100644 (file)
@@ -187,7 +187,7 @@ int __init arch_early_irq_init(void)
        irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);
 
        for (i = 0; i < count; i++) {
-               set_irq_chip_data(i, &cfg[i]);
+               irq_set_chip_data(i, &cfg[i]);
                zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node);
                zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
                /*
@@ -206,7 +206,7 @@ int __init arch_early_irq_init(void)
 #ifdef CONFIG_SPARSE_IRQ
 static struct irq_cfg *irq_cfg(unsigned int irq)
 {
-       return get_irq_chip_data(irq);
+       return irq_get_chip_data(irq);
 }
 
 static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
@@ -232,7 +232,7 @@ static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
 {
        if (!cfg)
                return;
-       set_irq_chip_data(at, NULL);
+       irq_set_chip_data(at, NULL);
        free_cpumask_var(cfg->domain);
        free_cpumask_var(cfg->old_domain);
        kfree(cfg);
@@ -262,14 +262,14 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
        if (res < 0) {
                if (res != -EEXIST)
                        return NULL;
-               cfg = get_irq_chip_data(at);
+               cfg = irq_get_chip_data(at);
                if (cfg)
                        return cfg;
        }
 
        cfg = alloc_irq_cfg(at, node);
        if (cfg)
-               set_irq_chip_data(at, cfg);
+               irq_set_chip_data(at, cfg);
        else
                irq_free_desc(at);
        return cfg;
@@ -1185,7 +1185,7 @@ void __setup_vector_irq(int cpu)
        raw_spin_lock(&vector_lock);
        /* Mark the inuse vectors */
        for_each_active_irq(irq) {
-               cfg = get_irq_chip_data(irq);
+               cfg = irq_get_chip_data(irq);
                if (!cfg)
                        continue;
                /*
@@ -1240,35 +1240,31 @@ static inline int IO_APIC_irq_trigger(int irq)
 }
 #endif
 
-static void ioapic_register_intr(unsigned int irq, unsigned long trigger)
+static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
+                                unsigned long trigger)
 {
+       struct irq_chip *chip = &ioapic_chip;
+       irq_flow_handler_t hdl;
+       bool fasteoi;
 
        if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-           trigger == IOAPIC_LEVEL)
+           trigger == IOAPIC_LEVEL) {
                irq_set_status_flags(irq, IRQ_LEVEL);
-       else
+               fasteoi = true;
+       } else {
                irq_clear_status_flags(irq, IRQ_LEVEL);
+               fasteoi = false;
+       }
 
-       if (irq_remapped(get_irq_chip_data(irq))) {
+       if (irq_remapped(cfg)) {
                irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-               if (trigger)
-                       set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
-                                                     handle_fasteoi_irq,
-                                                    "fasteoi");
-               else
-                       set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
-                                                     handle_edge_irq, "edge");
-               return;
+               chip = &ir_ioapic_chip;
+               fasteoi = trigger != 0;
        }
 
-       if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-           trigger == IOAPIC_LEVEL)
-               set_irq_chip_and_handler_name(irq, &ioapic_chip,
-                                             handle_fasteoi_irq,
-                                             "fasteoi");
-       else
-               set_irq_chip_and_handler_name(irq, &ioapic_chip,
-                                             handle_edge_irq, "edge");
+       hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
+       irq_set_chip_and_handler_name(irq, chip, hdl,
+                                     fasteoi ? "fasteoi" : "edge");
 }
 
 static int setup_ioapic_entry(int apic_id, int irq,
@@ -1366,7 +1362,7 @@ static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq,
                return;
        }
 
-       ioapic_register_intr(irq, trigger);
+       ioapic_register_intr(irq, cfg, trigger);
        if (irq < legacy_pic->nr_legacy_irqs)
                legacy_pic->mask(irq);
 
@@ -1491,7 +1487,8 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
         * The timer IRQ doesn't have to know that behind the
         * scene we may have a 8259A-master in AEOI mode ...
         */
-       set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
+       irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
+                                     "edge");
 
        /*
         * Add it to the IO-APIC irq-routing table:
@@ -1598,7 +1595,7 @@ __apicdebuginit(void) print_IO_APIC(void)
        for_each_active_irq(irq) {
                struct irq_pin_list *entry;
 
-               cfg = get_irq_chip_data(irq);
+               cfg = irq_get_chip_data(irq);
                if (!cfg)
                        continue;
                entry = cfg->irq_2_pin;
@@ -2364,7 +2361,7 @@ static void irq_complete_move(struct irq_cfg *cfg)
 
 void irq_force_complete_move(int irq)
 {
-       struct irq_cfg *cfg = get_irq_chip_data(irq);
+       struct irq_cfg *cfg = irq_get_chip_data(irq);
 
        if (!cfg)
                return;
@@ -2378,7 +2375,7 @@ static inline void irq_complete_move(struct irq_cfg *cfg) { }
 static void ack_apic_edge(struct irq_data *data)
 {
        irq_complete_move(data->chip_data);
-       move_native_irq(data->irq);
+       irq_move_irq(data);
        ack_APIC_irq();
 }
 
@@ -2435,7 +2432,7 @@ static void ack_apic_level(struct irq_data *data)
        irq_complete_move(cfg);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        /* If we are moving the irq we need to mask it */
-       if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
+       if (unlikely(irqd_is_setaffinity_pending(data))) {
                do_unmask_irq = 1;
                mask_ioapic(cfg);
        }
@@ -2524,7 +2521,7 @@ static void ack_apic_level(struct irq_data *data)
                 * and you can go talk to the chipset vendor about it.
                 */
                if (!io_apic_level_ack_pending(cfg))
-                       move_masked_irq(irq);
+                       irq_move_masked_irq(data);
                unmask_ioapic(cfg);
        }
 }
@@ -2587,7 +2584,7 @@ static inline void init_IO_APIC_traps(void)
         * 0x80, because int 0x80 is hm, kind of importantish. ;)
         */
        for_each_active_irq(irq) {
-               cfg = get_irq_chip_data(irq);
+               cfg = irq_get_chip_data(irq);
                if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
                        /*
                         * Hmm.. We don't have an entry for this,
@@ -2598,7 +2595,7 @@ static inline void init_IO_APIC_traps(void)
                                legacy_pic->make_irq(irq);
                        else
                                /* Strange. Oh, well.. */
-                               set_irq_chip(irq, &no_irq_chip);
+                               irq_set_chip(irq, &no_irq_chip);
                }
        }
 }
@@ -2638,7 +2635,7 @@ static struct irq_chip lapic_chip __read_mostly = {
 static void lapic_register_intr(int irq)
 {
        irq_clear_status_flags(irq, IRQ_LEVEL);
-       set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
+       irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
                                      "edge");
 }
 
@@ -2722,7 +2719,7 @@ int timer_through_8259 __initdata;
  */
 static inline void __init check_timer(void)
 {
-       struct irq_cfg *cfg = get_irq_chip_data(0);
+       struct irq_cfg *cfg = irq_get_chip_data(0);
        int node = cpu_to_node(0);
        int apic1, pin1, apic2, pin2;
        unsigned long flags;
@@ -3033,7 +3030,7 @@ unsigned int create_irq_nr(unsigned int from, int node)
        raw_spin_unlock_irqrestore(&vector_lock, flags);
 
        if (ret) {
-               set_irq_chip_data(irq, cfg);
+               irq_set_chip_data(irq, cfg);
                irq_clear_status_flags(irq, IRQ_NOREQUEST);
        } else {
                free_irq_at(irq, cfg);
@@ -3058,7 +3055,7 @@ int create_irq(void)
 
 void destroy_irq(unsigned int irq)
 {
-       struct irq_cfg *cfg = get_irq_chip_data(irq);
+       struct irq_cfg *cfg = irq_get_chip_data(irq);
        unsigned long flags;
 
        irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
@@ -3092,7 +3089,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
 
        dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
 
-       if (irq_remapped(get_irq_chip_data(irq))) {
+       if (irq_remapped(cfg)) {
                struct irte irte;
                int ir_index;
                u16 sub_handle;
@@ -3264,6 +3261,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
 
 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
 {
+       struct irq_chip *chip = &msi_chip;
        struct msi_msg msg;
        int ret;
 
@@ -3271,14 +3269,15 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
        if (ret < 0)
                return ret;
 
-       set_irq_msi(irq, msidesc);
+       irq_set_msi_desc(irq, msidesc);
        write_msi_msg(irq, &msg);
 
-       if (irq_remapped(get_irq_chip_data(irq))) {
+       if (irq_remapped(irq_get_chip_data(irq))) {
                irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-               set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
-       } else
-               set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
+               chip = &msi_ir_chip;
+       }
+
+       irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
 
        dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
 
@@ -3396,8 +3395,8 @@ int arch_setup_dmar_msi(unsigned int irq)
        if (ret < 0)
                return ret;
        dmar_msi_write(irq, &msg);
-       set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
-               "edge");
+       irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
+                                     "edge");
        return 0;
 }
 #endif
@@ -3455,6 +3454,7 @@ static struct irq_chip hpet_msi_type = {
 
 int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
 {
+       struct irq_chip *chip = &hpet_msi_type;
        struct msi_msg msg;
        int ret;
 
@@ -3474,15 +3474,12 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
        if (ret < 0)
                return ret;
 
-       hpet_msi_write(get_irq_data(irq), &msg);
+       hpet_msi_write(irq_get_handler_data(irq), &msg);
        irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-       if (irq_remapped(get_irq_chip_data(irq)))
-               set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
-                                             handle_edge_irq, "edge");
-       else
-               set_irq_chip_and_handler_name(irq, &hpet_msi_type,
-                                             handle_edge_irq, "edge");
+       if (irq_remapped(irq_get_chip_data(irq)))
+               chip = &ir_hpet_msi_type;
 
+       irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
        return 0;
 }
 #endif
@@ -3569,7 +3566,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 
                write_ht_irq_msg(irq, &msg);
 
-               set_irq_chip_and_handler_name(irq, &ht_irq_chip,
+               irq_set_chip_and_handler_name(irq, &ht_irq_chip,
                                              handle_edge_irq, "edge");
 
                dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
@@ -3826,8 +3823,8 @@ int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
 void __init setup_ioapic_dest(void)
 {
        int pin, ioapic, irq, irq_entry;
-       struct irq_desc *desc;
        const struct cpumask *mask;
+       struct irq_data *idata;
 
        if (skip_ioapic_setup == 1)
                return;
@@ -3842,21 +3839,20 @@ void __init setup_ioapic_dest(void)
                if ((ioapic > 0) && (irq > 16))
                        continue;
 
-               desc = irq_to_desc(irq);
+               idata = irq_get_irq_data(irq);
 
                /*
                 * Honour affinities which have been set in early boot
                 */
-               if (desc->status &
-                   (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
-                       mask = desc->irq_data.affinity;
+               if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata))
+                       mask = idata->affinity;
                else
                        mask = apic->target_cpus();
 
                if (intr_remapping_enabled)
-                       ir_ioapic_set_affinity(&desc->irq_data, mask, false);
+                       ir_ioapic_set_affinity(idata, mask, false);
                else
-                       ioapic_set_affinity(&desc->irq_data, mask, false);
+                       ioapic_set_affinity(idata, mask, false);
        }
 
 }
@@ -4054,5 +4050,6 @@ void __init pre_init_apic_IRQ0(void)
        setup_local_APIC();
 
        io_apic_setup_irq_pin(0, 0, &attr);
-       set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
+       irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
+                                     "edge");
 }
index 08385e090a6f2fa830d796a67709ede023b1f6c5..cce91bf26676cddee4b5c42fa21016a801411079 100644 (file)
@@ -56,6 +56,8 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
        local_irq_restore(flags);
 }
 
+#ifdef CONFIG_X86_32
+
 void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
                                                 int vector)
 {
@@ -71,8 +73,8 @@ void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
        local_irq_save(flags);
        for_each_cpu(query_cpu, mask)
                __default_send_IPI_dest_field(
-                       apic->cpu_to_logical_apicid(query_cpu), vector,
-                       apic->dest_logical);
+                       early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+                       vector, apic->dest_logical);
        local_irq_restore(flags);
 }
 
@@ -90,14 +92,12 @@ void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
                if (query_cpu == this_cpu)
                        continue;
                __default_send_IPI_dest_field(
-                       apic->cpu_to_logical_apicid(query_cpu), vector,
-                       apic->dest_logical);
+                       early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+                       vector, apic->dest_logical);
        }
        local_irq_restore(flags);
 }
 
-#ifdef CONFIG_X86_32
-
 /*
  * This is only used on smaller machines.
  */
index 960f26ab5c9f24ed2f4587107faa2fcd457d782c..6273eee5134b7d81c8dea080886d150a3adca14f 100644 (file)
@@ -373,13 +373,6 @@ static inline void numaq_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask
        return physids_promote(0xFUL, retmap);
 }
 
-static inline int numaq_cpu_to_logical_apicid(int cpu)
-{
-       if (cpu >= nr_cpu_ids)
-               return BAD_APICID;
-       return cpu_2_logical_apicid[cpu];
-}
-
 /*
  * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
  * cpu to APIC ID relation to properly interact with the intelligent
@@ -398,6 +391,15 @@ static inline int numaq_apicid_to_node(int logical_apicid)
        return logical_apicid >> 4;
 }
 
+static int numaq_numa_cpu_node(int cpu)
+{
+       int logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
+
+       if (logical_apicid != BAD_APICID)
+               return numaq_apicid_to_node(logical_apicid);
+       return NUMA_NO_NODE;
+}
+
 static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap)
 {
        int node = numaq_apicid_to_node(logical_apicid);
@@ -508,8 +510,6 @@ struct apic __refdata apic_numaq = {
        .ioapic_phys_id_map             = numaq_ioapic_phys_id_map,
        .setup_apic_routing             = numaq_setup_apic_routing,
        .multi_timer_check              = numaq_multi_timer_check,
-       .apicid_to_node                 = numaq_apicid_to_node,
-       .cpu_to_logical_apicid          = numaq_cpu_to_logical_apicid,
        .cpu_present_to_apicid          = numaq_cpu_present_to_apicid,
        .apicid_to_cpu_present          = numaq_apicid_to_cpu_present,
        .setup_portio_remap             = numaq_setup_portio_remap,
@@ -547,4 +547,7 @@ struct apic __refdata apic_numaq = {
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+
+       .x86_32_early_logical_apicid    = noop_x86_32_early_logical_apicid,
+       .x86_32_numa_cpu_node           = numaq_numa_cpu_node,
 };
index 99d2fe01608420bef1646f206a476ae4a8e3ba26..fc84c7b61108086448295821abfc2daca5d0c391 100644 (file)
@@ -77,6 +77,11 @@ void __init default_setup_apic_routing(void)
                apic->setup_apic_routing();
 }
 
+static int default_x86_32_early_logical_apicid(int cpu)
+{
+       return 1 << cpu;
+}
+
 static void setup_apic_flat_routing(void)
 {
 #ifdef CONFIG_X86_IO_APIC
@@ -130,8 +135,6 @@ struct apic apic_default = {
        .ioapic_phys_id_map             = default_ioapic_phys_id_map,
        .setup_apic_routing             = setup_apic_flat_routing,
        .multi_timer_check              = NULL,
-       .apicid_to_node                 = default_apicid_to_node,
-       .cpu_to_logical_apicid          = default_cpu_to_logical_apicid,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = physid_set_mask_of_physid,
        .setup_portio_remap             = NULL,
@@ -167,6 +170,9 @@ struct apic apic_default = {
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+
+       .x86_32_early_logical_apicid    = default_x86_32_early_logical_apicid,
+       .x86_32_numa_cpu_node           = default_x86_32_numa_cpu_node,
 };
 
 extern struct apic apic_numaq;
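
The default_x86_32_early_logical_apicid() added above encodes the flat-logical-mode convention: the logical APIC ID is a one-bit-per-CPU mask, which is also why flat logical delivery tops out at 8 CPUs. A trivial demo of the mapping:

    #include <stdio.h>

    static int default_early_logical_apicid(int cpu)
    {
            return 1 << cpu;        /* bit position == CPU number */
    }

    int main(void)
    {
            for (int cpu = 0; cpu < 8; cpu++)
                    printf("cpu%d -> logical apicid 0x%02x\n",
                           cpu, default_early_logical_apicid(cpu));
            return 0;
    }
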
index 9b419263d90df0e4b1ebc5c7d7ced5c7c36f038b..e4b8059b414a800627c95eb9269d9c7f4fbd1774 100644 (file)
@@ -194,11 +194,10 @@ static unsigned long summit_check_apicid_present(int bit)
        return 1;
 }
 
-static void summit_init_apic_ldr(void)
+static int summit_early_logical_apicid(int cpu)
 {
-       unsigned long val, id;
        int count = 0;
-       u8 my_id = (u8)hard_smp_processor_id();
+       u8 my_id = early_per_cpu(x86_cpu_to_apicid, cpu);
        u8 my_cluster = APIC_CLUSTER(my_id);
 #ifdef CONFIG_SMP
        u8 lid;
@@ -206,7 +205,7 @@ static void summit_init_apic_ldr(void)
 
        /* Create logical APIC IDs by counting CPUs already in cluster. */
        for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
-               lid = cpu_2_logical_apicid[i];
+               lid = early_per_cpu(x86_cpu_to_logical_apicid, i);
                if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
                        ++count;
        }
@@ -214,7 +213,15 @@ static void summit_init_apic_ldr(void)
        /* We only have a 4 wide bitmap in cluster mode.  If a deranged
         * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
        BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
-       id = my_cluster | (1UL << count);
+       return my_cluster | (1UL << count);
+}
+
+static void summit_init_apic_ldr(void)
+{
+       int cpu = smp_processor_id();
+       unsigned long id = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
+       unsigned long val;
+
        apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
        val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
        val |= SET_APIC_LOGICAL_ID(id);
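
The refactored summit_early_logical_apicid() keeps the same algorithm the old init path used: count how many CPUs already sit in this APIC cluster and claim the next bit within it. A compilable user-space sketch of that assignment, assuming (as summit's 4-bit XAPIC clusters imply) that the upper nibble of the APIC ID selects the cluster:

    #include <stdio.h>

    #define APIC_CLUSTER(id)        ((id) & 0xf0)   /* upper nibble */

    int main(void)
    {
            /* hypothetical physical APIC IDs: two clusters of two CPUs */
            unsigned char phys[] = { 0x10, 0x11, 0x20, 0x21 };
            unsigned char logical[4];
            int i, j, count;

            for (i = 0; i < 4; i++) {
                    /* count CPUs already placed in this cluster */
                    for (count = 0, j = 0; j < i; j++)
                            if (APIC_CLUSTER(logical[j]) ==
                                APIC_CLUSTER(phys[i]))
                                    count++;
                    logical[i] = APIC_CLUSTER(phys[i]) | (1 << count);
                    printf("cpu %d: phys 0x%02x -> logical 0x%02x\n",
                           i, phys[i], logical[i]);
            }
            return 0;
    }
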
@@ -232,27 +239,6 @@ static void summit_setup_apic_routing(void)
                                                nr_ioapics);
 }
 
-static int summit_apicid_to_node(int logical_apicid)
-{
-#ifdef CONFIG_SMP
-       return apicid_2_node[hard_smp_processor_id()];
-#else
-       return 0;
-#endif
-}
-
-/* Mapping from cpu number to logical apicid */
-static inline int summit_cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
-       if (cpu >= nr_cpu_ids)
-               return BAD_APICID;
-       return cpu_2_logical_apicid[cpu];
-#else
-       return logical_smp_processor_id();
-#endif
-}
-
 static int summit_cpu_present_to_apicid(int mps_cpu)
 {
        if (mps_cpu < nr_cpu_ids)
@@ -286,7 +272,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
         * The cpus in the mask must all be on the apic cluster.
         */
        for_each_cpu(cpu, cpumask) {
-               int new_apicid = summit_cpu_to_logical_apicid(cpu);
+               int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
 
                if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
                        printk("%s: Not a valid mask!\n", __func__);
@@ -301,7 +287,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
 static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
                              const struct cpumask *andmask)
 {
-       int apicid = summit_cpu_to_logical_apicid(0);
+       int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
        cpumask_var_t cpumask;
 
        if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
@@ -528,8 +514,6 @@ struct apic apic_summit = {
        .ioapic_phys_id_map             = summit_ioapic_phys_id_map,
        .setup_apic_routing             = summit_setup_apic_routing,
        .multi_timer_check              = NULL,
-       .apicid_to_node                 = summit_apicid_to_node,
-       .cpu_to_logical_apicid          = summit_cpu_to_logical_apicid,
        .cpu_present_to_apicid          = summit_cpu_present_to_apicid,
        .apicid_to_cpu_present          = summit_apicid_to_cpu_present,
        .setup_portio_remap             = NULL,
@@ -565,4 +549,7 @@ struct apic apic_summit = {
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+
+       .x86_32_early_logical_apicid    = summit_early_logical_apicid,
+       .x86_32_numa_cpu_node           = default_x86_32_numa_cpu_node,
 };
index cf69c59f491011c0856da6d298192e667ab2a418..90949bbd566df155e6596c975caa9cff6e2f8a63 100644 (file)
@@ -206,8 +206,6 @@ struct apic apic_x2apic_cluster = {
        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
-       .apicid_to_node                 = NULL,
-       .cpu_to_logical_apicid          = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
index 8972f38c5ced332333ac014249f2a29ef335001b..c7e6d6645bf47f44df8d71419d120ad032b82275 100644 (file)
@@ -195,8 +195,6 @@ struct apic apic_x2apic_phys = {
        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
-       .apicid_to_node                 = NULL,
-       .cpu_to_logical_apicid          = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
index bd16b58b885057454a0dd2bdca185a265db2d101..3c289281394cff3e164891e56509fa9658412755 100644 (file)
@@ -338,8 +338,6 @@ struct apic __refdata apic_x2apic_uv_x = {
        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
-       .apicid_to_node                 = NULL,
-       .cpu_to_logical_apicid          = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
index cfa82c899f47942550e8103d5660473e22dee8f3..4f13fafc5264c8e18bd4f8f8e00b96927b69924c 100644 (file)
@@ -1,5 +1,70 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ */
+#define COMPILE_OFFSETS
+
+#include <linux/crypto.h>
+#include <linux/sched.h>
+#include <linux/stddef.h>
+#include <linux/hardirq.h>
+#include <linux/suspend.h>
+#include <linux/kbuild.h>
+#include <asm/processor.h>
+#include <asm/thread_info.h>
+#include <asm/sigframe.h>
+#include <asm/bootparam.h>
+#include <asm/suspend.h>
+
+#ifdef CONFIG_XEN
+#include <xen/interface/xen.h>
+#endif
+
 #ifdef CONFIG_X86_32
 # include "asm-offsets_32.c"
 #else
 # include "asm-offsets_64.c"
 #endif
+
+void common(void) {
+       BLANK();
+       OFFSET(TI_flags, thread_info, flags);
+       OFFSET(TI_status, thread_info, status);
+       OFFSET(TI_addr_limit, thread_info, addr_limit);
+       OFFSET(TI_preempt_count, thread_info, preempt_count);
+
+       BLANK();
+       OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+
+       BLANK();
+       OFFSET(pbe_address, pbe, address);
+       OFFSET(pbe_orig_address, pbe, orig_address);
+       OFFSET(pbe_next, pbe, next);
+
+#ifdef CONFIG_PARAVIRT
+       BLANK();
+       OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
+       OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
+       OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
+       OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
+       OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
+       OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
+       OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+       OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
+       OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
+#endif
+
+#ifdef CONFIG_XEN
+       BLANK();
+       OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
+       OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
+#endif
+
+       BLANK();
+       OFFSET(BP_scratch, boot_params, scratch);
+       OFFSET(BP_loadflags, boot_params, hdr.loadflags);
+       OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
+       OFFSET(BP_version, boot_params, hdr.version);
+       OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);
+}
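
For context, the OFFSET(), DEFINE() and BLANK() helpers used throughout common() come from <linux/kbuild.h>: they emit "->" marker lines into the compiler's assembly output, which the build post-processes (a sed pass in Kbuild) into #define lines in asm-offsets.h. A sketch of the mechanism, paraphrased rather than quoted from the header:

    /* sketch of the <linux/kbuild.h> helpers */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    #define BLANK() asm volatile("\n->" : : )

    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))

    /* e.g. OFFSET(TI_flags, thread_info, flags) lands in the .s file as
     *   ->TI_flags $8 offsetof(struct thread_info, flags)
     * and finally in asm-offsets.h as
     *   #define TI_flags 8
     */
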
index 1a4088dda37a9da32fd1a602c800256c00f0debd..c29d631af6fcf54bd8a1414255910e77d6c6426f 100644 (file)
@@ -1,26 +1,4 @@
-/*
- * Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed
- * to extract and format the required data.
- */
-
-#include <linux/crypto.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/personality.h>
-#include <linux/suspend.h>
-#include <linux/kbuild.h>
 #include <asm/ucontext.h>
-#include <asm/sigframe.h>
-#include <asm/pgtable.h>
-#include <asm/fixmap.h>
-#include <asm/processor.h>
-#include <asm/thread_info.h>
-#include <asm/bootparam.h>
-#include <asm/elf.h>
-#include <asm/suspend.h>
-
-#include <xen/interface/xen.h>
 
 #include <linux/lguest.h>
 #include "../../../drivers/lguest/lg.h"
@@ -51,21 +29,10 @@ void foo(void)
        OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
        BLANK();
 
-       OFFSET(TI_task, thread_info, task);
-       OFFSET(TI_exec_domain, thread_info, exec_domain);
-       OFFSET(TI_flags, thread_info, flags);
-       OFFSET(TI_status, thread_info, status);
-       OFFSET(TI_preempt_count, thread_info, preempt_count);
-       OFFSET(TI_addr_limit, thread_info, addr_limit);
-       OFFSET(TI_restart_block, thread_info, restart_block);
        OFFSET(TI_sysenter_return, thread_info, sysenter_return);
        OFFSET(TI_cpu, thread_info, cpu);
        BLANK();
 
-       OFFSET(GDS_size, desc_ptr, size);
-       OFFSET(GDS_address, desc_ptr, address);
-       BLANK();
-
        OFFSET(PT_EBX, pt_regs, bx);
        OFFSET(PT_ECX, pt_regs, cx);
        OFFSET(PT_EDX, pt_regs, dx);
@@ -85,42 +52,13 @@ void foo(void)
        OFFSET(PT_OLDSS,  pt_regs, ss);
        BLANK();
 
-       OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
        OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
        BLANK();
 
-       OFFSET(pbe_address, pbe, address);
-       OFFSET(pbe_orig_address, pbe, orig_address);
-       OFFSET(pbe_next, pbe, next);
-
        /* Offset from the sysenter stack to tss.sp0 */
        DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
                 sizeof(struct tss_struct));
 
-       DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
-       DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
-       DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
-
-       OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
-
-#ifdef CONFIG_PARAVIRT
-       BLANK();
-       OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
-       OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
-       OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
-       OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
-       OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
-       OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
-       OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
-       OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
-#endif
-
-#ifdef CONFIG_XEN
-       BLANK();
-       OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
-       OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
-#endif
-
 #if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
        BLANK();
        OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
@@ -139,11 +77,4 @@ void foo(void)
        OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode);
        OFFSET(LGUEST_PAGES_regs, lguest_pages, regs);
 #endif
-
-       BLANK();
-       OFFSET(BP_scratch, boot_params, scratch);
-       OFFSET(BP_loadflags, boot_params, hdr.loadflags);
-       OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
-       OFFSET(BP_version, boot_params, hdr.version);
-       OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);
 }
index 4a6aeedcd965bcff5968acfae61fe999091ff48e..e72a1194af22a76707a9f44865b8211b4b1e1e7c 100644 (file)
@@ -1,27 +1,4 @@
-/*
- * Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed to extract
- * and format the required data.
- */
-#define COMPILE_OFFSETS
-
-#include <linux/crypto.h>
-#include <linux/sched.h> 
-#include <linux/stddef.h>
-#include <linux/errno.h> 
-#include <linux/hardirq.h>
-#include <linux/suspend.h>
-#include <linux/kbuild.h>
-#include <asm/processor.h>
-#include <asm/segment.h>
-#include <asm/thread_info.h>
 #include <asm/ia32.h>
-#include <asm/bootparam.h>
-#include <asm/suspend.h>
-
-#include <xen/interface/xen.h>
-
-#include <asm/sigframe.h>
 
 #define __NO_STUBS 1
 #undef __SYSCALL
@@ -33,41 +10,19 @@ static char syscalls[] = {
 
 int main(void)
 {
-#define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry))
-       ENTRY(state);
-       ENTRY(flags); 
-       ENTRY(pid);
-       BLANK();
-#undef ENTRY
-#define ENTRY(entry) DEFINE(TI_ ## entry, offsetof(struct thread_info, entry))
-       ENTRY(flags);
-       ENTRY(addr_limit);
-       ENTRY(preempt_count);
-       ENTRY(status);
-#ifdef CONFIG_IA32_EMULATION
-       ENTRY(sysenter_return);
-#endif
-       BLANK();
-#undef ENTRY
 #ifdef CONFIG_PARAVIRT
-       BLANK();
-       OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
-       OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
-       OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
-       OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
-       OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
        OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame);
-       OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
        OFFSET(PV_CPU_usergs_sysret32, pv_cpu_ops, usergs_sysret32);
        OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
-       OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
        OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
-       OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
+       BLANK();
 #endif
 
-
 #ifdef CONFIG_IA32_EMULATION
-#define ENTRY(entry) DEFINE(IA32_SIGCONTEXT_ ## entry, offsetof(struct sigcontext_ia32, entry))
+       OFFSET(TI_sysenter_return, thread_info, sysenter_return);
+       BLANK();
+
+#define ENTRY(entry) OFFSET(IA32_SIGCONTEXT_ ## entry, sigcontext_ia32, entry)
        ENTRY(ax);
        ENTRY(bx);
        ENTRY(cx);
@@ -79,15 +34,12 @@ int main(void)
        ENTRY(ip);
        BLANK();
 #undef ENTRY
-       DEFINE(IA32_RT_SIGFRAME_sigcontext,
-              offsetof (struct rt_sigframe_ia32, uc.uc_mcontext));
+
+       OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
        BLANK();
 #endif
-       DEFINE(pbe_address, offsetof(struct pbe, address));
-       DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
-       DEFINE(pbe_next, offsetof(struct pbe, next));
-       BLANK();
-#define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry))
+
+#define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry)
        ENTRY(bx);
        ENTRY(bx);
        ENTRY(cx);
@@ -107,7 +59,8 @@ int main(void)
        ENTRY(flags);
        BLANK();
 #undef ENTRY
-#define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry))
+
+#define ENTRY(entry) OFFSET(saved_context_ ## entry, saved_context, entry)
        ENTRY(cr0);
        ENTRY(cr2);
        ENTRY(cr3);
@@ -115,26 +68,11 @@ int main(void)
        ENTRY(cr8);
        BLANK();
 #undef ENTRY
-       DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
-       BLANK();
-       DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
-       BLANK();
-       DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
 
+       OFFSET(TSS_ist, tss_struct, x86_tss.ist);
        BLANK();
-       OFFSET(BP_scratch, boot_params, scratch);
-       OFFSET(BP_loadflags, boot_params, hdr.loadflags);
-       OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
-       OFFSET(BP_version, boot_params, hdr.version);
-       OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);
 
-       BLANK();
-       DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
-#ifdef CONFIG_XEN
-       BLANK();
-       OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
-       OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
-#undef ENTRY
-#endif
+       DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
+
        return 0;
 }
index 13a389179514eb4b17c7d5fc6d5100e1996d0770..452932d3473077cabf505d00f96073319c93b983 100644 (file)
@@ -106,8 +106,8 @@ void __init setup_bios_corruption_check(void)
                addr += size;
        }
 
-       printk(KERN_INFO "Scanning %d areas for low memory corruption\n",
-              num_scan_areas);
+       if (num_scan_areas)
+               printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas);
 }
 
 
@@ -143,12 +143,12 @@ static void check_corruption(struct work_struct *dummy)
 {
        check_for_bios_corruption();
        schedule_delayed_work(&bios_check_work,
-               round_jiffies_relative(corruption_check_period*HZ)); 
+               round_jiffies_relative(corruption_check_period*HZ));
 }
 
 static int start_periodic_check_for_corruption(void)
 {
-       if (!memory_corruption_check || corruption_check_period == 0)
+       if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0)
                return 0;
 
        printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
index 7c7bedb83c5a463bad2a2cd59765d3d6b1066f8b..f771ab6b49e9c3e17b3413e2e440c81684e7b5e6 100644 (file)
@@ -233,18 +233,22 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 }
 #endif
 
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
+/*
+ * Used to work around a broken NUMA config.  Read the comment in
+ * srat_detect_node().
+ */
 static int __cpuinit nearby_node(int apicid)
 {
        int i, node;
 
        for (i = apicid - 1; i >= 0; i--) {
-               node = apicid_to_node[i];
+               node = __apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
-               node = apicid_to_node[i];
+               node = __apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
@@ -261,7 +265,7 @@ static int __cpuinit nearby_node(int apicid)
 #ifdef CONFIG_X86_HT
 static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
 {
-       u32 nodes;
+       u32 nodes, cores_per_cu = 1;
        u8 node_id;
        int cpu = smp_processor_id();
 
@@ -276,6 +280,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
                /* get compute unit information */
                smp_num_siblings = ((ebx >> 8) & 3) + 1;
                c->compute_unit_id = ebx & 0xff;
+               cores_per_cu += ((ebx >> 8) & 3);
        } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
                u64 value;
 
@@ -288,15 +293,18 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
        /* fixup multi-node processor information */
        if (nodes > 1) {
                u32 cores_per_node;
+               u32 cus_per_node;
 
                set_cpu_cap(c, X86_FEATURE_AMD_DCM);
                cores_per_node = c->x86_max_cores / nodes;
+               cus_per_node = cores_per_node / cores_per_cu;
 
                /* store NodeID, use llc_shared_map to store sibling info */
                per_cpu(cpu_llc_id, cpu) = node_id;
 
-               /* core id to be in range from 0 to (cores_per_node - 1) */
-               c->cpu_core_id = c->cpu_core_id % cores_per_node;
+               /* core id has to be in the [0 .. cores_per_node - 1] range */
+               c->cpu_core_id %= cores_per_node;
+               c->compute_unit_id %= cus_per_node;
        }
 }
 #endif
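
A worked example of the fixup above for a hypothetical two-node family 15h part with eight cores grouped two per compute unit (all values illustrative):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical topology: 8 cores, 2 nodes, 2 cores per CU */
            unsigned int x86_max_cores = 8, nodes = 2, cores_per_cu = 2;
            unsigned int cores_per_node = x86_max_cores / nodes;        /* 4 */
            unsigned int cus_per_node = cores_per_node / cores_per_cu;  /* 2 */
            unsigned int core, cu;

            for (core = 0; core < x86_max_cores; core++) {
                    cu = core / cores_per_cu;
                    printf("core %u -> per-node core %u, per-node cu %u\n",
                           core, core % cores_per_node, cu % cus_per_node);
            }
            return 0;
    }
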
@@ -334,31 +342,40 @@ EXPORT_SYMBOL_GPL(amd_get_nb_id);
 
 static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 {
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node;
        unsigned apicid = c->apicid;
 
-       node = per_cpu(cpu_llc_id, cpu);
+       node = numa_cpu_node(cpu);
+       if (node == NUMA_NO_NODE)
+               node = per_cpu(cpu_llc_id, cpu);
 
-       if (apicid_to_node[apicid] != NUMA_NO_NODE)
-               node = apicid_to_node[apicid];
        if (!node_online(node)) {
-               /* Two possibilities here:
-                  - The CPU is missing memory and no node was created.
-                  In that case try picking one from a nearby CPU
-                  - The APIC IDs differ from the HyperTransport node IDs
-                  which the K8 northbridge parsing fills in.
-                  Assume they are all increased by a constant offset,
-                  but in the same order as the HT nodeids.
-                  If that doesn't result in a usable node fall back to the
-                  path for the previous case.  */
-
+               /*
+                * Two possibilities here:
+                *
+                * - The CPU is missing memory and no node was created.  In
+                *   that case try picking one from a nearby CPU.
+                *
+                * - The APIC IDs differ from the HyperTransport node IDs
+                *   which the K8 northbridge parsing fills in.  Assume
+                *   they are all increased by a constant offset, but in
+                *   the same order as the HT nodeids.  If that doesn't
+                *   result in a usable node fall back to the path for the
+                *   previous case.
+                *
+                * This workaround operates directly on the mapping between
+                * APIC ID and NUMA node, assuming certain relationship
+                * between APIC ID, HT node ID and NUMA topology.  As going
+                * through CPU mapping may alter the outcome, directly
+                * access __apicid_to_node[].
+                */
                int ht_nodeid = c->initial_apicid;
 
                if (ht_nodeid >= 0 &&
-                   apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-                       node = apicid_to_node[ht_nodeid];
+                   __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+                       node = __apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
index 1d59834396bdc145c630e671d1bccd7769689a88..e2ced0074a45c1ffee1eeb1a7e8d56657824847f 100644 (file)
@@ -675,7 +675,7 @@ void __init early_cpu_init(void)
        const struct cpu_dev *const *cdev;
        int count = 0;
 
-#ifdef PROCESSOR_SELECT
+#ifdef CONFIG_PROCESSOR_SELECT
        printk(KERN_INFO "KERNEL supported cpus:\n");
 #endif
 
@@ -687,7 +687,7 @@ void __init early_cpu_init(void)
                cpu_devs[count] = cpudev;
                count++;
 
-#ifdef PROCESSOR_SELECT
+#ifdef CONFIG_PROCESSOR_SELECT
                {
                        unsigned int j;
 
@@ -869,7 +869,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
        select_idle_routine(c);
 
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
 #endif
 }
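
The PROCESSOR_SELECT hunks above fix a dead-code bug: Kconfig symbols only reach C code with a CONFIG_ prefix, so "#ifdef PROCESSOR_SELECT" could never be true and the "KERNEL supported cpus" listing was always compiled out. A minimal demonstration, with the macro standing in for what Kconfig would define:

    #include <stdio.h>

    #define CONFIG_PROCESSOR_SELECT 1   /* what Kconfig actually defines */

    int main(void)
    {
    #ifdef PROCESSOR_SELECT              /* never defined: dead code */
            puts("old guard reached");
    #endif
    #ifdef CONFIG_PROCESSOR_SELECT       /* the fixed guard */
            puts("fixed guard reached");
    #endif
            return 0;
    }
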
index bd1cac747f671e0487fd95efd41e6521528ca334..52c93648e492f10c8c6da916f7c1cd4dc25f2807 100644 (file)
@@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 {
        if (c->x86 == 0x06) {
                if (cpu_has(c, X86_FEATURE_EST))
-                       printk(KERN_WARNING PFX "Warning: EST-capable CPU "
-                              "detected. The acpi-cpufreq module offers "
-                              "voltage scaling in addition of frequency "
+                       printk_once(KERN_WARNING PFX "Warning: EST-capable "
+                              "CPU detected. The acpi-cpufreq module offers "
+                              "voltage scaling in addition to frequency "
                               "scaling. You should use that instead of "
                               "p4-clockmod, if possible.\n");
                switch (c->x86_model) {
index 4f6f679f27990198640f9a1e139ea3a838b05eb8..4a5a42b842adfcc457f77745584eaca3c7882bdd 100644 (file)
@@ -195,7 +195,7 @@ static unsigned int pcc_get_freq(unsigned int cpu)
 cmd_incomplete:
        iowrite16(0, &pcch_hdr->status);
        spin_unlock(&pcc_lock);
-       return -EINVAL;
+       return 0;
 }
 
 static int pcc_cpufreq_target(struct cpufreq_policy *policy,
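
The return-value change above matters because pcc_get_freq() returns unsigned int: a negative errno smuggled through an unsigned return becomes a huge bogus frequency rather than an error the caller can detect, hence returning 0 instead. A small demonstration:

    #include <stdio.h>
    #include <errno.h>

    static unsigned int broken_get_freq(void)
    {
            return -EINVAL;  /* wraps to a huge "frequency" */
    }

    int main(void)
    {
            /* prints 4294967274, which no caller treats as an error */
            printf("caller sees: %u kHz\n", broken_get_freq());
            return 0;
    }
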
index 35c7e65e59be4b27e9242843184dd63af0ee8ed8..c567dec854f69f865bc92105c55cc7066cc06c6e 100644 (file)
@@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = {
 static int __cpuinit powernowk8_init(void)
 {
        unsigned int i, supported_cpus = 0, cpu;
+       int rv;
 
        for_each_online_cpu(i) {
                int rc;
@@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void)
 
                cpb_capable = true;
 
-               register_cpu_notifier(&cpb_nb);
-
                msrs = msrs_alloc();
                if (!msrs) {
                        printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
                        return -ENOMEM;
                }
 
+               register_cpu_notifier(&cpb_nb);
+
                rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
 
                for_each_cpu(cpu, cpu_online_mask) {
@@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void)
                        (cpb_enabled ? "on" : "off"));
        }
 
-       return cpufreq_register_driver(&cpufreq_amd64_driver);
+       rv = cpufreq_register_driver(&cpufreq_amd64_driver);
+       if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
+               unregister_cpu_notifier(&cpb_nb);
+               msrs_free(msrs);
+               msrs = NULL;
+       }
+       return rv;
 }
 
 /* driver entry point for term */
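
The reordering above takes resources in a safe order (allocate the MSR array before registering the CPU notifier that may use it), and the new error path tears them down in reverse when driver registration fails. A compilable user-space sketch of the pattern, with all names illustrative rather than the driver's real API:

    #include <stdio.h>
    #include <stdlib.h>

    static void *msrs;                        /* stands in for msrs_alloc() */
    static int notifier_registered;

    static int register_driver(void) { return -1; } /* simulate failure */

    int main(void)
    {
            msrs = malloc(64);                /* 1. allocate first */
            if (!msrs)
                    return 1;

            notifier_registered = 1;          /* 2. then register the notifier */

            if (register_driver() < 0) {      /* 3. expose the driver last */
                    /* on failure, unwind in reverse order */
                    notifier_registered = 0;
                    free(msrs);
                    msrs = NULL;
                    puts("init failed, state fully unwound");
            }
            return 0;
    }
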
index d16c2c53d6bff0504595f5f0c93a973ad76248b9..df86bc8c859d6ff8596759e7a54258fd9764d7d6 100644 (file)
@@ -276,14 +276,13 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 
 static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 {
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();
-       int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;
 
        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
-       node = apicid_to_node[apicid];
+       node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE || !node_online(node)) {
                /* reuse the value from init_cpu_to_node() */
                node = cpu_to_node(cpu);
index ec2c19a7b8efe4cc366ac709072460f0023ef296..1ce1af2899df24d2f7c5a23091e5173c56f88918 100644 (file)
@@ -304,8 +304,9 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 
 struct _cache_attr {
        struct attribute attr;
-       ssize_t (*show)(struct _cpuid4_info *, char *);
-       ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
+       ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
+       ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
+                        unsigned int);
 };
 
 #ifdef CONFIG_AMD_NB
@@ -400,7 +401,8 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
 
 #define SHOW_CACHE_DISABLE(slot)                                       \
 static ssize_t                                                         \
-show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf)   \
+show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,   \
+                         unsigned int cpu)                             \
 {                                                                      \
        return show_cache_disable(this_leaf, buf, slot);                \
 }
@@ -512,7 +514,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 #define STORE_CACHE_DISABLE(slot)                                      \
 static ssize_t                                                         \
 store_cache_disable_##slot(struct _cpuid4_info *this_leaf,             \
-                          const char *buf, size_t count)               \
+                          const char *buf, size_t count,               \
+                          unsigned int cpu)                            \
 {                                                                      \
        return store_cache_disable(this_leaf, buf, count, slot);        \
 }
@@ -524,6 +527,39 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
 static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
                show_cache_disable_1, store_cache_disable_1);
 
+static ssize_t
+show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
+{
+       if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+               return -EINVAL;
+
+       return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
+}
+
+static ssize_t
+store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
+               unsigned int cpu)
+{
+       unsigned long val;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+               return -EINVAL;
+
+       if (strict_strtoul(buf, 16, &val) < 0)
+               return -EINVAL;
+
+       if (amd_set_subcaches(cpu, val))
+               return -EINVAL;
+
+       return count;
+}
+
+static struct _cache_attr subcaches =
+       __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
+
 #else  /* CONFIG_AMD_NB */
 #define amd_init_l3_cache(x, y)
 #endif /* CONFIG_AMD_NB */
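
Once registered, the new attribute appears alongside the other per-cache files; assuming the conventional cpu cache sysfs layout, L3 partitioning would be driven through a path like /sys/devices/system/cpu/cpu0/cache/index3/subcaches (path assumed for illustration), with store_subcaches() accepting a hex mask. A user-space sketch that reads the mask back:

    #include <stdio.h>

    int main(void)
    {
            /* path assumed from the usual cpu cache sysfs layout */
            const char *p =
                "/sys/devices/system/cpu/cpu0/cache/index3/subcaches";
            char buf[16];
            FILE *f = fopen(p, "r");

            if (!f)
                    return 1;                 /* no L3 partitioning support */
            if (fgets(buf, sizeof(buf), f))
                    printf("subcache mask: %s", buf);
            fclose(f);
            return 0;
    }
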
@@ -532,9 +568,9 @@ static int
 __cpuinit cpuid4_cache_lookup_regs(int index,
                                   struct _cpuid4_info_regs *this_leaf)
 {
-       union _cpuid4_leaf_eax  eax;
-       union _cpuid4_leaf_ebx  ebx;
-       union _cpuid4_leaf_ecx  ecx;
+       union _cpuid4_leaf_eax  eax;
+       union _cpuid4_leaf_ebx  ebx;
+       union _cpuid4_leaf_ecx  ecx;
        unsigned                edx;
 
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
@@ -732,11 +768,11 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
        if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
-               for_each_cpu(i, c->llc_shared_map) {
+               for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
                        if (!per_cpu(ici_cpuid4_info, i))
                                continue;
                        this_leaf = CPUID4_INFO_IDX(i, index);
-                       for_each_cpu(sibling, c->llc_shared_map) {
+                       for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
                                if (!cpu_online(sibling))
                                        continue;
                                set_bit(sibling, this_leaf->shared_cpu_map);
@@ -870,8 +906,8 @@ static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
 #define INDEX_KOBJECT_PTR(x, y)                (&((per_cpu(ici_index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val)                          \
-static ssize_t show_##file_name                                                \
-                       (struct _cpuid4_info *this_leaf, char *buf)     \
+static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
+                               unsigned int cpu)                       \
 {                                                                      \
        return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
 }
@@ -882,7 +918,8 @@ show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
 show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
 show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
 
-static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
+static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
+                        unsigned int cpu)
 {
        return sprintf(buf, "%luK\n", this_leaf->size / 1024);
 }
@@ -906,17 +943,20 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
        return n;
 }
 
-static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
+static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
+                                         unsigned int cpu)
 {
        return show_shared_cpu_map_func(leaf, 0, buf);
 }
 
-static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
+static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
+                                          unsigned int cpu)
 {
        return show_shared_cpu_map_func(leaf, 1, buf);
 }
 
-static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
+static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
+                        unsigned int cpu)
 {
        switch (this_leaf->eax.split.type) {
        case CACHE_TYPE_DATA:
@@ -974,6 +1014,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                n += 2;
 
+       if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+               n += 1;
+
        attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
        if (attrs == NULL)
                return attrs = default_attrs;
@@ -986,6 +1029,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
                attrs[n++] = &cache_disable_1.attr;
        }
 
+       if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+               attrs[n++] = &subcaches.attr;
+
        return attrs;
 }
 #endif
@@ -998,7 +1044,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 
        ret = fattr->show ?
                fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
-                       buf) :
+                       buf, this_leaf->cpu) :
                0;
        return ret;
 }
@@ -1012,7 +1058,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 
        ret = fattr->store ?
                fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
-                       buf, count) :
+                       buf, count, this_leaf->cpu) :
                0;
        return ret;
 }
index 5bf2fac52aca7771b6b7827117b9d2b2778fd8ad..167f97b5596e2f6e6e01df1f0b1096cff3392b2f 100644 (file)
@@ -527,15 +527,12 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
        int i, err = 0;
        struct threshold_bank *b = NULL;
        char name[32];
-#ifdef CONFIG_SMP
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
 
        sprintf(name, "threshold_bank%i", bank);
 
 #ifdef CONFIG_SMP
        if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {   /* symlink */
-               i = cpumask_first(c->llc_shared_map);
+               i = cpumask_first(cpu_llc_shared_mask(cpu));
 
                /* first core not up yet */
                if (cpu_data(i).cpu_core_id)
@@ -555,7 +552,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                if (err)
                        goto out;
 
-               cpumask_copy(b->cpus, c->llc_shared_map);
+               cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu));
                per_cpu(threshold_banks, cpu)[bank] = b;
 
                goto out;
index 9d977a2ea693c4db1d6cdf1d86b76672885a76e2..26604188aa49f3679a232fea72babb7855577928 100644 (file)
@@ -30,6 +30,7 @@
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
 #include <asm/compat.h>
+#include <asm/smp.h>
 
 #if 0
 #undef wrmsrl
@@ -93,6 +94,8 @@ struct amd_nb {
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
 };
 
+struct intel_percore;
+
 #define MAX_LBR_ENTRIES                16
 
 struct cpu_hw_events {
@@ -127,6 +130,13 @@ struct cpu_hw_events {
        struct perf_branch_stack        lbr_stack;
        struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];
 
+       /*
+        * Intel percore register state.
+        * Coordinates shared resources between HT threads.
+        */
+       int                             percore_used; /* Used by this CPU? */
+       struct intel_percore            *per_core;
+
        /*
         * AMD specific bits
         */
@@ -166,8 +176,10 @@ struct cpu_hw_events {
 /*
  * Constraint on the Event code + UMask
  */
-#define PEBS_EVENT_CONSTRAINT(c, n)    \
+#define INTEL_UEVENT_CONSTRAINT(c, n)  \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
+#define PEBS_EVENT_CONSTRAINT(c, n)    \
+       INTEL_UEVENT_CONSTRAINT(c, n)
 
 #define EVENT_CONSTRAINT_END           \
        EVENT_CONSTRAINT(0, 0, 0)
@@ -175,6 +187,28 @@ struct cpu_hw_events {
 #define for_each_event_constraint(e, c)        \
        for ((e) = (c); (e)->weight; (e)++)
 
+/*
+ * Extra registers for specific events.
+ * Some events need large masks and require external MSRs.
+ * Define a mapping to these extra registers.
+ */
+struct extra_reg {
+       unsigned int            event;
+       unsigned int            msr;
+       u64                     config_mask;
+       u64                     valid_mask;
+};
+
+#define EVENT_EXTRA_REG(e, ms, m, vm) {        \
+       .event = (e),           \
+       .msr = (ms),            \
+       .config_mask = (m),     \
+       .valid_mask = (vm),     \
+       }
+#define INTEL_EVENT_EXTRA_REG(event, msr, vm)  \
+       EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm)
+#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0)
+
 union perf_capabilities {
        struct {
                u64     lbr_format    : 6;
@@ -219,6 +253,7 @@ struct x86_pmu {
        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);
        struct event_constraint *event_constraints;
+       struct event_constraint *percore_constraints;
        void            (*quirks)(void);
        int             perfctr_second_write;
 
@@ -247,6 +282,11 @@ struct x86_pmu {
         */
        unsigned long   lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
        int             lbr_nr;                    /* hardware stack size */
+
+       /*
+        * Extra registers for events
+        */
+       struct extra_reg *extra_regs;
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -271,6 +311,10 @@ static u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
+static u64 __read_mostly hw_cache_extra_regs
+                               [PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+                               [PERF_COUNT_HW_CACHE_RESULT_MAX];
 
 /*
  * Propagate event elapsed time into the generic event.
@@ -298,7 +342,7 @@ x86_perf_event_update(struct perf_event *event)
         */
 again:
        prev_raw_count = local64_read(&hwc->prev_count);
-       rdmsrl(hwc->event_base + idx, new_raw_count);
+       rdmsrl(hwc->event_base, new_raw_count);
 
        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                        new_raw_count) != prev_raw_count)
@@ -321,6 +365,49 @@ again:
        return new_raw_count;
 }
 
+/* X86_FEATURE_PERFCTR_CORE may later be used to implement ALTERNATIVE() here */
+static inline int x86_pmu_addr_offset(int index)
+{
+       if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
+               return index << 1;
+       return index;
+}
+
+static inline unsigned int x86_pmu_config_addr(int index)
+{
+       return x86_pmu.eventsel + x86_pmu_addr_offset(index);
+}
+
+static inline unsigned int x86_pmu_event_addr(int index)
+{
+       return x86_pmu.perfctr + x86_pmu_addr_offset(index);
+}
+
+/*
+ * Find and validate any extra registers to set up.
+ */
+static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
+{
+       struct extra_reg *er;
+
+       event->hw.extra_reg = 0;
+       event->hw.extra_config = 0;
+
+       if (!x86_pmu.extra_regs)
+               return 0;
+
+       for (er = x86_pmu.extra_regs; er->msr; er++) {
+               if (er->event != (config & er->config_mask))
+                       continue;
+               if (event->attr.config1 & ~er->valid_mask)
+                       return -EINVAL;
+               event->hw.extra_reg = er->msr;
+               event->hw.extra_config = event->attr.config1;
+               break;
+       }
+       return 0;
+}
+
 static atomic_t active_events;
 static DEFINE_MUTEX(pmc_reserve_mutex);
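
The index << 1 in x86_pmu_addr_offset() reflects how family 15h lays out its counter MSRs: control and counter registers are interleaved in a single block instead of living in two separate ranges, so each counter's pair sits two MSRs apart. A sketch of the resulting addresses, assuming the F15h base values 0xc0010200/0xc0010201:

    #include <stdio.h>

    #define MSR_F15H_PERF_CTL 0xc0010200u   /* assumed base values */
    #define MSR_F15H_PERF_CTR 0xc0010201u

    int main(void)
    {
            int i;

            /* interleaved CTL/CTR pairs, hence the index << 1 offset */
            for (i = 0; i < 6; i++)
                    printf("counter %d: ctl 0x%08x  ctr 0x%08x\n", i,
                           MSR_F15H_PERF_CTL + (i << 1),
                           MSR_F15H_PERF_CTR + (i << 1));
            return 0;
    }
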
 
@@ -331,12 +418,12 @@ static bool reserve_pmc_hardware(void)
        int i;
 
        for (i = 0; i < x86_pmu.num_counters; i++) {
-               if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
+               if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
                        goto perfctr_fail;
        }
 
        for (i = 0; i < x86_pmu.num_counters; i++) {
-               if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
+               if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
                        goto eventsel_fail;
        }
 
@@ -344,13 +431,13 @@ static bool reserve_pmc_hardware(void)
 
 eventsel_fail:
        for (i--; i >= 0; i--)
-               release_evntsel_nmi(x86_pmu.eventsel + i);
+               release_evntsel_nmi(x86_pmu_config_addr(i));
 
        i = x86_pmu.num_counters;
 
 perfctr_fail:
        for (i--; i >= 0; i--)
-               release_perfctr_nmi(x86_pmu.perfctr + i);
+               release_perfctr_nmi(x86_pmu_event_addr(i));
 
        return false;
 }
@@ -360,8 +447,8 @@ static void release_pmc_hardware(void)
        int i;
 
        for (i = 0; i < x86_pmu.num_counters; i++) {
-               release_perfctr_nmi(x86_pmu.perfctr + i);
-               release_evntsel_nmi(x86_pmu.eventsel + i);
+               release_perfctr_nmi(x86_pmu_event_addr(i));
+               release_evntsel_nmi(x86_pmu_config_addr(i));
        }
 }
 
@@ -382,7 +469,7 @@ static bool check_hw_exists(void)
         * complain and bail.
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
-               reg = x86_pmu.eventsel + i;
+               reg = x86_pmu_config_addr(i);
                ret = rdmsrl_safe(reg, &val);
                if (ret)
                        goto msr_fail;
@@ -407,8 +494,8 @@ static bool check_hw_exists(void)
         * that don't trap on the MSR access and always return 0s.
         */
        val = 0xabcdUL;
-       ret = checking_wrmsrl(x86_pmu.perfctr, val);
-       ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+       ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
+       ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
        if (ret || val != val_new)
                goto msr_fail;
 
@@ -442,8 +529,9 @@ static inline int x86_pmu_initialized(void)
 }
 
 static inline int
-set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
+set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
 {
+       struct perf_event_attr *attr = &event->attr;
        unsigned int cache_type, cache_op, cache_result;
        u64 config, val;
 
@@ -470,8 +558,8 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
                return -EINVAL;
 
        hwc->config |= val;
-
-       return 0;
+       attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
+       return x86_pmu_extra_regs(val, event);
 }
 
 static int x86_setup_perfctr(struct perf_event *event)
@@ -496,10 +584,10 @@ static int x86_setup_perfctr(struct perf_event *event)
        }
 
        if (attr->type == PERF_TYPE_RAW)
-               return 0;
+               return x86_pmu_extra_regs(event->attr.config, event);
 
        if (attr->type == PERF_TYPE_HW_CACHE)
-               return set_ext_hw_attr(hwc, attr);
+               return set_ext_hw_attr(hwc, event);
 
        if (attr->config >= x86_pmu.max_events)
                return -EINVAL;
@@ -617,11 +705,11 @@ static void x86_pmu_disable_all(void)
 
                if (!test_bit(idx, cpuc->active_mask))
                        continue;
-               rdmsrl(x86_pmu.eventsel + idx, val);
+               rdmsrl(x86_pmu_config_addr(idx), val);
                if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
-               wrmsrl(x86_pmu.eventsel + idx, val);
+               wrmsrl(x86_pmu_config_addr(idx), val);
        }
 }
 
@@ -642,21 +730,26 @@ static void x86_pmu_disable(struct pmu *pmu)
        x86_pmu.disable_all();
 }
 
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
+                                         u64 enable_mask)
+{
+       if (hwc->extra_reg)
+               wrmsrl(hwc->extra_reg, hwc->extra_config);
+       wrmsrl(hwc->config_base, hwc->config | enable_mask);
+}
+
 static void x86_pmu_enable_all(int added)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               struct perf_event *event = cpuc->events[idx];
-               u64 val;
+               struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
 
                if (!test_bit(idx, cpuc->active_mask))
                        continue;
 
-               val = event->hw.config;
-               val |= ARCH_PERFMON_EVENTSEL_ENABLE;
-               wrmsrl(x86_pmu.eventsel + idx, val);
+               __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
        }
 }
 
@@ -821,15 +914,10 @@ static inline void x86_assign_hw_event(struct perf_event *event,
                hwc->event_base = 0;
        } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-               /*
-                * We set it so that event_base + idx in wrmsr/rdmsr maps to
-                * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
-                */
-               hwc->event_base =
-                       MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
+               hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0;
        } else {
-               hwc->config_base = x86_pmu.eventsel;
-               hwc->event_base  = x86_pmu.perfctr;
+               hwc->config_base = x86_pmu_config_addr(hwc->idx);
+               hwc->event_base  = x86_pmu_event_addr(hwc->idx);
        }
 }
 
@@ -915,17 +1003,11 @@ static void x86_pmu_enable(struct pmu *pmu)
        x86_pmu.enable_all(added);
 }
 
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
-                                         u64 enable_mask)
-{
-       wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
-}
-
 static inline void x86_pmu_disable_event(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
 
-       wrmsrl(hwc->config_base + hwc->idx, hwc->config);
+       wrmsrl(hwc->config_base, hwc->config);
 }
 
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -978,7 +1060,7 @@ x86_perf_event_set_period(struct perf_event *event)
         */
        local64_set(&hwc->prev_count, (u64)-left);
 
-       wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
+       wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
 
        /*
         * Due to an erratum on certain CPUs we need
         * a second write to be sure the register
         * is updated properly
         */
        if (x86_pmu.perfctr_second_write) {
-               wrmsrl(hwc->event_base + idx,
+               wrmsrl(hwc->event_base,
                        (u64)(-left) & x86_pmu.cntval_mask);
        }
 
@@ -1113,8 +1195,8 @@ void perf_event_print_debug(void)
        pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
-               rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
+               rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
+               rdmsrl(x86_pmu_event_addr(idx), pmc_count);
 
                prev_left = per_cpu(pmc_prev_left[idx], cpu);
 
@@ -1389,7 +1471,7 @@ static void __init pmu_check_apic(void)
        pr_info("no hardware sampling interrupt available.\n");
 }
 
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
 {
        struct event_constraint *c;
        int err;
@@ -1608,7 +1690,7 @@ out:
        return ret;
 }
 
-int x86_pmu_event_init(struct perf_event *event)
+static int x86_pmu_event_init(struct perf_event *event)
 {
        struct pmu *tmp;
        int err;
index 67e2202a60393cd48a0f2251862c59f65bf7e667..461f62bbd774a0adec028334f8c723e5a33bea1d 100644 (file)
@@ -127,6 +127,11 @@ static int amd_pmu_hw_config(struct perf_event *event)
 /*
  * AMD64 events are detected based on their event codes.
  */
+static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
+{
+       return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
+}
+
 static inline int amd_is_nb_event(struct hw_perf_event *hwc)
 {
        return (hwc->config & 0xe0) == 0xe0;
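
amd_get_event_code() folds the split event-select field back into one 12-bit code: bits [7:0] of the config hold the low byte and bits [35:32] the high nibble, which (config >> 24) & 0x0f00 moves down into bits [11:8]. A quick check of the arithmetic:

    #include <stdio.h>

    /* mirrors amd_get_event_code() above */
    static unsigned int event_code(unsigned long long config)
    {
            return ((config >> 24) & 0x0f00) | (config & 0x00ff);
    }

    int main(void)
    {
            /* event-select bits [35:32] = 0x1, bits [7:0] = 0xd6 */
            unsigned long long config = (0x1ULL << 32) | 0xd6;

            printf("event code 0x%03x\n", event_code(config)); /* 0x1d6 */
            return 0;
    }
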
@@ -385,13 +390,181 @@ static __initconst const struct x86_pmu amd_pmu = {
        .cpu_dead               = amd_pmu_cpu_dead,
 };
 
+/* AMD Family 15h */
+
+#define AMD_EVENT_TYPE_MASK    0x000000F0ULL
+
+#define AMD_EVENT_FP           0x00000000ULL ... 0x00000010ULL
+#define AMD_EVENT_LS           0x00000020ULL ... 0x00000030ULL
+#define AMD_EVENT_DC           0x00000040ULL ... 0x00000050ULL
+#define AMD_EVENT_CU           0x00000060ULL ... 0x00000070ULL
+#define AMD_EVENT_IC_DE                0x00000080ULL ... 0x00000090ULL
+#define AMD_EVENT_EX_LS                0x000000C0ULL
+#define AMD_EVENT_DE           0x000000D0ULL
+#define AMD_EVENT_NB           0x000000E0ULL ... 0x000000F0ULL
+
+/*
+ * AMD family 15h event code/PMC mappings:
+ *
+ * type = event_code & 0x0F0:
+ *
+ * 0x000       FP      PERF_CTL[5:3]
+ * 0x010       FP      PERF_CTL[5:3]
+ * 0x020       LS      PERF_CTL[5:0]
+ * 0x030       LS      PERF_CTL[5:0]
+ * 0x040       DC      PERF_CTL[5:0]
+ * 0x050       DC      PERF_CTL[5:0]
+ * 0x060       CU      PERF_CTL[2:0]
+ * 0x070       CU      PERF_CTL[2:0]
+ * 0x080       IC/DE   PERF_CTL[2:0]
+ * 0x090       IC/DE   PERF_CTL[2:0]
+ * 0x0A0       ---
+ * 0x0B0       ---
+ * 0x0C0       EX/LS   PERF_CTL[5:0]
+ * 0x0D0       DE      PERF_CTL[2:0]
+ * 0x0E0       NB      NB_PERF_CTL[3:0]
+ * 0x0F0       NB      NB_PERF_CTL[3:0]
+ *
+ * Exceptions:
+ *
+ * 0x003       FP      PERF_CTL[3]
+ * 0x00B       FP      PERF_CTL[3]
+ * 0x00D       FP      PERF_CTL[3]
+ * 0x023       DE      PERF_CTL[2:0]
+ * 0x02D       LS      PERF_CTL[3]
+ * 0x02E       LS      PERF_CTL[3,0]
+ * 0x043       CU      PERF_CTL[2:0]
+ * 0x045       CU      PERF_CTL[2:0]
+ * 0x046       CU      PERF_CTL[2:0]
+ * 0x054       CU      PERF_CTL[2:0]
+ * 0x055       CU      PERF_CTL[2:0]
+ * 0x08F       IC      PERF_CTL[0]
+ * 0x187       DE      PERF_CTL[0]
+ * 0x188       DE      PERF_CTL[0]
+ * 0x0DB       EX      PERF_CTL[5:0]
+ * 0x0DC       LS      PERF_CTL[5:0]
+ * 0x0DD       LS      PERF_CTL[5:0]
+ * 0x0DE       LS      PERF_CTL[5:0]
+ * 0x0DF       LS      PERF_CTL[5:0]
+ * 0x1D6       EX      PERF_CTL[5:0]
+ * 0x1D8       EX      PERF_CTL[5:0]
+ */
+
+static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
+static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
+static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
+static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
+static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
+static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
+
+static struct event_constraint *
+amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+       unsigned int event_code = amd_get_event_code(&event->hw);
+
+       switch (event_code & AMD_EVENT_TYPE_MASK) {
+       case AMD_EVENT_FP:
+               switch (event_code) {
+               case 0x003:
+               case 0x00B:
+               case 0x00D:
+                       return &amd_f15_PMC3;
+               default:
+                       return &amd_f15_PMC53;
+               }
+       case AMD_EVENT_LS:
+       case AMD_EVENT_DC:
+       case AMD_EVENT_EX_LS:
+               switch (event_code) {
+               case 0x023:
+               case 0x043:
+               case 0x045:
+               case 0x046:
+               case 0x054:
+               case 0x055:
+                       return &amd_f15_PMC20;
+               case 0x02D:
+                       return &amd_f15_PMC3;
+               case 0x02E:
+                       return &amd_f15_PMC30;
+               default:
+                       return &amd_f15_PMC50;
+               }
+       case AMD_EVENT_CU:
+       case AMD_EVENT_IC_DE:
+       case AMD_EVENT_DE:
+               switch (event_code) {
+               case 0x08F:
+               case 0x187:
+               case 0x188:
+                       return &amd_f15_PMC0;
+               case 0x0DB ... 0x0DF:
+               case 0x1D6:
+               case 0x1D8:
+                       return &amd_f15_PMC50;
+               default:
+                       return &amd_f15_PMC20;
+               }
+       case AMD_EVENT_NB:
+               /* not yet implemented */
+               return &emptyconstraint;
+       default:
+               return &emptyconstraint;
+       }
+}
+
+static __initconst const struct x86_pmu amd_pmu_f15h = {
+       .name                   = "AMD Family 15h",
+       .handle_irq             = x86_pmu_handle_irq,
+       .disable_all            = x86_pmu_disable_all,
+       .enable_all             = x86_pmu_enable_all,
+       .enable                 = x86_pmu_enable_event,
+       .disable                = x86_pmu_disable_event,
+       .hw_config              = amd_pmu_hw_config,
+       .schedule_events        = x86_schedule_events,
+       .eventsel               = MSR_F15H_PERF_CTL,
+       .perfctr                = MSR_F15H_PERF_CTR,
+       .event_map              = amd_pmu_event_map,
+       .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
+       .num_counters           = 6,
+       .cntval_bits            = 48,
+       .cntval_mask            = (1ULL << 48) - 1,
+       .apic                   = 1,
+       /* use highest bit to detect overflow */
+       .max_period             = (1ULL << 47) - 1,
+       .get_event_constraints  = amd_get_event_constraints_f15h,
+       /* northbridge counters not yet implemented: */
+#if 0
+       .put_event_constraints  = amd_put_event_constraints,
+
+       .cpu_prepare            = amd_pmu_cpu_prepare,
+       .cpu_starting           = amd_pmu_cpu_starting,
+       .cpu_dead               = amd_pmu_cpu_dead,
+#endif
+};
+
 static __init int amd_pmu_init(void)
 {
        /* Performance-monitoring supported from K7 and later: */
        if (boot_cpu_data.x86 < 6)
                return -ENODEV;
 
-       x86_pmu = amd_pmu;
+       /*
+        * If core performance counter extensions exist, this must be
+        * family 15h; otherwise fail.  See x86_pmu_addr_offset().
+        */
+       switch (boot_cpu_data.x86) {
+       case 0x15:
+               if (!cpu_has_perfctr_core)
+                       return -ENODEV;
+               x86_pmu = amd_pmu_f15h;
+               break;
+       default:
+               if (cpu_has_perfctr_core)
+                       return -ENODEV;
+               x86_pmu = amd_pmu;
+               break;
+       }
 
        /* Events are common for all AMDs */
        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
index 008835c1d79ca2c5fb83b2c2ca2ddfeab657cca1..8fc2b2cee1da32713746cc7e871aab075deaee41 100644 (file)
@@ -1,5 +1,27 @@
 #ifdef CONFIG_CPU_SUP_INTEL
 
+#define MAX_EXTRA_REGS 2
+
+/*
+ * Per register state.
+ */
+struct er_account {
+       int                     ref;            /* reference count */
+       unsigned int            extra_reg;      /* extra MSR number */
+       u64                     extra_config;   /* extra MSR config */
+};
+
+/*
+ * Per-core state.
+ * This is used to coordinate shared registers between HT threads.
+ */
+struct intel_percore {
+       raw_spinlock_t          lock;           /* protect structure */
+       struct er_account       regs[MAX_EXTRA_REGS];
+       int                     refcnt;         /* number of threads */
+       unsigned                core_id;
+};
+
 /*
  * Intel PerfMon, used on Core and later.
  */
@@ -64,6 +86,18 @@ static struct event_constraint intel_nehalem_event_constraints[] =
        EVENT_CONSTRAINT_END
 };
 
+static struct extra_reg intel_nehalem_extra_regs[] =
+{
+       INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
+       EVENT_EXTRA_END
+};
+
+static struct event_constraint intel_nehalem_percore_constraints[] =
+{
+       INTEL_EVENT_CONSTRAINT(0xb7, 0),
+       EVENT_CONSTRAINT_END
+};
+
 static struct event_constraint intel_westmere_event_constraints[] =
 {
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
@@ -76,6 +110,33 @@ static struct event_constraint intel_westmere_event_constraints[] =
        EVENT_CONSTRAINT_END
 };
 
+static struct event_constraint intel_snb_event_constraints[] =
+{
+       FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+       FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+       /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+       INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
+       INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
+       INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
+       INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
+       INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
+       EVENT_CONSTRAINT_END
+};
+
+static struct extra_reg intel_westmere_extra_regs[] =
+{
+       INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
+       INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff),
+       EVENT_EXTRA_END
+};
+
+static struct event_constraint intel_westmere_percore_constraints[] =
+{
+       INTEL_EVENT_CONSTRAINT(0xb7, 0),
+       INTEL_EVENT_CONSTRAINT(0xbb, 0),
+       EVENT_CONSTRAINT_END
+};
+
 static struct event_constraint intel_gen_event_constraints[] =
 {
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
@@ -89,6 +150,106 @@ static u64 intel_pmu_event_map(int hw_event)
        return intel_perfmon_event_map[hw_event];
 }
 
+static __initconst const u64 snb_hw_cache_event_ids
+                               [PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+                               [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */
+               [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */
+               [ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0,
+               [ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */
+       },
+ },
+ [ C(L1I ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0,
+               [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0,
+               [ C(RESULT_MISS)   ] = 0x0,
+       },
+ },
+ [ C(LL  ) ] = {
+       /*
+        * TBD: Need Off-core Response Performance Monitoring support
+        */
+       [ C(OP_READ) ] = {
+               /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
+               [ C(RESULT_ACCESS) ] = 0x01b7,
+               /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
+               [ C(RESULT_MISS)   ] = 0x01bb,
+       },
+       [ C(OP_WRITE) ] = {
+               /* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */
+               [ C(RESULT_ACCESS) ] = 0x01b7,
+               /* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */
+               [ C(RESULT_MISS)   ] = 0x01bb,
+       },
+       [ C(OP_PREFETCH) ] = {
+               /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
+               [ C(RESULT_ACCESS) ] = 0x01b7,
+               /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
+               [ C(RESULT_MISS)   ] = 0x01bb,
+       },
+ },
+ [ C(DTLB) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
+               [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
+               [ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0,
+               [ C(RESULT_MISS)   ] = 0x0,
+       },
+ },
+ [ C(ITLB) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */
+               [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+ },
+ [ C(BPU ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
+               [ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+ },
+};
+
 static __initconst const u64 westmere_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
@@ -124,16 +285,26 @@ static __initconst const u64 westmere_hw_cache_event_ids
  },
  [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
-               [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
+               /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
+               [ C(RESULT_ACCESS) ] = 0x01b7,
+               /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
+               [ C(RESULT_MISS)   ] = 0x01bb,
        },
+       /*
+        * Use RFO, not WRITEBACK, because a write miss would typically occur
+        * on RFO.
+        */
        [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
-               [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
+               /* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */
+               [ C(RESULT_ACCESS) ] = 0x01bb,
+               /* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */
+               [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
-               [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
+               /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
+               [ C(RESULT_ACCESS) ] = 0x01b7,
+               /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
+               [ C(RESULT_MISS)   ] = 0x01bb,
        },
  },
  [ C(DTLB) ] = {
@@ -180,6 +351,39 @@ static __initconst const u64 westmere_hw_cache_event_ids
  },
 };
 
+/*
+ * OFFCORE_RESPONSE MSR bits (subset); see IA32 SDM Vol 3, Section 30.6.1.3
+ */
+
+#define DMND_DATA_RD     (1 << 0)
+#define DMND_RFO         (1 << 1)
+#define DMND_WB          (1 << 3)
+#define PF_DATA_RD       (1 << 4)
+#define PF_DATA_RFO      (1 << 5)
+#define RESP_UNCORE_HIT  (1 << 8)
+#define RESP_MISS        (0xf600) /* non-uncore hit */
+
+static __initconst const u64 nehalem_hw_cache_extra_regs
+                               [PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+                               [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(LL  ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT,
+               [ C(RESULT_MISS)   ] = DMND_DATA_RD|RESP_MISS,
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT,
+               [ C(RESULT_MISS)   ] = DMND_RFO|DMND_WB|RESP_MISS,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT,
+               [ C(RESULT_MISS)   ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS,
+       },
+ }
+};
+
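
A worked composition of these bits, matching the masks stored in the extra-regs table above (pure arithmetic over the defines; nothing else is assumed):

#include <stdio.h>

#define DMND_DATA_RD	 (1 << 0)
#define RESP_UNCORE_HIT	 (1 << 8)
#define RESP_MISS	 (0xf600)

int main(void)
{
	printf("LL read access: %#06x\n", DMND_DATA_RD | RESP_UNCORE_HIT); /* 0x0101 */
	printf("LL read miss:   %#06x\n", DMND_DATA_RD | RESP_MISS);       /* 0xf601 */
	return 0;
}
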
 static __initconst const u64 nehalem_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
@@ -215,16 +419,26 @@ static __initconst const u64 nehalem_hw_cache_event_ids
  },
  [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
-               [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
+               /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
+               [ C(RESULT_ACCESS) ] = 0x01b7,
+               /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
+               [ C(RESULT_MISS)   ] = 0x01b7,
        },
+       /*
+        * Use RFO, not WRITEBACK, because a write miss would typically occur
+        * on RFO.
+        */
        [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
-               [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
+               /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
+               [ C(RESULT_ACCESS) ] = 0x01b7,
+               /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
+               [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
-               [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
+               /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
+               [ C(RESULT_ACCESS) ] = 0x01b7,
+               /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
+               [ C(RESULT_MISS)   ] = 0x01b7,
        },
  },
  [ C(DTLB) ] = {
@@ -691,8 +905,8 @@ static void intel_pmu_reset(void)
        printk("clearing PMU state on CPU#%d\n", smp_processor_id());
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
-               checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
+               checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
+               checking_wrmsrl(x86_pmu_event_addr(idx),  0ull);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
                checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
@@ -793,6 +1007,67 @@ intel_bts_constraints(struct perf_event *event)
        return NULL;
 }
 
+static struct event_constraint *
+intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
+       struct event_constraint *c;
+       struct intel_percore *pc;
+       struct er_account *era;
+       int i;
+       int free_slot;
+       int found;
+
+       if (!x86_pmu.percore_constraints || hwc->extra_alloc)
+               return NULL;
+
+       for (c = x86_pmu.percore_constraints; c->cmask; c++) {
+               if (e != c->code)
+                       continue;
+
+               /*
+                * Allocate resource per core.
+                */
+               pc = cpuc->per_core;
+               if (!pc)
+                       break;
+               c = &emptyconstraint;
+               raw_spin_lock(&pc->lock);
+               free_slot = -1;
+               found = 0;
+               for (i = 0; i < MAX_EXTRA_REGS; i++) {
+                       era = &pc->regs[i];
+                       if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
+                               /* Allow sharing same config */
+                               if (hwc->extra_config == era->extra_config) {
+                                       era->ref++;
+                                       cpuc->percore_used = 1;
+                                       hwc->extra_alloc = 1;
+                                       c = NULL;
+                               }
+                               /* else conflict */
+                               found = 1;
+                               break;
+                       } else if (era->ref == 0 && free_slot == -1)
+                               free_slot = i;
+               }
+               if (!found && free_slot != -1) {
+                       era = &pc->regs[free_slot];
+                       era->ref = 1;
+                       era->extra_reg = hwc->extra_reg;
+                       era->extra_config = hwc->extra_config;
+                       cpuc->percore_used = 1;
+                       hwc->extra_alloc = 1;
+                       c = NULL;
+               }
+               raw_spin_unlock(&pc->lock);
+               return c;
+       }
+
+       return NULL;
+}
+
 static struct event_constraint *
 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
@@ -806,9 +1081,51 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
        if (c)
                return c;
 
+       c = intel_percore_constraints(cpuc, event);
+       if (c)
+               return c;
+
        return x86_get_event_constraints(cpuc, event);
 }
 
+static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
+                                       struct perf_event *event)
+{
+       struct extra_reg *er;
+       struct intel_percore *pc;
+       struct er_account *era;
+       struct hw_perf_event *hwc = &event->hw;
+       int i, allref;
+
+       if (!cpuc->percore_used)
+               return;
+
+       for (er = x86_pmu.extra_regs; er->msr; er++) {
+               if (er->event != (hwc->config & er->config_mask))
+                       continue;
+
+               pc = cpuc->per_core;
+               raw_spin_lock(&pc->lock);
+               for (i = 0; i < MAX_EXTRA_REGS; i++) {
+                       era = &pc->regs[i];
+                       if (era->ref > 0 &&
+                           era->extra_config == hwc->extra_config &&
+                           era->extra_reg == er->msr) {
+                               era->ref--;
+                               hwc->extra_alloc = 0;
+                               break;
+                       }
+               }
+               allref = 0;
+               for (i = 0; i < MAX_EXTRA_REGS; i++)
+                       allref += pc->regs[i].ref;
+               if (allref == 0)
+                       cpuc->percore_used = 0;
+               raw_spin_unlock(&pc->lock);
+               break;
+       }
+}
+
 static int intel_pmu_hw_config(struct perf_event *event)
 {
        int ret = x86_pmu_hw_config(event);
@@ -880,20 +1197,67 @@ static __initconst const struct x86_pmu core_pmu = {
         */
        .max_period             = (1ULL << 31) - 1,
        .get_event_constraints  = intel_get_event_constraints,
+       .put_event_constraints  = intel_put_event_constraints,
        .event_constraints      = intel_core_event_constraints,
 };
 
+static int intel_pmu_cpu_prepare(int cpu)
+{
+       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+       if (!cpu_has_ht_siblings())
+               return NOTIFY_OK;
+
+       cpuc->per_core = kzalloc_node(sizeof(struct intel_percore),
+                                     GFP_KERNEL, cpu_to_node(cpu));
+       if (!cpuc->per_core)
+               return NOTIFY_BAD;
+
+       raw_spin_lock_init(&cpuc->per_core->lock);
+       cpuc->per_core->core_id = -1;
+       return NOTIFY_OK;
+}
+
 static void intel_pmu_cpu_starting(int cpu)
 {
+       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+       int core_id = topology_core_id(cpu);
+       int i;
+
        init_debug_store_on_cpu(cpu);
        /*
         * Deal with CPUs that don't clear their LBRs on power-up.
         */
        intel_pmu_lbr_reset();
+
+       if (!cpu_has_ht_siblings())
+               return;
+
+       for_each_cpu(i, topology_thread_cpumask(cpu)) {
+               struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core;
+
+               if (pc && pc->core_id == core_id) {
+                       kfree(cpuc->per_core);
+                       cpuc->per_core = pc;
+                       break;
+               }
+       }
+
+       cpuc->per_core->core_id = core_id;
+       cpuc->per_core->refcnt++;
 }
 
 static void intel_pmu_cpu_dying(int cpu)
 {
+       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+       struct intel_percore *pc = cpuc->per_core;
+
+       if (pc) {
+               if (pc->core_id == -1 || --pc->refcnt == 0)
+                       kfree(pc);
+               cpuc->per_core = NULL;
+       }
+
        fini_debug_store_on_cpu(cpu);
 }
 
@@ -918,7 +1282,9 @@ static __initconst const struct x86_pmu intel_pmu = {
         */
        .max_period             = (1ULL << 31) - 1,
        .get_event_constraints  = intel_get_event_constraints,
+       .put_event_constraints  = intel_put_event_constraints,
 
+       .cpu_prepare            = intel_pmu_cpu_prepare,
        .cpu_starting           = intel_pmu_cpu_starting,
        .cpu_dying              = intel_pmu_cpu_dying,
 };
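
As a toy model of the hotplug flow above (user-space sketch; two CPU slots stand in for one core's HT siblings, and all names are illustrative): cpu_prepare allocates a candidate structure, cpu_starting adopts a sibling's structure when one already carries its core_id, and cpu_dying frees on the last reference:

#include <stdio.h>
#include <stdlib.h>

struct percore { int core_id; int refcnt; };

static struct percore *cpu_percore[2];

static void prepare(int cpu)
{
	cpu_percore[cpu] = calloc(1, sizeof(*cpu_percore[cpu]));
	cpu_percore[cpu]->core_id = -1;
}

static void starting(int cpu, int core_id)
{
	for (int i = 0; i < 2; i++) {		/* scan HT siblings */
		if (i != cpu && cpu_percore[i] &&
		    cpu_percore[i]->core_id == core_id) {
			free(cpu_percore[cpu]);	/* drop own copy, adopt sibling's */
			cpu_percore[cpu] = cpu_percore[i];
			break;
		}
	}
	cpu_percore[cpu]->core_id = core_id;
	cpu_percore[cpu]->refcnt++;
}

static void dying(int cpu)
{
	struct percore *pc = cpu_percore[cpu];

	if (pc && (pc->core_id == -1 || --pc->refcnt == 0))
		free(pc);
	cpu_percore[cpu] = NULL;
}

int main(void)
{
	prepare(0);
	prepare(1);
	starting(0, 7);
	starting(1, 7);		/* second sibling adopts the first's structure */
	printf("shared: %d\n", cpu_percore[0] == cpu_percore[1]);	/* 1 */
	dying(0);
	dying(1);
	return 0;
}
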
@@ -1024,6 +1390,7 @@ static __init int intel_pmu_init(void)
                intel_pmu_lbr_init_core();
 
                x86_pmu.event_constraints = intel_core2_event_constraints;
+               x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
                pr_cont("Core2 events, ");
                break;
 
@@ -1032,11 +1399,16 @@ static __init int intel_pmu_init(void)
        case 46: /* 45 nm nehalem-ex, "Beckton" */
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
+               memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
+                      sizeof(hw_cache_extra_regs));
 
                intel_pmu_lbr_init_nhm();
 
                x86_pmu.event_constraints = intel_nehalem_event_constraints;
+               x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
+               x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
+               x86_pmu.extra_regs = intel_nehalem_extra_regs;
                pr_cont("Nehalem events, ");
                break;
 
@@ -1047,6 +1419,7 @@ static __init int intel_pmu_init(void)
                intel_pmu_lbr_init_atom();
 
                x86_pmu.event_constraints = intel_gen_event_constraints;
+               x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
                pr_cont("Atom events, ");
                break;
 
@@ -1054,14 +1427,30 @@ static __init int intel_pmu_init(void)
        case 44: /* 32 nm nehalem, "Gulftown" */
                memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
+               memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
+                      sizeof(hw_cache_extra_regs));
 
                intel_pmu_lbr_init_nhm();
 
                x86_pmu.event_constraints = intel_westmere_event_constraints;
+               x86_pmu.percore_constraints = intel_westmere_percore_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
+               x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
+               x86_pmu.extra_regs = intel_westmere_extra_regs;
                pr_cont("Westmere events, ");
                break;
 
+       case 42: /* SandyBridge */
+               memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
+                      sizeof(hw_cache_event_ids));
+
+               intel_pmu_lbr_init_nhm();
+
+               x86_pmu.event_constraints = intel_snb_event_constraints;
+               x86_pmu.pebs_constraints = intel_snb_pebs_events;
+               pr_cont("SandyBridge events, ");
+               break;
+
        default:
                /*
                 * default constraints for v2 and up
index b7dcd9f2b8a04b8204762e50700264570e8ce498..b95c66ae4a2ae5486e9f5866063678e8d96c944a 100644 (file)
@@ -361,30 +361,88 @@ static int intel_pmu_drain_bts_buffer(void)
 /*
  * PEBS
  */
-
-static struct event_constraint intel_core_pebs_events[] = {
-       PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */
+static struct event_constraint intel_core2_pebs_event_constraints[] = {
+       PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
        PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
        PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
        PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
-       PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */
-       PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
-       PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */
-       PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
-       PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */
+       INTEL_EVENT_CONSTRAINT(0xcb, 0x1),  /* MEM_LOAD_RETIRED.* */
+       EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint intel_atom_pebs_event_constraints[] = {
+       PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
+       PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
+       INTEL_EVENT_CONSTRAINT(0xcb, 0x1),  /* MEM_LOAD_RETIRED.* */
        EVENT_CONSTRAINT_END
 };
 
-static struct event_constraint intel_nehalem_pebs_events[] = {
-       PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */
-       PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */
-       PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */
-       PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETURED.ANY */
-       PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */
-       PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
-       PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */
-       PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
-       PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */
+static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
+       INTEL_EVENT_CONSTRAINT(0x0b, 0xf),  /* MEM_INST_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0x0f, 0xf),  /* MEM_UNCORE_RETIRED.* */
+       PEBS_EVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
+       INTEL_EVENT_CONSTRAINT(0xc0, 0xf),  /* INST_RETIRED.ANY */
+       INTEL_EVENT_CONSTRAINT(0xc2, 0xf),  /* UOPS_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xc4, 0xf),  /* BR_INST_RETIRED.* */
+       PEBS_EVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
+       INTEL_EVENT_CONSTRAINT(0xc7, 0xf),  /* SSEX_UOPS_RETIRED.* */
+       PEBS_EVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
+       INTEL_EVENT_CONSTRAINT(0xcb, 0xf),  /* MEM_LOAD_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xf7, 0xf),  /* FP_ASSIST.* */
+       EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint intel_westmere_pebs_event_constraints[] = {
+       INTEL_EVENT_CONSTRAINT(0x0b, 0xf),  /* MEM_INST_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0x0f, 0xf),  /* MEM_UNCORE_RETIRED.* */
+       PEBS_EVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
+       INTEL_EVENT_CONSTRAINT(0xc0, 0xf),  /* INSTR_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xc2, 0xf),  /* UOPS_RETIRED.* */
+
+       INTEL_EVENT_CONSTRAINT(0xc4, 0xf),  /* BR_INST_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xc5, 0xf),  /* BR_MISP_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xc7, 0xf),  /* SSEX_UOPS_RETIRED.* */
+       PEBS_EVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
+       INTEL_EVENT_CONSTRAINT(0xcb, 0xf),  /* MEM_LOAD_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xf7, 0xf),  /* FP_ASSIST.* */
+       EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint intel_snb_pebs_events[] = {
+       PEBS_EVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
+       PEBS_EVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
+       PEBS_EVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
+       PEBS_EVENT_CONSTRAINT(0x01c4, 0xf), /* BR_INST_RETIRED.CONDITIONAL */
+       PEBS_EVENT_CONSTRAINT(0x02c4, 0xf), /* BR_INST_RETIRED.NEAR_CALL */
+       PEBS_EVENT_CONSTRAINT(0x04c4, 0xf), /* BR_INST_RETIRED.ALL_BRANCHES */
+       PEBS_EVENT_CONSTRAINT(0x08c4, 0xf), /* BR_INST_RETIRED.NEAR_RETURN */
+       PEBS_EVENT_CONSTRAINT(0x10c4, 0xf), /* BR_INST_RETIRED.NOT_TAKEN */
+       PEBS_EVENT_CONSTRAINT(0x20c4, 0xf), /* BR_INST_RETIRED.NEAR_TAKEN */
+       PEBS_EVENT_CONSTRAINT(0x40c4, 0xf), /* BR_INST_RETIRED.FAR_BRANCH */
+       PEBS_EVENT_CONSTRAINT(0x01c5, 0xf), /* BR_MISP_RETIRED.CONDITIONAL */
+       PEBS_EVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
+       PEBS_EVENT_CONSTRAINT(0x04c5, 0xf), /* BR_MISP_RETIRED.ALL_BRANCHES */
+       PEBS_EVENT_CONSTRAINT(0x10c5, 0xf), /* BR_MISP_RETIRED.NOT_TAKEN */
+       PEBS_EVENT_CONSTRAINT(0x20c5, 0xf), /* BR_MISP_RETIRED.TAKEN */
+       PEBS_EVENT_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
+       PEBS_EVENT_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORE */
+       PEBS_EVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
+       PEBS_EVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
+       PEBS_EVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
+       PEBS_EVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
+       PEBS_EVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
+       PEBS_EVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
+       PEBS_EVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
+       PEBS_EVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
+       PEBS_EVENT_CONSTRAINT(0x01d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L1_HIT */
+       PEBS_EVENT_CONSTRAINT(0x02d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L2_HIT */
+       PEBS_EVENT_CONSTRAINT(0x04d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.LLC_HIT */
+       PEBS_EVENT_CONSTRAINT(0x40d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.HIT_LFB */
+       PEBS_EVENT_CONSTRAINT(0x01d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS */
+       PEBS_EVENT_CONSTRAINT(0x02d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT */
+       PEBS_EVENT_CONSTRAINT(0x04d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM */
+       PEBS_EVENT_CONSTRAINT(0x08d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE */
+       PEBS_EVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
        EVENT_CONSTRAINT_END
 };
 
@@ -695,20 +753,17 @@ static void intel_ds_init(void)
                        printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
                        x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
                        x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
-                       x86_pmu.pebs_constraints = intel_core_pebs_events;
                        break;
 
                case 1:
                        printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
                        x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
                        x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
-                       x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
                        break;
 
                default:
                        printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
                        x86_pmu.pebs = 0;
-                       break;
                }
        }
 }
index f7a0993c1e7c7a7a321b9057f9aaad75a75cc611..3769ac822f96b09a64548f0a43fa6a6cf7dac82a 100644 (file)
@@ -764,15 +764,20 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
        u64 v;
 
        /* an official way for overflow indication */
-       rdmsrl(hwc->config_base + hwc->idx, v);
+       rdmsrl(hwc->config_base, v);
        if (v & P4_CCCR_OVF) {
-               wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF);
+               wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF);
                return 1;
        }
 
-       /* it might be unflagged overflow */
-       rdmsrl(hwc->event_base + hwc->idx, v);
-       if (!(v & ARCH_P4_CNTRVAL_MASK))
+       /*
+        * In some circumstances the overflow might issue an NMI without
+        * setting the P4_CCCR_OVF bit. Because the counter holds a negative
+        * value, we simply check whether its high bit is set: if it is
+        * cleared, the counter has reached zero and continued counting
+        * before the real NMI signal was received:
+        */
+       if (!(v & ARCH_P4_UNFLAGGED_BIT))
                return 1;
 
        return 0;
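
A small numeric illustration of that comment (user-space sketch; the 40-bit counter width and the top-bit definition of ARCH_P4_UNFLAGGED_BIT are assumptions based on the surrounding code): the counter is armed with a negative value, so its top bit stays set while counting up toward zero, and clears once it wraps past zero:

#include <stdio.h>
#include <stdint.h>

#define CNTRVAL_BITS	40
#define CNTRVAL_MASK	((1ULL << CNTRVAL_BITS) - 1)
#define UNFLAGGED_BIT	(1ULL << (CNTRVAL_BITS - 1))	/* assumed top bit */

int main(void)
{
	uint64_t period = 100000;
	uint64_t v = (uint64_t)(-(int64_t)period) & CNTRVAL_MASK; /* armed value */

	printf("armed:      top bit %s\n", (v & UNFLAGGED_BIT) ? "set" : "clear");
	v = (v + period + 5) & CNTRVAL_MASK;	/* counted past zero */
	printf("overflowed: top bit %s -> report overflow\n",
	       (v & UNFLAGGED_BIT) ? "set" : "clear");
	return 0;
}
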
@@ -810,7 +815,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
         * state we need to clear P4_CCCR_OVF, otherwise interrupt get
         * asserted again and again
         */
-       (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+       (void)checking_wrmsrl(hwc->config_base,
                (u64)(p4_config_unpack_cccr(hwc->config)) &
                        ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
 }
@@ -880,7 +885,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
        p4_pmu_enable_pebs(hwc->config);
 
        (void)checking_wrmsrl(escr_addr, escr_conf);
-       (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+       (void)checking_wrmsrl(hwc->config_base,
                                (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
 }
 
index 34ba07be2cdab5ffa45b3dfe783fc24a939bceb1..20c097e33860b7d5a599fbf132622b9c383ad8a5 100644 (file)
@@ -68,7 +68,7 @@ p6_pmu_disable_event(struct perf_event *event)
        if (cpuc->enabled)
                val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-       (void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
+       (void)checking_wrmsrl(hwc->config_base, val);
 }
 
 static void p6_pmu_enable_event(struct perf_event *event)
@@ -81,7 +81,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
        if (cpuc->enabled)
                val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-       (void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
+       (void)checking_wrmsrl(hwc->config_base, val);
 }
 
 static __initconst const struct x86_pmu p6_pmu = {
index d5a236615501fd6a41fb6f6bc76bfd2369a47bc5..966512b2cacf37824bc2b2bccda04e6f26f12ddf 100644 (file)
@@ -46,6 +46,8 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
        /* returns the bit offset of the performance counter register */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
+               if (msr >= MSR_F15H_PERF_CTR)
+                       return (msr - MSR_F15H_PERF_CTR) >> 1;
                return msr - MSR_K7_PERFCTR0;
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
@@ -70,6 +72,8 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
        /* returns the bit offset of the event selection register */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
+               if (msr >= MSR_F15H_PERF_CTL)
+                       return (msr - MSR_F15H_PERF_CTL) >> 1;
                return msr - MSR_K7_EVNTSEL0;
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
index df20723a6a1b3b0ced782412eef7c4b72f3c1e36..220a1c11cfde4280e22ccf6b691e1c2b4c8ea64c 100644 (file)
@@ -320,31 +320,6 @@ void die(const char *str, struct pt_regs *regs, long err)
        oops_end(flags, regs, sig);
 }
 
-void notrace __kprobes
-die_nmi(char *str, struct pt_regs *regs, int do_panic)
-{
-       unsigned long flags;
-
-       if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
-               return;
-
-       /*
-        * We are in trouble anyway, lets at least try
-        * to get a message out.
-        */
-       flags = oops_begin();
-       printk(KERN_EMERG "%s", str);
-       printk(" on CPU%d, ip %08lx, registers:\n",
-               smp_processor_id(), regs->ip);
-       show_registers(regs);
-       oops_end(flags, regs, 0);
-       if (do_panic || panic_on_oops)
-               panic("Non maskable interrupt");
-       nmi_exit();
-       local_irq_enable();
-       do_exit(SIGBUS);
-}
-
 static int __init oops_setup(char *s)
 {
        if (!s)
index 5fad62684651b6c3706d6ff42fab054190be0f6e..cdf5bfd9d4d50d8eb2503fd15c38b7d013e90f4a 100644 (file)
@@ -841,15 +841,21 @@ static int __init parse_memopt(char *p)
        if (!p)
                return -EINVAL;
 
-#ifdef CONFIG_X86_32
        if (!strcmp(p, "nopentium")) {
+#ifdef CONFIG_X86_32
                setup_clear_cpu_cap(X86_FEATURE_PSE);
                return 0;
-       }
+#else
+               printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n");
+               return -EINVAL;
 #endif
+       }
 
        userdef = 1;
        mem_size = memparse(p, &p);
+       /* don't remove all of memory when handling "mem={invalid}" param */
+       if (mem_size == 0)
+               return -EINVAL;
        e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
 
        return 0;
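
To see what the new zero-size guard prevents (user-space sketch; this memparse is a simplified stand-in for the kernel helper, which also accepts lowercase suffixes): "mem=512M" yields 0x20000000 and trims e820 above it, while an unparsable string yields 0 and is now rejected instead of removing all memory:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's memparse(). */
static unsigned long long memparse(const char *p, char **endp)
{
	unsigned long long v = strtoull(p, endp, 0);

	switch (**endp) {
	case 'G': v <<= 10; /* fall through */
	case 'M': v <<= 10; /* fall through */
	case 'K': v <<= 10; (*endp)++; break;
	}
	return v;
}

int main(void)
{
	char *end;

	printf("mem=512M -> %#llx\n", memparse("512M", &end));	/* 0x20000000 */
	printf("mem=junk -> %#llx (now rejected with -EINVAL)\n",
	       memparse("junk", &end));				/* 0 */
	return 0;
}
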
index 76b8cd953deed9f8a50d572cdc52b5edb68bc3b7..9efbdcc56425b18ce38224cf7b866c1bf66c5d5a 100644 (file)
@@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func)
 
 static u32 __init ati_sbx00_rev(int num, int slot, int func)
 {
-       u32 old, d;
+       u32 d;
 
-       d = read_pci_config(num, slot, func, 0x70);
-       old = d;
-       d &= ~(1<<8);
-       write_pci_config(num, slot, func, 0x70, d);
        d = read_pci_config(num, slot, func, 0x8);
        d &= 0xff;
-       write_pci_config(num, slot, func, 0x70, old);
 
        return d;
 }
@@ -160,13 +155,16 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 {
        u32 d, rev;
 
-       if (acpi_use_timer_override)
-               return;
-
        rev = ati_sbx00_rev(num, slot, func);
+       if (rev >= 0x40)
+               acpi_fix_pin2_polarity = 1;
+
        if (rev > 0x13)
                return;
 
+       if (acpi_use_timer_override)
+               return;
+
        /* check for IRQ0 interrupt swap */
        d = read_pci_config(num, slot, func, 0x64);
        if (!(d & (1<<14)))
index c8b4efad7ebb080faeb56a9f6b539ef8cb6d7493..fa41f7298c84569e053b52fc101e5cbf939b68b7 100644 (file)
@@ -65,6 +65,8 @@
 #define sysexit_audit  syscall_exit_work
 #endif
 
+       .section .entry.text, "ax"
+
 /*
  * We use macros for low-level operations which need to be overridden
  * for paravirtualization.  The following will never clobber any registers:
@@ -395,7 +397,7 @@ sysenter_past_esp:
         * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
         * pushed above; +8 corresponds to copy_thread's esp0 setting.
         */
-       pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp)
+       pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
        CFI_REL_OFFSET eip, 0
 
        pushl_cfi %eax
@@ -788,7 +790,7 @@ ENDPROC(ptregs_clone)
  */
 .section .init.rodata,"a"
 ENTRY(interrupt)
-.text
+.section .entry.text, "ax"
        .p2align 5
        .p2align CONFIG_X86_L1_CACHE_SHIFT
 ENTRY(irq_entries_start)
@@ -807,7 +809,7 @@ vector=FIRST_EXTERNAL_VECTOR
       .endif
       .previous
        .long 1b
-      .text
+      .section .entry.text, "ax"
 vector=vector+1
     .endif
   .endr
@@ -1409,8 +1411,7 @@ END(general_protection)
 #ifdef CONFIG_KVM_GUEST
 ENTRY(async_page_fault)
        RING0_EC_FRAME
-       pushl $do_async_page_fault
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $do_async_page_fault
        jmp error_code
        CFI_ENDPROC
 END(async_page_fault)
index aed1ffbeb0c9beaacbf0fe6bed4dc2714d12d22d..b72b4a6466a9cae5197e9d7c324f8bae88977f47 100644 (file)
@@ -61,6 +61,8 @@
 #define __AUDIT_ARCH_LE           0x40000000
 
        .code64
+       .section .entry.text, "ax"
+
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(mcount)
@@ -744,7 +746,7 @@ END(stub_rt_sigreturn)
  */
        .section .init.rodata,"a"
 ENTRY(interrupt)
-       .text
+       .section .entry.text
        .p2align 5
        .p2align CONFIG_X86_L1_CACHE_SHIFT
 ENTRY(irq_entries_start)
@@ -763,7 +765,7 @@ vector=FIRST_EXTERNAL_VECTOR
       .endif
       .previous
        .quad 1b
-      .text
+      .section .entry.text
 vector=vector+1
     .endif
   .endr
@@ -975,9 +977,12 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR \
        x86_platform_ipi smp_x86_platform_ipi
 
 #ifdef CONFIG_SMP
-.irpc idx, "01234567"
+.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
+       16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+.if NUM_INVALIDATE_TLB_VECTORS > \idx
 apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \
        invalidate_interrupt\idx smp_invalidate_interrupt
+.endif
 .endr
 #endif
 
@@ -1248,7 +1253,7 @@ ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
        decl PER_CPU_VAR(irq_count)
        jmp  error_exit
        CFI_ENDPROC
-END(do_hypervisor_callback)
+END(xen_do_hypervisor_callback)
 
 /*
  * Hypervisor uses this for application faults while it executes.
index 382eb2936d4d57f40db5cb360efb8c7421cdc0cf..a93742a57468770bd7f8be02dae04fc4765fe306 100644 (file)
@@ -437,18 +437,19 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                return;
        }
 
-       if (ftrace_push_return_trace(old, self_addr, &trace.depth,
-                   frame_pointer) == -EBUSY) {
-               *parent = old;
-               return;
-       }
-
        trace.func = self_addr;
+       trace.depth = current->curr_ret_stack + 1;
 
        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
-               current->curr_ret_stack--;
                *parent = old;
+               return;
+       }
+
+       if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+                   frame_pointer) == -EBUSY) {
+               *parent = old;
+               return;
        }
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
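
The essence of the reordering, as a toy model (sketch; filter_accepts() is an arbitrary stand-in for ftrace_graph_entry(), and depth stands in for current->curr_ret_stack): the entry filter now runs before the return address is pushed, so a filtered-out function no longer needs the curr_ret_stack-- rollback:

#include <stdio.h>

static int depth;	/* stands in for current->curr_ret_stack */

static int filter_accepts(unsigned long func)
{
	return func % 2 == 0;	/* arbitrary stand-in for ftrace_graph_entry() */
}

/* Old order: push first, then ask the filter -- needs a rollback. */
static void trace_entry_old(unsigned long func)
{
	depth++;			/* ftrace_push_return_trace() */
	if (!filter_accepts(func))
		depth--;		/* the rollback the patch removes */
}

/* New order: ask the filter first -- nothing to undo on rejection. */
static void trace_entry_new(unsigned long func)
{
	if (!filter_accepts(func))
		return;
	depth++;			/* ftrace_push_return_trace() */
}

int main(void)
{
	trace_entry_old(3);	/* rejected: pushed, then rolled back */
	trace_entry_new(2);	/* accepted */
	trace_entry_new(3);	/* rejected: nothing pushed */
	printf("depth = %d\n", depth);	/* 1 */
	return 0;
}
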
index d8cc18a83260e233b4d0d529b4dfdecf738e992c..ce0be7cd085e025759da9b1fd6445d34e7772de0 100644 (file)
@@ -73,7 +73,7 @@ MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
  */
 KERNEL_PAGES = LOWMEM_PAGES
 
-INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
+INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
 
 /*
@@ -623,7 +623,7 @@ ENTRY(initial_code)
  * BSS section
  */
 __PAGE_ALIGNED_BSS
-       .align PAGE_SIZE_asm
+       .align PAGE_SIZE
 #ifdef CONFIG_X86_PAE
 initial_pg_pmd:
        .fill 1024*KPMDS,4,0
@@ -644,7 +644,7 @@ ENTRY(swapper_pg_dir)
 #ifdef CONFIG_X86_PAE
 __PAGE_ALIGNED_DATA
        /* Page-aligned for the benefit of paravirt? */
-       .align PAGE_SIZE_asm
+       .align PAGE_SIZE
 ENTRY(initial_page_table)
        .long   pa(initial_pg_pmd+PGD_IDENT_ATTR),0     /* low identity map */
 # if KPMDS == 3
@@ -662,7 +662,7 @@ ENTRY(initial_page_table)
 # else
 #  error "Kernel PMDs should be 1, 2 or 3"
 # endif
-       .align PAGE_SIZE_asm            /* needs to be page-sized too */
+       .align PAGE_SIZE                /* needs to be page-sized too */
 #endif
 
 .data
index 4ff5968f12d295ac00a55ecbbae06dd97675a6c9..bfe8f729e0861a9edf636e2acfddcaec46261ca5 100644 (file)
@@ -503,7 +503,7 @@ static int hpet_assign_irq(struct hpet_dev *dev)
        if (!irq)
                return -EINVAL;
 
-       set_irq_data(irq, dev);
+       irq_set_handler_data(irq, dev);
 
        if (hpet_setup_msi_irq(irq))
                return -EINVAL;
index 20757cb2efa3121841f89bcbcd2f5e0ae0a6006c..d9ca749c123be72be8a67b3b23014f81fb701472 100644 (file)
@@ -112,7 +112,7 @@ static void make_8259A_irq(unsigned int irq)
 {
        disable_irq_nosync(irq);
        io_apic_irqs &= ~(1<<irq);
-       set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
+       irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
                                      i8259A_chip.name);
        enable_irq(irq);
 }
index 8eec0ec59af2a6545c074b330ca9d9565a7a3a70..8c968974253de19660b91f6da2b80f64e909c537 100644 (file)
 #include <linux/slab.h>
 #include <linux/thread_info.h>
 #include <linux/syscalls.h>
+#include <linux/bitmap.h>
 #include <asm/syscalls.h>
 
-/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
-static void set_bitmap(unsigned long *bitmap, unsigned int base,
-                      unsigned int extent, int new_value)
-{
-       unsigned int i;
-
-       for (i = base; i < base + extent; i++) {
-               if (new_value)
-                       __set_bit(i, bitmap);
-               else
-                       __clear_bit(i, bitmap);
-       }
-}
-
 /*
  * this changes the io permissions bitmap in the current task.
  */
@@ -69,7 +56,10 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
         */
        tss = &per_cpu(init_tss, get_cpu());
 
-       set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
+       if (turn_on)
+               bitmap_clear(t->io_bitmap_ptr, from, num);
+       else
+               bitmap_set(t->io_bitmap_ptr, from, num);
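
The conversion is behavior-preserving; a quick user-space check (sketch; these helpers are bit-by-bit stand-ins for the kernel's bitmap_set()/bitmap_clear(), and a 0 bit means "port allowed" in the I/O bitmap):

#include <stdio.h>
#include <string.h>

#define BITS 1024
#define BPL  (8 * sizeof(unsigned long))

static void bitmap_set_(unsigned long *b, unsigned from, unsigned n)
{
	for (unsigned i = from; i < from + n; i++)
		b[i / BPL] |= 1UL << (i % BPL);
}

static void bitmap_clear_(unsigned long *b, unsigned from, unsigned n)
{
	for (unsigned i = from; i < from + n; i++)
		b[i / BPL] &= ~(1UL << (i % BPL));
}

int main(void)
{
	unsigned long a[BITS / BPL], b[BITS / BPL];

	memset(a, 0xff, sizeof(a));	/* all ports denied initially */
	memset(b, 0xff, sizeof(b));

	/* sys_ioperm(0x70, 16, 1): allow -> clear bits */
	bitmap_clear_(a, 0x70, 16);
	for (unsigned i = 0x70; i < 0x80; i++)	/* the removed set_bitmap() loop */
		b[i / BPL] &= ~(1UL << (i % BPL));
	printf("after allow, equal: %d\n", !memcmp(a, b, sizeof(a)));	/* 1 */

	/* sys_ioperm(0x70, 16, 0): deny -> set bits again */
	bitmap_set_(a, 0x70, 16);
	for (unsigned i = 0x70; i < 0x80; i++)
		b[i / BPL] |= 1UL << (i % BPL);
	printf("after deny,  equal: %d\n", !memcmp(a, b, sizeof(a)));	/* 1 */
	return 0;
}
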
 
        /*
         * Search for a (possibly new) maximum. This is simple and stupid,
index 753136003af18e8369dc153802d0fc1b9ba40d2d..948a31eae75fdf837d9eee344108ce7b7fb4200d 100644 (file)
@@ -44,9 +44,9 @@ void ack_bad_irq(unsigned int irq)
 
 #define irq_stats(x)           (&per_cpu(irq_stat, x))
 /*
- * /proc/interrupts printing:
+ * /proc/interrupts printing for arch-specific interrupts
  */
-static int show_other_interrupts(struct seq_file *p, int prec)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
        int j;
 
@@ -122,59 +122,6 @@ static int show_other_interrupts(struct seq_file *p, int prec)
        return 0;
 }
 
-int show_interrupts(struct seq_file *p, void *v)
-{
-       unsigned long flags, any_count = 0;
-       int i = *(loff_t *) v, j, prec;
-       struct irqaction *action;
-       struct irq_desc *desc;
-
-       if (i > nr_irqs)
-               return 0;
-
-       for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
-               j *= 10;
-
-       if (i == nr_irqs)
-               return show_other_interrupts(p, prec);
-
-       /* print header */
-       if (i == 0) {
-               seq_printf(p, "%*s", prec + 8, "");
-               for_each_online_cpu(j)
-                       seq_printf(p, "CPU%-8d", j);
-               seq_putc(p, '\n');
-       }
-
-       desc = irq_to_desc(i);
-       if (!desc)
-               return 0;
-
-       raw_spin_lock_irqsave(&desc->lock, flags);
-       for_each_online_cpu(j)
-               any_count |= kstat_irqs_cpu(i, j);
-       action = desc->action;
-       if (!action && !any_count)
-               goto out;
-
-       seq_printf(p, "%*d: ", prec, i);
-       for_each_online_cpu(j)
-               seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-       seq_printf(p, " %8s", desc->irq_data.chip->name);
-       seq_printf(p, "-%-8s", desc->name);
-
-       if (action) {
-               seq_printf(p, "  %s", action->name);
-               while ((action = action->next) != NULL)
-                       seq_printf(p, ", %s", action->name);
-       }
-
-       seq_putc(p, '\n');
-out:
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
-       return 0;
-}
-
 /*
  * /proc/stat helpers
  */
@@ -284,6 +231,7 @@ void fixup_irqs(void)
        static int warned;
        struct irq_desc *desc;
        struct irq_data *data;
+       struct irq_chip *chip;
 
        for_each_irq_desc(irq, desc) {
                int break_affinity = 0;
@@ -298,10 +246,10 @@ void fixup_irqs(void)
                /* interrupt's are disabled at this point */
                raw_spin_lock(&desc->lock);
 
-               data = &desc->irq_data;
+               data = irq_desc_get_irq_data(desc);
                affinity = data->affinity;
                if (!irq_has_action(irq) ||
-                   cpumask_equal(affinity, cpu_online_mask)) {
+                   cpumask_subset(affinity, cpu_online_mask)) {
                        raw_spin_unlock(&desc->lock);
                        continue;
                }
@@ -318,16 +266,17 @@ void fixup_irqs(void)
                        affinity = cpu_all_mask;
                }
 
-               if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask)
-                       data->chip->irq_mask(data);
+               chip = irq_data_get_irq_chip(data);
+               if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
+                       chip->irq_mask(data);
 
-               if (data->chip->irq_set_affinity)
-                       data->chip->irq_set_affinity(data, affinity, true);
+               if (chip->irq_set_affinity)
+                       chip->irq_set_affinity(data, affinity, true);
                else if (!(warned++))
                        set_affinity = 0;
 
-               if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask)
-                       data->chip->irq_unmask(data);
+               if (!irqd_can_move_in_process_context(data) && chip->irq_unmask)
+                       chip->irq_unmask(data);
 
                raw_spin_unlock(&desc->lock);
 
@@ -359,10 +308,11 @@ void fixup_irqs(void)
                        irq = __this_cpu_read(vector_irq[vector]);
 
                        desc = irq_to_desc(irq);
-                       data = &desc->irq_data;
+                       data = irq_desc_get_irq_data(desc);
+                       chip = irq_data_get_irq_chip(data);
                        raw_spin_lock(&desc->lock);
-                       if (data->chip->irq_retrigger)
-                               data->chip->irq_retrigger(data);
+                       if (chip->irq_retrigger)
+                               chip->irq_retrigger(data);
                        raw_spin_unlock(&desc->lock);
                }
        }
index 9f76f89f43a4248cb8d7511d195fa5678a6849cc..f470e4ef993e057f6f3fd67fd75aecaaec1ddc5c 100644 (file)
@@ -72,6 +72,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
 static struct irqaction fpu_irq = {
        .handler = math_error_irq,
        .name = "fpu",
+       .flags = IRQF_NO_THREAD,
 };
 #endif
 
@@ -81,6 +82,7 @@ static struct irqaction fpu_irq = {
 static struct irqaction irq2 = {
        .handler = no_action,
        .name = "cascade",
+       .flags = IRQF_NO_THREAD,
 };
 
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
@@ -111,7 +113,7 @@ void __init init_ISA_irqs(void)
        legacy_pic->init(0);
 
        for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
-               set_irq_chip_and_handler_name(i, chip, handle_level_irq, name);
+               irq_set_chip_and_handler_name(i, chip, handle_level_irq, name);
 }
 
 void __init init_IRQ(void)
@@ -171,14 +173,77 @@ static void __init smp_intr_init(void)
        alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 
        /* IPIs for invalidation */
-       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
-       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
-       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
-       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
-       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
-       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
-       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
-       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
+#define ALLOC_INVTLB_VEC(NR) \
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+NR, \
+               invalidate_interrupt##NR)
+
+       switch (NUM_INVALIDATE_TLB_VECTORS) {
+       default:
+               ALLOC_INVTLB_VEC(31);
+       case 31:
+               ALLOC_INVTLB_VEC(30);
+       case 30:
+               ALLOC_INVTLB_VEC(29);
+       case 29:
+               ALLOC_INVTLB_VEC(28);
+       case 28:
+               ALLOC_INVTLB_VEC(27);
+       case 27:
+               ALLOC_INVTLB_VEC(26);
+       case 26:
+               ALLOC_INVTLB_VEC(25);
+       case 25:
+               ALLOC_INVTLB_VEC(24);
+       case 24:
+               ALLOC_INVTLB_VEC(23);
+       case 23:
+               ALLOC_INVTLB_VEC(22);
+       case 22:
+               ALLOC_INVTLB_VEC(21);
+       case 21:
+               ALLOC_INVTLB_VEC(20);
+       case 20:
+               ALLOC_INVTLB_VEC(19);
+       case 19:
+               ALLOC_INVTLB_VEC(18);
+       case 18:
+               ALLOC_INVTLB_VEC(17);
+       case 17:
+               ALLOC_INVTLB_VEC(16);
+       case 16:
+               ALLOC_INVTLB_VEC(15);
+       case 15:
+               ALLOC_INVTLB_VEC(14);
+       case 14:
+               ALLOC_INVTLB_VEC(13);
+       case 13:
+               ALLOC_INVTLB_VEC(12);
+       case 12:
+               ALLOC_INVTLB_VEC(11);
+       case 11:
+               ALLOC_INVTLB_VEC(10);
+       case 10:
+               ALLOC_INVTLB_VEC(9);
+       case 9:
+               ALLOC_INVTLB_VEC(8);
+       case 8:
+               ALLOC_INVTLB_VEC(7);
+       case 7:
+               ALLOC_INVTLB_VEC(6);
+       case 6:
+               ALLOC_INVTLB_VEC(5);
+       case 5:
+               ALLOC_INVTLB_VEC(4);
+       case 4:
+               ALLOC_INVTLB_VEC(3);
+       case 3:
+               ALLOC_INVTLB_VEC(2);
+       case 2:
+               ALLOC_INVTLB_VEC(1);
+       case 1:
+               ALLOC_INVTLB_VEC(0);
+               break;
+       }
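
The deliberately fall-through switch is needed because the handler symbol is formed by token pasting at preprocessing time, so no runtime loop can name it. A minimal demonstration of the mechanism (names are illustrative):

#include <stdio.h>

#define HANDLER(NR)	static void invalidate_interrupt##NR(void) \
			{ printf("handler %d\n", NR); }
HANDLER(0)
HANDLER(1)

#define CALL(NR)	invalidate_interrupt##NR()

int main(void)
{
	CALL(0);	/* expands to invalidate_interrupt0() */
	CALL(1);
	/* CALL(i) with a runtime i would not compile: the symbol name
	 * is built textually, which is why the switch above spells out
	 * every case and falls through from NUM-1 down to 0. */
	return 0;
}
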
 
        /* IPI for generic function call */
        alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
index a4130005028ac870dd50e296d98701842824e065..7c64c420a9f688a9add0b75235b01ea6119b3605 100644 (file)
@@ -533,15 +533,6 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
                }
                return NOTIFY_DONE;
 
-       case DIE_NMIWATCHDOG:
-               if (atomic_read(&kgdb_active) != -1) {
-                       /* KGDB CPU roundup: */
-                       kgdb_nmicallback(raw_smp_processor_id(), regs);
-                       return NOTIFY_STOP;
-               }
-               /* Enter debugger: */
-               break;
-
        case DIE_DEBUG:
                if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
                        if (user_mode(regs))
index d91c477b3f6234cf122a08cda38b8d9a571a27cf..c969fd9d156651c9d0cc6dae1a606bb091cbcde6 100644 (file)
@@ -1276,6 +1276,14 @@ static int __kprobes can_optimize(unsigned long paddr)
        if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
                return 0;
 
+       /*
+        * Do not optimize in the entry code due to the unstable
+        * stack handling.
+        */
+       if ((paddr >= (unsigned long)__entry_text_start) &&
+           (paddr <  (unsigned long)__entry_text_end))
+               return 0;
+
        /* Check there is enough space for a relative jump. */
        if (size - offset < RELATIVEJUMP_SIZE)
                return 0;
index 0fe6d1a66c38cf0aaea3383ac000eefbb5d34fca..c5610384ab167c162f33abbb6da8401b5cbba45d 100644 (file)
@@ -66,7 +66,6 @@ struct microcode_amd {
        unsigned int                    mpb[0];
 };
 
-#define UCODE_MAX_SIZE                 2048
 #define UCODE_CONTAINER_SECTION_HDR    8
 #define UCODE_CONTAINER_HEADER_SIZE    12
 
@@ -77,20 +76,20 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        u32 dummy;
 
-       memset(csig, 0, sizeof(*csig));
        if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
-               pr_warning("microcode: CPU%d: AMD CPU family 0x%x not "
-                          "supported\n", cpu, c->x86);
+               pr_warning("CPU%d: family %d not supported\n", cpu, c->x86);
                return -1;
        }
+
        rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy);
-       pr_info("CPU%d: patch_level=0x%x\n", cpu, csig->rev);
+       pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
+
        return 0;
 }
 
-static int get_matching_microcode(int cpu, void *mc, int rev)
+static int get_matching_microcode(int cpu, struct microcode_header_amd *mc_hdr,
+                                 int rev)
 {
-       struct microcode_header_amd *mc_header = mc;
        unsigned int current_cpu_id;
        u16 equiv_cpu_id = 0;
        unsigned int i = 0;
@@ -109,17 +108,17 @@ static int get_matching_microcode(int cpu, void *mc, int rev)
        if (!equiv_cpu_id)
                return 0;
 
-       if (mc_header->processor_rev_id != equiv_cpu_id)
+       if (mc_hdr->processor_rev_id != equiv_cpu_id)
                return 0;
 
        /* ucode might be chipset specific -- currently we don't support this */
-       if (mc_header->nb_dev_id || mc_header->sb_dev_id) {
-               pr_err("CPU%d: loading of chipset specific code not yet supported\n",
+       if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
+               pr_err("CPU%d: chipset specific code not yet supported\n",
                       cpu);
                return 0;
        }
 
-       if (mc_header->patch_id <= rev)
+       if (mc_hdr->patch_id <= rev)
                return 0;
 
        return 1;
@@ -144,71 +143,93 @@ static int apply_microcode_amd(int cpu)
 
        /* check current patch id and patch's id for match */
        if (rev != mc_amd->hdr.patch_id) {
-               pr_err("CPU%d: update failed (for patch_level=0x%x)\n",
+               pr_err("CPU%d: update failed for patch_level=0x%08x\n",
                       cpu, mc_amd->hdr.patch_id);
                return -1;
        }
 
-       pr_info("CPU%d: updated (new patch_level=0x%x)\n", cpu, rev);
+       pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
        uci->cpu_sig.rev = rev;
 
        return 0;
 }
 
-static void *
-get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size)
+static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
 {
-       unsigned int total_size;
-       u8 section_hdr[UCODE_CONTAINER_SECTION_HDR];
-       void *mc;
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
+       unsigned int max_size, actual_size;
+
+#define F1XH_MPB_MAX_SIZE 2048
+#define F14H_MPB_MAX_SIZE 1824
+#define F15H_MPB_MAX_SIZE 4096
+
+       switch (c->x86) {
+       case 0x14:
+               max_size = F14H_MPB_MAX_SIZE;
+               break;
+       case 0x15:
+               max_size = F15H_MPB_MAX_SIZE;
+               break;
+       default:
+               max_size = F1XH_MPB_MAX_SIZE;
+               break;
+       }
 
-       get_ucode_data(section_hdr, buf, UCODE_CONTAINER_SECTION_HDR);
+       actual_size = buf[4] + (buf[5] << 8);
 
-       if (section_hdr[0] != UCODE_UCODE_TYPE) {
-               pr_err("error: invalid type field in container file section header\n");
-               return NULL;
+       if (actual_size > size || actual_size > max_size) {
+               pr_err("section size mismatch\n");
+               return 0;
        }
 
-       total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8));
+       return actual_size;
+}
 
-       if (total_size > size || total_size > UCODE_MAX_SIZE) {
-               pr_err("error: size mismatch\n");
-               return NULL;
+static struct microcode_header_amd *
+get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size)
+{
+       struct microcode_header_amd *mc = NULL;
+       unsigned int actual_size = 0;
+
+       if (buf[0] != UCODE_UCODE_TYPE) {
+               pr_err("invalid type field in container file section header\n");
+               goto out;
        }
 
-       mc = vzalloc(UCODE_MAX_SIZE);
+       actual_size = verify_ucode_size(cpu, buf, size);
+       if (!actual_size)
+               goto out;
+
+       mc = vzalloc(actual_size);
        if (!mc)
-               return NULL;
+               goto out;
 
-       get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, total_size);
-       *mc_size = total_size + UCODE_CONTAINER_SECTION_HDR;
+       get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, actual_size);
+       *mc_size = actual_size + UCODE_CONTAINER_SECTION_HDR;
 
+out:
        return mc;
 }
 
 static int install_equiv_cpu_table(const u8 *buf)
 {
-       u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE];
-       unsigned int *buf_pos = (unsigned int *)container_hdr;
-       unsigned long size;
-
-       get_ucode_data(&container_hdr, buf, UCODE_CONTAINER_HEADER_SIZE);
-
-       size = buf_pos[2];
-
-       if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
-               pr_err("error: invalid type field in container file section header\n");
-               return 0;
+       unsigned int *ibuf = (unsigned int *)buf;
+       unsigned int type = ibuf[1];
+       unsigned int size = ibuf[2];
+
+       if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
+               pr_err("empty section/"
+                      "invalid type field in container file section header\n");
+               return -EINVAL;
        }
 
        equiv_cpu_table = vmalloc(size);
        if (!equiv_cpu_table) {
                pr_err("failed to allocate equivalent CPU table\n");
-               return 0;
+               return -ENOMEM;
        }
 
-       buf += UCODE_CONTAINER_HEADER_SIZE;
-       get_ucode_data(equiv_cpu_table, buf, size);
+       get_ucode_data(equiv_cpu_table, buf + UCODE_CONTAINER_HEADER_SIZE, size);
 
        return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */
 }
@@ -223,16 +244,16 @@ static enum ucode_state
 generic_load_microcode(int cpu, const u8 *data, size_t size)
 {
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+       struct microcode_header_amd *mc_hdr = NULL;
+       unsigned int mc_size, leftover;
+       int offset;
        const u8 *ucode_ptr = data;
        void *new_mc = NULL;
-       void *mc;
-       int new_rev = uci->cpu_sig.rev;
-       unsigned int leftover;
-       unsigned long offset;
+       unsigned int new_rev = uci->cpu_sig.rev;
        enum ucode_state state = UCODE_OK;
 
        offset = install_equiv_cpu_table(ucode_ptr);
-       if (!offset) {
+       if (offset < 0) {
                pr_err("failed to create equivalent cpu table\n");
                return UCODE_ERROR;
        }
@@ -241,64 +262,65 @@ generic_load_microcode(int cpu, const u8 *data, size_t size)
        leftover = size - offset;
 
        while (leftover) {
-               unsigned int uninitialized_var(mc_size);
-               struct microcode_header_amd *mc_header;
-
-               mc = get_next_ucode(ucode_ptr, leftover, &mc_size);
-               if (!mc)
+               mc_hdr = get_next_ucode(cpu, ucode_ptr, leftover, &mc_size);
+               if (!mc_hdr)
                        break;
 
-               mc_header = (struct microcode_header_amd *)mc;
-               if (get_matching_microcode(cpu, mc, new_rev)) {
+               if (get_matching_microcode(cpu, mc_hdr, new_rev)) {
                        vfree(new_mc);
-                       new_rev = mc_header->patch_id;
-                       new_mc  = mc;
+                       new_rev = mc_hdr->patch_id;
+                       new_mc  = mc_hdr;
                } else
-                       vfree(mc);
+                       vfree(mc_hdr);
 
                ucode_ptr += mc_size;
                leftover  -= mc_size;
        }
 
-       if (new_mc) {
-               if (!leftover) {
-                       vfree(uci->mc);
-                       uci->mc = new_mc;
-                       pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
-                                cpu, new_rev, uci->cpu_sig.rev);
-               } else {
-                       vfree(new_mc);
-                       state = UCODE_ERROR;
-               }
-       } else
+       if (!new_mc) {
                state = UCODE_NFOUND;
+               goto free_table;
+       }
 
+       if (!leftover) {
+               vfree(uci->mc);
+               uci->mc = new_mc;
+               pr_debug("CPU%d update ucode (0x%08x -> 0x%08x)\n",
+                        cpu, uci->cpu_sig.rev, new_rev);
+       } else {
+               vfree(new_mc);
+               state = UCODE_ERROR;
+       }
+
+free_table:
        free_equiv_cpu_table();
 
        return state;
 }
 
-static enum ucode_state request_microcode_fw(int cpu, struct device *device)
+static enum ucode_state request_microcode_amd(int cpu, struct device *device)
 {
        const char *fw_name = "amd-ucode/microcode_amd.bin";
-       const struct firmware *firmware;
-       enum ucode_state ret;
+       const struct firmware *fw;
+       enum ucode_state ret = UCODE_NFOUND;
 
-       if (request_firmware(&firmware, fw_name, device)) {
-               printk(KERN_ERR "microcode: failed to load file %s\n", fw_name);
-               return UCODE_NFOUND;
+       if (request_firmware(&fw, fw_name, device)) {
+               pr_err("failed to load file %s\n", fw_name);
+               goto out;
        }
 
-       if (*(u32 *)firmware->data != UCODE_MAGIC) {
-               pr_err("invalid UCODE_MAGIC (0x%08x)\n",
-                      *(u32 *)firmware->data);
-               return UCODE_ERROR;
+       ret = UCODE_ERROR;
+       if (*(u32 *)fw->data != UCODE_MAGIC) {
+               pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
+               goto fw_release;
        }
 
-       ret = generic_load_microcode(cpu, firmware->data, firmware->size);
+       ret = generic_load_microcode(cpu, fw->data, fw->size);
 
-       release_firmware(firmware);
+fw_release:
+       release_firmware(fw);
 
+out:
        return ret;
 }
 
@@ -319,7 +341,7 @@ static void microcode_fini_cpu_amd(int cpu)
 
 static struct microcode_ops microcode_amd_ops = {
        .request_microcode_user           = request_microcode_user,
-       .request_microcode_fw             = request_microcode_fw,
+       .request_microcode_fw             = request_microcode_amd,
        .collect_cpu_info                 = collect_cpu_info_amd,
        .apply_microcode                  = apply_microcode_amd,
        .microcode_fini_cpu               = microcode_fini_cpu_amd,
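
For orientation, the container layout that the parsing code above walks can be
sketched in C. This is an inference from install_equiv_cpu_table() and
verify_ucode_size(), not the kernel's actual structure definitions:

	/* Illustrative only: layout inferred from the parsing code above. */
	struct equiv_table_hdr {
		unsigned int magic;	/* UCODE_MAGIC, checked by the loader */
		unsigned int type;	/* UCODE_EQUIV_CPU_TABLE_TYPE */
		unsigned int size;	/* table size in bytes, must be non-zero */
	};

	/*
	 * Each patch section appears to start with an 8-byte header
	 * (UCODE_CONTAINER_SECTION_HDR): byte 0 carries the type, and
	 * bytes 4-5 the little-endian body size that verify_ucode_size()
	 * checks against the per-family maximum.
	 */
	static unsigned int section_body_size(const unsigned char *hdr)
	{
		return hdr[4] | ((unsigned int)hdr[5] << 8);
	}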
index 1cca374a2bacc8f1f5275847ef4d4cd2787c0545..87af68e0e1e1458a00d90d7e95e6755a4a332aa4 100644 (file)
@@ -417,8 +417,10 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
        if (err)
                return err;
 
-       if (microcode_init_cpu(cpu) == UCODE_ERROR)
-               err = -EINVAL;
+       if (microcode_init_cpu(cpu) == UCODE_ERROR) {
+               sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
+               return -EINVAL;
+       }
 
        return err;
 }
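
The mc_sysdev_add() change above applies the standard unwind rule: anything
registered before a later failure must be torn down on the error path, or a
stale sysfs group is left behind. A minimal sketch of the pattern, using
hypothetical names (example_attr_group, example_init):

	#include <linux/sysfs.h>

	extern const struct attribute_group example_attr_group;
	extern int example_init(void);	/* hypothetical stand-in */

	static int example_add(struct kobject *kobj)
	{
		int err = sysfs_create_group(kobj, &example_attr_group);

		if (err)
			return err;

		if (example_init() != 0) {
			/* tear down what we registered before failing */
			sysfs_remove_group(kobj, &example_attr_group);
			return -EINVAL;
		}
		return 0;
	}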
index ff4554198981217172f8db1a43cac5228f1ff41d..99fa3adf014190d166c9c8d7b2250febf27cec42 100644 (file)
@@ -110,12 +110,9 @@ void show_regs_common(void)
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
-       printk(KERN_CONT " ");
-       printk(KERN_CONT "%s %s", vendor, product);
-       if (board) {
-               printk(KERN_CONT "/");
-               printk(KERN_CONT "%s", board);
-       }
+       printk(KERN_CONT " %s %s", vendor, product);
+       if (board)
+               printk(KERN_CONT "/%s", board);
        printk(KERN_CONT "\n");
 }
 
index fc7aae1e2bc72a9907eee4aa1011fce507091f4a..715037caeb43518054f083454ce7be53ffcafe8b 100644 (file)
@@ -285,6 +285,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
                },
        },
+       {       /* Handle problems with rebooting on VersaLogic Menlow boards */
+               .callback = set_bios_reboot,
+               .ident = "VersaLogic Menlow based board",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
+                       DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
+               },
+       },
        { }
 };
 
index b3143bc74e6c4fe6564af79ff12a584694cec295..b176f2b1f45d8ddfd27fad371411e5c76ec832f5 100644 (file)
@@ -294,10 +294,32 @@ static void __init init_gbpages(void)
        else
                direct_gbpages = 0;
 }
+
+static void __init cleanup_highmap_brk_end(void)
+{
+       pud_t *pud;
+       pmd_t *pmd;
+
+       mmu_cr4_features = read_cr4();
+
+       /*
+        * _brk_end cannot change anymore, but it and _end may be
+        * located on different 2M pages. cleanup_highmap(), however,
+        * can only consider _end when it runs, so destroy any
+        * mappings beyond _brk_end here.
+        */
+       pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
+       pmd = pmd_offset(pud, _brk_end - 1);
+       while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
+               pmd_clear(pmd);
+}
 #else
 static inline void init_gbpages(void)
 {
 }
+static inline void cleanup_highmap_brk_end(void)
+{
+}
 #endif
 
 static void __init reserve_brk(void)
@@ -308,6 +330,8 @@ static void __init reserve_brk(void)
        /* Mark brk area as locked down and no longer taking any
           new allocations */
        _brk_start = 0;
+
+       cleanup_highmap_brk_end();
 }
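
The comment in cleanup_highmap_brk_end() rests on one arithmetic fact: on
x86-64, two addresses are covered by the same 2M kernel mapping exactly when
they select the same PMD slot. A standalone sketch of that check, assuming the
usual PMD_SHIFT of 21 and hypothetical symbol addresses:

	#include <stdbool.h>
	#include <stdio.h>

	#define PMD_SHIFT 21	/* 2M pages on x86-64 */

	/* true if a and b fall under the same 2M PMD mapping */
	static bool same_pmd(unsigned long a, unsigned long b)
	{
		return (a >> PMD_SHIFT) == (b >> PMD_SHIFT);
	}

	int main(void)
	{
		/* hypothetical _brk_end/_end straddling a 2M boundary */
		unsigned long brk_end = 0xffffffff815ff000UL;
		unsigned long end     = 0xffffffff81601000UL;

		printf("same PMD: %d\n", same_pmd(brk_end - 1, end - 1)); /* 0 */
		return 0;
	}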
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -695,15 +719,6 @@ static int __init parse_reservelow(char *p)
 
 early_param("reservelow", parse_reservelow);
 
-static u64 __init get_max_mapped(void)
-{
-       u64 end = max_pfn_mapped;
-
-       end <<= PAGE_SHIFT;
-
-       return end;
-}
-
 /*
  * Determine if we were loaded by an EFI loader.  If so, then we have also been
  * passed the efi memmap, systab, etc., so we should use these data structures
@@ -719,8 +734,6 @@ static u64 __init get_max_mapped(void)
 
 void __init setup_arch(char **cmdline_p)
 {
-       int acpi = 0;
-       int amd = 0;
        unsigned long flags;
 
 #ifdef CONFIG_X86_32
@@ -999,19 +1012,7 @@ void __init setup_arch(char **cmdline_p)
 
        early_acpi_boot_init();
 
-#ifdef CONFIG_ACPI_NUMA
-       /*
-        * Parse SRAT to discover nodes.
-        */
-       acpi = acpi_numa_init();
-#endif
-
-#ifdef CONFIG_AMD_NUMA
-       if (!acpi)
-               amd = !amd_numa_init(0, max_pfn);
-#endif
-
-       initmem_init(0, max_pfn, acpi, amd);
+       initmem_init();
        memblock_find_dma_reserve();
        dma32_reserve_bootmem();
 
@@ -1055,9 +1056,7 @@ void __init setup_arch(char **cmdline_p)
 
        prefill_possible_map();
 
-#ifdef CONFIG_X86_64
        init_cpu_to_node();
-#endif
 
        init_apic_mappings();
        ioapic_and_gsi_init();
index 002b79685f738014a8cf9127f804851459a6216b..71f4727da3735ffc93c2346a2b7712208df9a020 100644 (file)
@@ -225,10 +225,15 @@ void __init setup_per_cpu_areas(void)
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
 #endif
+#ifdef CONFIG_X86_32
+               per_cpu(x86_cpu_to_logical_apicid, cpu) =
+                       early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
+#endif
 #ifdef CONFIG_X86_64
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
                        IRQ_STACK_SIZE - 64;
+#endif
 #ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
@@ -241,7 +246,6 @@ void __init setup_per_cpu_areas(void)
                 * So set them all (boot cpu and all APs).
                 */
                set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
-#endif
 #endif
                /*
                 * Up to this point, the boot CPU has been using .init.data
@@ -256,7 +260,10 @@ void __init setup_per_cpu_areas(void)
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
 #endif
-#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
+#ifdef CONFIG_X86_32
+       early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
+#endif
+#ifdef CONFIG_NUMA
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
 #endif
 
index 09d0172a005955e8119df8a175961684b9956ca9..e9efdfd51c8d1fc822fb9b5ff50adc50cd34a4a0 100644 (file)
 #include <asm/smpboot_hooks.h>
 #include <asm/i8259.h>
 
-#ifdef CONFIG_X86_32
-u8 apicid_2_node[MAX_APICID];
-#endif
-
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
@@ -131,68 +127,14 @@ EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
+DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
+
 /* Per CPU bogomips and other parameters */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 atomic_t init_deasserted;
 
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
-/* which node each logical CPU is on */
-int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
-EXPORT_SYMBOL(cpu_to_node_map);
-
-/* set up a mapping between cpu and node. */
-static void map_cpu_to_node(int cpu, int node)
-{
-       printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
-       cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
-       cpu_to_node_map[cpu] = node;
-}
-
-/* undo a mapping between cpu and node. */
-static void unmap_cpu_to_node(int cpu)
-{
-       int node;
-
-       printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
-       for (node = 0; node < MAX_NUMNODES; node++)
-               cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
-       cpu_to_node_map[cpu] = 0;
-}
-#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
-#define map_cpu_to_node(cpu, node)     ({})
-#define unmap_cpu_to_node(cpu) ({})
-#endif
-
-#ifdef CONFIG_X86_32
-static int boot_cpu_logical_apicid;
-
-u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
-                                       { [0 ... NR_CPUS-1] = BAD_APICID };
-
-static void map_cpu_to_logical_apicid(void)
-{
-       int cpu = smp_processor_id();
-       int apicid = logical_smp_processor_id();
-       int node = apic->apicid_to_node(apicid);
-
-       if (!node_online(node))
-               node = first_online_node;
-
-       cpu_2_logical_apicid[cpu] = apicid;
-       map_cpu_to_node(cpu, node);
-}
-
-void numa_remove_cpu(int cpu)
-{
-       cpu_2_logical_apicid[cpu] = BAD_APICID;
-       unmap_cpu_to_node(cpu);
-}
-#else
-#define map_cpu_to_logical_apicid()  do {} while (0)
-#endif
-
 /*
  * Report back to the Boot Processor.
  * Running on AP.
@@ -260,7 +202,6 @@ static void __cpuinit smp_callin(void)
                apic->smp_callin_clear_local_apic();
        setup_local_APIC();
        end_local_APIC_setup();
-       map_cpu_to_logical_apicid();
 
        /*
         * Need to setup vector mappings before we enable interrupts.
@@ -356,23 +297,6 @@ notrace static void __cpuinit start_secondary(void *unused)
        cpu_idle();
 }
 
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* In this case, llc_shared_map is a pointer to a cpumask. */
-static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
-                                   const struct cpuinfo_x86 *src)
-{
-       struct cpumask *llc = dst->llc_shared_map;
-       *dst = *src;
-       dst->llc_shared_map = llc;
-}
-#else
-static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
-                                   const struct cpuinfo_x86 *src)
-{
-       *dst = *src;
-}
-#endif /* CONFIG_CPUMASK_OFFSTACK */
-
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
@@ -382,7 +306,7 @@ void __cpuinit smp_store_cpu_info(int id)
 {
        struct cpuinfo_x86 *c = &cpu_data(id);
 
-       copy_cpuinfo_x86(c, &boot_cpu_data);
+       *c = boot_cpu_data;
        c->cpu_index = id;
        if (id != 0)
                identify_secondary_cpu(c);
@@ -390,15 +314,12 @@ void __cpuinit smp_store_cpu_info(int id)
 
 static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
 {
-       struct cpuinfo_x86 *c1 = &cpu_data(cpu1);
-       struct cpuinfo_x86 *c2 = &cpu_data(cpu2);
-
        cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
        cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1));
        cpumask_set_cpu(cpu1, cpu_core_mask(cpu2));
        cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
-       cpumask_set_cpu(cpu1, c2->llc_shared_map);
-       cpumask_set_cpu(cpu2, c1->llc_shared_map);
+       cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
+       cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
 }
 
 
@@ -415,6 +336,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 
                        if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
                                if (c->phys_proc_id == o->phys_proc_id &&
+                                   per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i) &&
                                    c->compute_unit_id == o->compute_unit_id)
                                        link_thread_siblings(cpu, i);
                        } else if (c->phys_proc_id == o->phys_proc_id &&
@@ -426,7 +348,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
        }
 
-       cpumask_set_cpu(cpu, c->llc_shared_map);
+       cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
 
        if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
                cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ -437,8 +359,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
        for_each_cpu(i, cpu_sibling_setup_mask) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-                       cpumask_set_cpu(i, c->llc_shared_map);
-                       cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
+                       cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
+                       cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpumask_set_cpu(i, cpu_core_mask(cpu));
@@ -477,7 +399,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
            !(cpu_has(c, X86_FEATURE_AMD_DCM)))
                return cpu_core_mask(cpu);
        else
-               return c->llc_shared_map;
+               return cpu_llc_shared_mask(cpu);
 }
 
 static void impress_friends(void)
@@ -969,7 +891,6 @@ static __init void disable_smp(void)
                physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
        else
                physid_set_mask_of_physid(0, &phys_cpu_present_map);
-       map_cpu_to_logical_apicid();
        cpumask_set_cpu(0, cpu_sibling_mask(0));
        cpumask_set_cpu(0, cpu_core_mask(0));
 }
@@ -1098,21 +1019,19 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 
        preempt_disable();
        smp_cpu_index_default();
-       memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info));
-       cpumask_copy(cpu_callin_mask, cpumask_of(0));
-       mb();
+
        /*
         * Setup boot CPU information
         */
        smp_store_cpu_info(0); /* Final full version of the data */
-#ifdef CONFIG_X86_32
-       boot_cpu_logical_apicid = logical_smp_processor_id();
-#endif
+       cpumask_copy(cpu_callin_mask, cpumask_of(0));
+       mb();
+
        current_thread_info()->cpu = 0;  /* needed? */
        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-               zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
+               zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);
 
@@ -1148,8 +1067,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 
        bsp_end_local_APIC_setup();
 
-       map_cpu_to_logical_apicid();
-
        if (apic->setup_portio_remap)
                apic->setup_portio_remap();
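
The conversion in this file replaces the old per-cpuinfo llc_shared_map with a
plain per-CPU cpumask. The cpu_llc_shared_mask() accessor used throughout is
presumably a one-line wrapper of the following shape; this is an inference
from the call sites, not the actual header definition:

	/* assumed accessor shape; the real declaration lives in a header */
	static inline struct cpumask *cpu_llc_shared_mask(int cpu)
	{
		return per_cpu(cpu_llc_shared_map, cpu);
	}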
 
index b35786dc9b8f133fc03fd5f25629e62d1f6ae287..5f181742e8f91c9510fbac2e0fd0ec7e12daf34d 100644 (file)
@@ -340,3 +340,6 @@ ENTRY(sys_call_table)
        .long sys_fanotify_init
        .long sys_fanotify_mark
        .long sys_prlimit64             /* 340 */
+       .long sys_name_to_handle_at
+       .long sys_open_by_handle_at
+       .long sys_clock_adjtime
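
With sys_prlimit64 occupying slot 340, the three new entries land at 341-343
on 32-bit x86. Before libc grows wrappers, they can be reached by number; the
sketch below assumes slot 341 for name_to_handle_at and a fixed-size stand-in
for struct file_handle:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __NR_name_to_handle_at
	#define __NR_name_to_handle_at 341	/* inferred from the table above */
	#endif

	struct my_file_handle {		/* stand-in for struct file_handle */
		unsigned int handle_bytes;
		int handle_type;
		unsigned char f_handle[128];
	};

	int main(void)
	{
		struct my_file_handle fh = { .handle_bytes = sizeof(fh.f_handle) };
		int mount_id;

		if (syscall(__NR_name_to_handle_at, AT_FDCWD, "/etc/hostname",
			    &fh, &mount_id, 0) != 0)
			perror("name_to_handle_at");
		else
			printf("handle: %u bytes, type %d\n",
			       fh.handle_bytes, fh.handle_type);
		return 0;
	}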
index bf4700755184e32d4b4e549bd19f4014caa46468..0381e1f3baed0dbfd5215c3e82ac767747796012 100644 (file)
@@ -105,6 +105,7 @@ SECTIONS
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
+               ENTRY_TEXT
                IRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
@@ -305,7 +306,7 @@ SECTIONS
        }
 
 #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
-       PERCPU(THREAD_SIZE)
+       PERCPU(PAGE_SIZE)
 #endif
 
        . = ALIGN(PAGE_SIZE);
index 1b950d151e58a3154568f78ae21b8260fe77335c..9796c2f3d0745e8b73b690fff13f7529b5792507 100644 (file)
@@ -52,6 +52,7 @@ extern void *__memcpy(void *, const void *, __kernel_size_t);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(memmove);
 
 EXPORT_SYMBOL(empty_zero_page);
 #ifndef CONFIG_PARAVIRT
index 54ce246a383ee0fa029d444b99b8faca2d6c3702..63fec1531e89be18889e0bfd240191c2954f03ac 100644 (file)
@@ -2777,6 +2777,8 @@ static int dr_interception(struct vcpu_svm *svm)
                        kvm_register_write(&svm->vcpu, reg, val);
        }
 
+       skip_emulated_instruction(&svm->vcpu);
+
        return 1;
 }
 
index 1357d7cf4ec86d3d4c05d4ec8d13a701db8ff2d8..db932760ea8228eceb07c4c9e36a8215b53269a5 100644 (file)
@@ -62,21 +62,21 @@ TRACE_EVENT(kvm_hv_hypercall,
        TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
 
        TP_STRUCT__entry(
-               __field(        __u16,          code            )
-               __field(        bool,           fast            )
                __field(        __u16,          rep_cnt         )
                __field(        __u16,          rep_idx         )
                __field(        __u64,          ingpa           )
                __field(        __u64,          outgpa          )
+               __field(        __u16,          code            )
+               __field(        bool,           fast            )
        ),
 
        TP_fast_assign(
-               __entry->code           = code;
-               __entry->fast           = fast;
                __entry->rep_cnt        = rep_cnt;
                __entry->rep_idx        = rep_idx;
                __entry->ingpa          = ingpa;
                __entry->outgpa         = outgpa;
+               __entry->code           = code;
+               __entry->fast           = fast;
        ),
 
        TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
index eba687f0cc0cee1d630c6630e539d78f7b478836..b9ec1c74943c67d4b3f89b78bc70c8c178af60a7 100644 (file)
@@ -847,7 +847,7 @@ static void __init lguest_init_IRQ(void)
 void lguest_setup_irq(unsigned int irq)
 {
        irq_alloc_desc_at(irq, 0);
-       set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
+       irq_set_chip_and_handler_name(irq, &lguest_irq_controller,
                                      handle_level_irq, "level");
 }
 
@@ -995,7 +995,7 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
 static void lguest_time_init(void)
 {
        /* Set up the timer interrupt (0) to go to our simple timer routine */
-       set_irq_handler(0, lguest_time_irq);
+       irq_set_handler(0, lguest_time_irq);
 
        clocksource_register(&lguest_clock);
 
index 2cda60a06e654ae6e66c5f8f4dadb1f85c5b7b1d..e8e7e0d06f4210daeec66f198b13a79a025eac6c 100644 (file)
 
 /* if you want SMP support, implement these with real spinlocks */
 .macro LOCK reg
-       pushfl
-       CFI_ADJUST_CFA_OFFSET 4
+       pushfl_cfi
        cli
 .endm
 
 .macro UNLOCK reg
-       popfl
-       CFI_ADJUST_CFA_OFFSET -4
+       popfl_cfi
 .endm
 
 #define BEGIN(op) \
index 71e080de3352471fca97c2b29816cddcbb746524..391a083674b443a6ae6efa244bfc430739e579c4 100644 (file)
 #include <asm/dwarf2.h>
 
 .macro SAVE reg
-       pushl %\reg
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %\reg
        CFI_REL_OFFSET \reg, 0
 .endm
 
 .macro RESTORE reg
-       popl %\reg
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %\reg
        CFI_RESTORE \reg
 .endm
 
index adbccd0bbb78a1b4905be30ce7ddea89fad4038f..78d16a554db00f51c13b7d49df2c4cfafabcb276 100644 (file)
@@ -50,11 +50,9 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
           */           
 ENTRY(csum_partial)
        CFI_STARTPROC
-       pushl %esi
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %esi
        CFI_REL_OFFSET esi, 0
-       pushl %ebx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ebx
        CFI_REL_OFFSET ebx, 0
        movl 20(%esp),%eax      # Function arg: unsigned int sum
        movl 16(%esp),%ecx      # Function arg: int len
@@ -132,11 +130,9 @@ ENTRY(csum_partial)
        jz 8f
        roll $8, %eax
 8:
-       popl %ebx
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %ebx
        CFI_RESTORE ebx
-       popl %esi
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %esi
        CFI_RESTORE esi
        ret
        CFI_ENDPROC
@@ -148,11 +144,9 @@ ENDPROC(csum_partial)
 
 ENTRY(csum_partial)
        CFI_STARTPROC
-       pushl %esi
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %esi
        CFI_REL_OFFSET esi, 0
-       pushl %ebx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ebx
        CFI_REL_OFFSET ebx, 0
        movl 20(%esp),%eax      # Function arg: unsigned int sum
        movl 16(%esp),%ecx      # Function arg: int len
@@ -260,11 +254,9 @@ ENTRY(csum_partial)
        jz 90f
        roll $8, %eax
 90: 
-       popl %ebx
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %ebx
        CFI_RESTORE ebx
-       popl %esi
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %esi
        CFI_RESTORE esi
        ret
        CFI_ENDPROC
@@ -309,14 +301,11 @@ ENTRY(csum_partial_copy_generic)
        CFI_STARTPROC
        subl  $4,%esp   
        CFI_ADJUST_CFA_OFFSET 4
-       pushl %edi
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %edi
        CFI_REL_OFFSET edi, 0
-       pushl %esi
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %esi
        CFI_REL_OFFSET esi, 0
-       pushl %ebx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ebx
        CFI_REL_OFFSET ebx, 0
        movl ARGBASE+16(%esp),%eax      # sum
        movl ARGBASE+12(%esp),%ecx      # len
@@ -426,17 +415,13 @@ DST(      movb %cl, (%edi)        )
 
 .previous
 
-       popl %ebx
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %ebx
        CFI_RESTORE ebx
-       popl %esi
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %esi
        CFI_RESTORE esi
-       popl %edi
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %edi
        CFI_RESTORE edi
-       popl %ecx                       # equivalent to addl $4,%esp
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %ecx                   # equivalent to addl $4,%esp
        ret     
        CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
@@ -459,14 +444,11 @@ ENDPROC(csum_partial_copy_generic)
                
 ENTRY(csum_partial_copy_generic)
        CFI_STARTPROC
-       pushl %ebx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ebx
        CFI_REL_OFFSET ebx, 0
-       pushl %edi
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %edi
        CFI_REL_OFFSET edi, 0
-       pushl %esi
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %esi
        CFI_REL_OFFSET esi, 0
        movl ARGBASE+4(%esp),%esi       #src
        movl ARGBASE+8(%esp),%edi       #dst    
@@ -527,14 +509,11 @@ DST(      movb %dl, (%edi)         )
        jmp  7b                 
 .previous                              
 
-       popl %esi
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %esi
        CFI_RESTORE esi
-       popl %edi
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %edi
        CFI_RESTORE edi
-       popl %ebx
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %ebx
        CFI_RESTORE ebx
        ret
        CFI_ENDPROC
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
new file mode 100644 (file)
index 0000000..0ecb843
--- /dev/null
@@ -0,0 +1,197 @@
+/*
+ * Normally compiler builtins are used, but sometimes the compiler emits
+ * calls to out-of-line code instead. Based on asm-i386/string.h.
+ *
+ * This assembly file is rewritten from the C version in memmove_64.c.
+ *     - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
+ */
+#define _STRING_C
+#include <linux/linkage.h>
+#include <asm/dwarf2.h>
+
+#undef memmove
+
+/*
+ * Implement memmove(). This can handle overlap between src and dst.
+ *
+ * Input:
+ * rdi: dest
+ * rsi: src
+ * rdx: count
+ *
+ * Output:
+ * rax: dest
+ */
+ENTRY(memmove)
+       CFI_STARTPROC
+       /* Sizes of 32 bytes or more are handled by the block loops */
+       mov %rdi, %rax
+       cmp $0x20, %rdx
+       jb      1f
+
+       /* Decide forward/backward copy mode */
+       cmp %rdi, %rsi
+       jb      2f
+
+       /*
+        * The movsq instruction has a high startup latency,
+        * so handle small sizes with general-purpose registers.
+        */
+       cmp  $680, %rdx
+       jb      3f
+       /*
+        * movsq is only worthwhile when source and destination share alignment.
+        */
+
+       cmpb %dil, %sil
+       je 4f
+3:
+       sub $0x20, %rdx
+       /*
+        * We copy 32 bytes forward per loop iteration.
+        */
+5:
+       sub $0x20, %rdx
+       movq 0*8(%rsi), %r11
+       movq 1*8(%rsi), %r10
+       movq 2*8(%rsi), %r9
+       movq 3*8(%rsi), %r8
+       leaq 4*8(%rsi), %rsi
+
+       movq %r11, 0*8(%rdi)
+       movq %r10, 1*8(%rdi)
+       movq %r9, 2*8(%rdi)
+       movq %r8, 3*8(%rdi)
+       leaq 4*8(%rdi), %rdi
+       jae 5b
+       addq $0x20, %rdx
+       jmp 1f
+       /*
+        * Handle data forward by movsq.
+        */
+       .p2align 4
+4:
+       movq %rdx, %rcx
+       movq -8(%rsi, %rdx), %r11
+       lea -8(%rdi, %rdx), %r10
+       shrq $3, %rcx
+       rep movsq
+       movq %r11, (%r10)
+       jmp 13f
+       /*
+        * Handle data backward by movsq.
+        */
+       .p2align 4
+7:
+       movq %rdx, %rcx
+       movq (%rsi), %r11
+       movq %rdi, %r10
+       leaq -8(%rsi, %rdx), %rsi
+       leaq -8(%rdi, %rdx), %rdi
+       shrq $3, %rcx
+       std
+       rep movsq
+       cld
+       movq %r11, (%r10)
+       jmp 13f
+
+       /*
+        * Prepare for a backward copy.
+        */
+       .p2align 4
+2:
+       cmp $680, %rdx
+       jb 6f
+       cmp %dil, %sil
+       je 7b
+6:
+       /*
+        * Point src and dst at the tail for the backward copy.
+        */
+       addq %rdx, %rsi
+       addq %rdx, %rdi
+       subq $0x20, %rdx
+       /*
+        * We copy 32 bytes backward per loop iteration.
+        */
+8:
+       subq $0x20, %rdx
+       movq -1*8(%rsi), %r11
+       movq -2*8(%rsi), %r10
+       movq -3*8(%rsi), %r9
+       movq -4*8(%rsi), %r8
+       leaq -4*8(%rsi), %rsi
+
+       movq %r11, -1*8(%rdi)
+       movq %r10, -2*8(%rdi)
+       movq %r9, -3*8(%rdi)
+       movq %r8, -4*8(%rdi)
+       leaq -4*8(%rdi), %rdi
+       jae 8b
+       /*
+        * Step src and dst back to the head for the remaining bytes.
+        */
+       addq $0x20, %rdx
+       subq %rdx, %rsi
+       subq %rdx, %rdi
+1:
+       cmpq $16, %rdx
+       jb 9f
+       /*
+        * Copy 16 to 31 remaining bytes.
+        */
+       movq 0*8(%rsi), %r11
+       movq 1*8(%rsi), %r10
+       movq -2*8(%rsi, %rdx), %r9
+       movq -1*8(%rsi, %rdx), %r8
+       movq %r11, 0*8(%rdi)
+       movq %r10, 1*8(%rdi)
+       movq %r9, -2*8(%rdi, %rdx)
+       movq %r8, -1*8(%rdi, %rdx)
+       jmp 13f
+       .p2align 4
+9:
+       cmpq $8, %rdx
+       jb 10f
+       /*
+        * Copy 8 to 15 remaining bytes.
+        */
+       movq 0*8(%rsi), %r11
+       movq -1*8(%rsi, %rdx), %r10
+       movq %r11, 0*8(%rdi)
+       movq %r10, -1*8(%rdi, %rdx)
+       jmp 13f
+10:
+       cmpq $4, %rdx
+       jb 11f
+       /*
+        * Copy 4 to 7 remaining bytes.
+        */
+       movl (%rsi), %r11d
+       movl -4(%rsi, %rdx), %r10d
+       movl %r11d, (%rdi)
+       movl %r10d, -4(%rdi, %rdx)
+       jmp 13f
+11:
+       cmp $2, %rdx
+       jb 12f
+       /*
+        * Copy 2 or 3 remaining bytes.
+        */
+       movw (%rsi), %r11w
+       movw -2(%rsi, %rdx), %r10w
+       movw %r11w, (%rdi)
+       movw %r10w, -2(%rdi, %rdx)
+       jmp 13f
+12:
+       cmp $1, %rdx
+       jb 13f
+       /*
+        * Copy the final byte.
+        */
+       movb (%rsi), %r11b
+       movb %r11b, (%rdi)
+13:
+       retq
+       CFI_ENDPROC
+ENDPROC(memmove)
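
What the forward/backward mode selection above ultimately guarantees is
memmove's defining property: correct results even when source and destination
overlap. A small userspace check of both overlap directions against the libc
implementation:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[16] = "abcdefghij";

		/* overlapping shift right: requires a backward copy */
		memmove(buf + 2, buf, 10);
		printf("%.12s\n", buf);	/* ababcdefghij */

		/* overlapping shift left: requires a forward copy */
		memmove(buf, buf + 2, 10);
		printf("%.10s\n", buf);	/* abcdefghij */
		return 0;
	}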
diff --git a/arch/x86/lib/memmove_64.c b/arch/x86/lib/memmove_64.c
deleted file mode 100644 (file)
index 6d0f0ec..0000000
+++ /dev/null
@@ -1,192 +0,0 @@
-/* Normally compiler builtins are used, but sometimes the compiler calls out
-   of line code. Based on asm-i386/string.h.
- */
-#define _STRING_C
-#include <linux/string.h>
-#include <linux/module.h>
-
-#undef memmove
-void *memmove(void *dest, const void *src, size_t count)
-{
-       unsigned long d0,d1,d2,d3,d4,d5,d6,d7;
-       char *ret;
-
-       __asm__ __volatile__(
-               /* Handle more 32bytes in loop */
-               "mov %2, %3\n\t"
-               "cmp $0x20, %0\n\t"
-               "jb     1f\n\t"
-
-               /* Decide forward/backward copy mode */
-               "cmp %2, %1\n\t"
-               "jb     2f\n\t"
-
-               /*
-                * movsq instruction have many startup latency
-                * so we handle small size by general register.
-                */
-               "cmp  $680, %0\n\t"
-               "jb 3f\n\t"
-               /*
-                * movsq instruction is only good for aligned case.
-                */
-               "cmpb %%dil, %%sil\n\t"
-               "je 4f\n\t"
-               "3:\n\t"
-               "sub $0x20, %0\n\t"
-               /*
-                * We gobble 32byts forward in each loop.
-                */
-               "5:\n\t"
-               "sub $0x20, %0\n\t"
-               "movq 0*8(%1), %4\n\t"
-               "movq 1*8(%1), %5\n\t"
-               "movq 2*8(%1), %6\n\t"
-               "movq 3*8(%1), %7\n\t"
-               "leaq 4*8(%1), %1\n\t"
-
-               "movq %4, 0*8(%2)\n\t"
-               "movq %5, 1*8(%2)\n\t"
-               "movq %6, 2*8(%2)\n\t"
-               "movq %7, 3*8(%2)\n\t"
-               "leaq 4*8(%2), %2\n\t"
-               "jae 5b\n\t"
-               "addq $0x20, %0\n\t"
-               "jmp 1f\n\t"
-               /*
-                * Handle data forward by movsq.
-                */
-               ".p2align 4\n\t"
-               "4:\n\t"
-               "movq %0, %8\n\t"
-               "movq -8(%1, %0), %4\n\t"
-               "lea -8(%2, %0), %5\n\t"
-               "shrq $3, %8\n\t"
-               "rep movsq\n\t"
-               "movq %4, (%5)\n\t"
-               "jmp 13f\n\t"
-               /*
-                * Handle data backward by movsq.
-                */
-               ".p2align 4\n\t"
-               "7:\n\t"
-               "movq %0, %8\n\t"
-               "movq (%1), %4\n\t"
-               "movq %2, %5\n\t"
-               "leaq -8(%1, %0), %1\n\t"
-               "leaq -8(%2, %0), %2\n\t"
-               "shrq $3, %8\n\t"
-               "std\n\t"
-               "rep movsq\n\t"
-               "cld\n\t"
-               "movq %4, (%5)\n\t"
-               "jmp 13f\n\t"
-
-               /*
-                * Start to prepare for backward copy.
-                */
-               ".p2align 4\n\t"
-               "2:\n\t"
-               "cmp $680, %0\n\t"
-               "jb 6f \n\t"
-               "cmp %%dil, %%sil\n\t"
-               "je 7b \n\t"
-               "6:\n\t"
-               /*
-                * Calculate copy position to tail.
-                */
-               "addq %0, %1\n\t"
-               "addq %0, %2\n\t"
-               "subq $0x20, %0\n\t"
-               /*
-                * We gobble 32byts backward in each loop.
-                */
-               "8:\n\t"
-               "subq $0x20, %0\n\t"
-               "movq -1*8(%1), %4\n\t"
-               "movq -2*8(%1), %5\n\t"
-               "movq -3*8(%1), %6\n\t"
-               "movq -4*8(%1), %7\n\t"
-               "leaq -4*8(%1), %1\n\t"
-
-               "movq %4, -1*8(%2)\n\t"
-               "movq %5, -2*8(%2)\n\t"
-               "movq %6, -3*8(%2)\n\t"
-               "movq %7, -4*8(%2)\n\t"
-               "leaq -4*8(%2), %2\n\t"
-               "jae 8b\n\t"
-               /*
-                * Calculate copy position to head.
-                */
-               "addq $0x20, %0\n\t"
-               "subq %0, %1\n\t"
-               "subq %0, %2\n\t"
-               "1:\n\t"
-               "cmpq $16, %0\n\t"
-               "jb 9f\n\t"
-               /*
-                * Move data from 16 bytes to 31 bytes.
-                */
-               "movq 0*8(%1), %4\n\t"
-               "movq 1*8(%1), %5\n\t"
-               "movq -2*8(%1, %0), %6\n\t"
-               "movq -1*8(%1, %0), %7\n\t"
-               "movq %4, 0*8(%2)\n\t"
-               "movq %5, 1*8(%2)\n\t"
-               "movq %6, -2*8(%2, %0)\n\t"
-               "movq %7, -1*8(%2, %0)\n\t"
-               "jmp 13f\n\t"
-               ".p2align 4\n\t"
-               "9:\n\t"
-               "cmpq $8, %0\n\t"
-               "jb 10f\n\t"
-               /*
-                * Move data from 8 bytes to 15 bytes.
-                */
-               "movq 0*8(%1), %4\n\t"
-               "movq -1*8(%1, %0), %5\n\t"
-               "movq %4, 0*8(%2)\n\t"
-               "movq %5, -1*8(%2, %0)\n\t"
-               "jmp 13f\n\t"
-               "10:\n\t"
-               "cmpq $4, %0\n\t"
-               "jb 11f\n\t"
-               /*
-                * Move data from 4 bytes to 7 bytes.
-                */
-               "movl (%1), %4d\n\t"
-               "movl -4(%1, %0), %5d\n\t"
-               "movl %4d, (%2)\n\t"
-               "movl %5d, -4(%2, %0)\n\t"
-               "jmp 13f\n\t"
-               "11:\n\t"
-               "cmp $2, %0\n\t"
-               "jb 12f\n\t"
-               /*
-                * Move data from 2 bytes to 3 bytes.
-                */
-               "movw (%1), %4w\n\t"
-               "movw -2(%1, %0), %5w\n\t"
-               "movw %4w, (%2)\n\t"
-               "movw %5w, -2(%2, %0)\n\t"
-               "jmp 13f\n\t"
-               "12:\n\t"
-               "cmp $1, %0\n\t"
-               "jb 13f\n\t"
-               /*
-                * Move data for 1 byte.
-                */
-               "movb (%1), %4b\n\t"
-               "movb %4b, (%2)\n\t"
-               "13:\n\t"
-               : "=&d" (d0), "=&S" (d1), "=&D" (d2), "=&a" (ret) ,
-                 "=r"(d3), "=r"(d4), "=r"(d5), "=r"(d6), "=&c" (d7)
-               :"0" (count),
-                "1" (src),
-                "2" (dest)
-               :"memory");
-
-               return ret;
-
-}
-EXPORT_SYMBOL(memmove);
index 41fcf00e49dfc7ca9549ce5e4dd26f360ee81353..67743977398b6485286aaedad76e9b24e4e54230 100644 (file)
 #include <asm/dwarf2.h>
 
 #define save_common_regs \
-       pushq %rdi; \
-       pushq %rsi; \
-       pushq %rcx; \
-       pushq %r8; \
-       pushq %r9; \
-       pushq %r10; \
-       pushq %r11
+       pushq_cfi %rdi; CFI_REL_OFFSET rdi, 0; \
+       pushq_cfi %rsi; CFI_REL_OFFSET rsi, 0; \
+       pushq_cfi %rcx; CFI_REL_OFFSET rcx, 0; \
+       pushq_cfi %r8;  CFI_REL_OFFSET r8,  0; \
+       pushq_cfi %r9;  CFI_REL_OFFSET r9,  0; \
+       pushq_cfi %r10; CFI_REL_OFFSET r10, 0; \
+       pushq_cfi %r11; CFI_REL_OFFSET r11, 0
 
 #define restore_common_regs \
-       popq %r11; \
-       popq %r10; \
-       popq %r9; \
-       popq %r8; \
-       popq %rcx; \
-       popq %rsi; \
-       popq %rdi
+       popq_cfi %r11; CFI_RESTORE r11; \
+       popq_cfi %r10; CFI_RESTORE r10; \
+       popq_cfi %r9;  CFI_RESTORE r9; \
+       popq_cfi %r8;  CFI_RESTORE r8; \
+       popq_cfi %rcx; CFI_RESTORE rcx; \
+       popq_cfi %rsi; CFI_RESTORE rsi; \
+       popq_cfi %rdi; CFI_RESTORE rdi
 
 /* Fix up special calling conventions */
 ENTRY(call_rwsem_down_read_failed)
+       CFI_STARTPROC
        save_common_regs
-       pushq %rdx
+       pushq_cfi %rdx
+       CFI_REL_OFFSET rdx, 0
        movq %rax,%rdi
        call rwsem_down_read_failed
-       popq %rdx
+       popq_cfi %rdx
+       CFI_RESTORE rdx
        restore_common_regs
        ret
-       ENDPROC(call_rwsem_down_read_failed)
+       CFI_ENDPROC
+ENDPROC(call_rwsem_down_read_failed)
 
 ENTRY(call_rwsem_down_write_failed)
+       CFI_STARTPROC
        save_common_regs
        movq %rax,%rdi
        call rwsem_down_write_failed
        restore_common_regs
        ret
-       ENDPROC(call_rwsem_down_write_failed)
+       CFI_ENDPROC
+ENDPROC(call_rwsem_down_write_failed)
 
 ENTRY(call_rwsem_wake)
+       CFI_STARTPROC
        decl %edx       /* do nothing if still outstanding active readers */
        jnz 1f
        save_common_regs
@@ -67,15 +74,20 @@ ENTRY(call_rwsem_wake)
        call rwsem_wake
        restore_common_regs
 1:     ret
-       ENDPROC(call_rwsem_wake)
+       CFI_ENDPROC
+ENDPROC(call_rwsem_wake)
 
 /* Fix up special calling conventions */
 ENTRY(call_rwsem_downgrade_wake)
+       CFI_STARTPROC
        save_common_regs
-       pushq %rdx
+       pushq_cfi %rdx
+       CFI_REL_OFFSET rdx, 0
        movq %rax,%rdi
        call rwsem_downgrade_wake
-       popq %rdx
+       popq_cfi %rdx
+       CFI_RESTORE rdx
        restore_common_regs
        ret
-       ENDPROC(call_rwsem_downgrade_wake)
+       CFI_ENDPROC
+ENDPROC(call_rwsem_downgrade_wake)
index 648fe474178234e24d2c688bd44756ba24e75b4e..06691daa4108cacb205d198f40e5f6254f2644d4 100644 (file)
@@ -36,7 +36,7 @@
  */
 #ifdef CONFIG_SMP
 ENTRY(__write_lock_failed)
-       CFI_STARTPROC simple
+       CFI_STARTPROC
        FRAME
 2:     LOCK_PREFIX
        addl    $ RW_LOCK_BIAS,(%eax)
@@ -74,29 +74,23 @@ ENTRY(__read_lock_failed)
 /* Fix up special calling conventions */
 ENTRY(call_rwsem_down_read_failed)
        CFI_STARTPROC
-       push %ecx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ecx
        CFI_REL_OFFSET ecx,0
-       push %edx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %edx
        CFI_REL_OFFSET edx,0
        call rwsem_down_read_failed
-       pop %edx
-       CFI_ADJUST_CFA_OFFSET -4
-       pop %ecx
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %edx
+       popl_cfi %ecx
        ret
        CFI_ENDPROC
        ENDPROC(call_rwsem_down_read_failed)
 
 ENTRY(call_rwsem_down_write_failed)
        CFI_STARTPROC
-       push %ecx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ecx
        CFI_REL_OFFSET ecx,0
        calll rwsem_down_write_failed
-       pop %ecx
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %ecx
        ret
        CFI_ENDPROC
        ENDPROC(call_rwsem_down_write_failed)
@@ -105,12 +99,10 @@ ENTRY(call_rwsem_wake)
        CFI_STARTPROC
        decw %dx    /* do nothing if still outstanding active readers */
        jnz 1f
-       push %ecx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ecx
        CFI_REL_OFFSET ecx,0
        call rwsem_wake
-       pop %ecx
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %ecx
 1:     ret
        CFI_ENDPROC
        ENDPROC(call_rwsem_wake)
@@ -118,17 +110,13 @@ ENTRY(call_rwsem_wake)
 /* Fix up special calling conventions */
 ENTRY(call_rwsem_downgrade_wake)
        CFI_STARTPROC
-       push %ecx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ecx
        CFI_REL_OFFSET ecx,0
-       push %edx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %edx
        CFI_REL_OFFSET edx,0
        call rwsem_downgrade_wake
-       pop %edx
-       CFI_ADJUST_CFA_OFFSET -4
-       pop %ecx
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %edx
+       popl_cfi %ecx
        ret
        CFI_ENDPROC
        ENDPROC(call_rwsem_downgrade_wake)
index 650b11e00ecc9746a521ab61d0aa9b98719cb3d4..2930ae05d77305a3c3f76b821f843203969ffb2b 100644 (file)
@@ -7,24 +7,6 @@
 
        #include <linux/linkage.h>
 
-#define ARCH_TRACE_IRQS_ON                     \
-       pushl %eax;                             \
-       pushl %ecx;                             \
-       pushl %edx;                             \
-       call trace_hardirqs_on;                 \
-       popl %edx;                              \
-       popl %ecx;                              \
-       popl %eax;
-
-#define ARCH_TRACE_IRQS_OFF                    \
-       pushl %eax;                             \
-       pushl %ecx;                             \
-       pushl %edx;                             \
-       call trace_hardirqs_off;                \
-       popl %edx;                              \
-       popl %ecx;                              \
-       popl %eax;
-
 #ifdef CONFIG_TRACE_IRQFLAGS
        /* put return address in eax (arg1) */
        .macro thunk_ra name,func
index bf9a7d5a54288763b35dc355790c00aaa43b637e..782b082c9ff74a3ba62b86f5c07830e9eaadccfb 100644 (file)
        CFI_ENDPROC
        .endm
 
-       /* rdi: arg1 ... normal C conventions. rax is passed from C. */         
-       .macro thunk_retrax name,func
-       .globl \name
-\name: 
-       CFI_STARTPROC
-       SAVE_ARGS
-       call \func
-       jmp  restore_norax
-       CFI_ENDPROC
-       .endm
-       
-
-       .section .sched.text, "ax"
-#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
-       thunk rwsem_down_read_failed_thunk,rwsem_down_read_failed
-       thunk rwsem_down_write_failed_thunk,rwsem_down_write_failed
-       thunk rwsem_wake_thunk,rwsem_wake
-       thunk rwsem_downgrade_thunk,rwsem_downgrade_wake
-#endif 
-       
 #ifdef CONFIG_TRACE_IRQFLAGS
        /* put return address in rdi (arg1) */
        .macro thunk_ra name,func
@@ -72,10 +52,3 @@ restore:
        RESTORE_ARGS
        ret     
        CFI_ENDPROC
-       
-       CFI_STARTPROC
-       SAVE_ARGS
-restore_norax: 
-       RESTORE_ARGS 1
-       ret
-       CFI_ENDPROC
index 09df2f9a3d69ce36a20ec86c82bc9b719d44a1ae..3e608edf99586608235fe366e10171b68a6a01fa 100644 (file)
@@ -25,6 +25,7 @@ obj-$(CONFIG_MMIOTRACE_TEST)  += testmmiotrace.o
 obj-$(CONFIG_NUMA)             += numa.o numa_$(BITS).o
 obj-$(CONFIG_AMD_NUMA)         += amdtopology_64.o
 obj-$(CONFIG_ACPI_NUMA)                += srat_$(BITS).o
+obj-$(CONFIG_NUMA_EMU)         += numa_emulation.o
 
 obj-$(CONFIG_HAVE_MEMBLOCK)            += memblock.o
 
index f21962c435ed78026a41b97d40717a7dc035599c..0919c26820d429e3d9660e0a1ec8852ae0910d27 100644 (file)
@@ -26,9 +26,7 @@
 #include <asm/apic.h>
 #include <asm/amd_nb.h>
 
-static struct bootnode __initdata nodes[8];
 static unsigned char __initdata nodeids[8];
-static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE;
 
 static __init int find_northbridge(void)
 {
@@ -51,7 +49,7 @@ static __init int find_northbridge(void)
                return num;
        }
 
-       return -1;
+       return -ENOENT;
 }
 
 static __init void early_get_boot_cpu_id(void)
@@ -69,17 +67,18 @@ static __init void early_get_boot_cpu_id(void)
 #endif
 }
 
-int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
+int __init amd_numa_init(void)
 {
-       unsigned long start = PFN_PHYS(start_pfn);
-       unsigned long end = PFN_PHYS(end_pfn);
+       unsigned long start = PFN_PHYS(0);
+       unsigned long end = PFN_PHYS(max_pfn);
        unsigned numnodes;
        unsigned long prevbase;
-       int i, nb, found = 0;
+       int i, j, nb;
        u32 nodeid, reg;
+       unsigned int bits, cores, apicid_base;
 
        if (!early_pci_allowed())
-               return -1;
+               return -EINVAL;
 
        nb = find_northbridge();
        if (nb < 0)
@@ -90,7 +89,7 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
        reg = read_pci_config(0, nb, 0, 0x60);
        numnodes = ((reg >> 4) & 0xF) + 1;
        if (numnodes <= 1)
-               return -1;
+               return -ENOENT;
 
        pr_info("Number of physical nodes %d\n", numnodes);
 
@@ -121,9 +120,9 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
                if ((base >> 8) & 3 || (limit >> 8) & 3) {
                        pr_err("Node %d using interleaving mode %lx/%lx\n",
                               nodeid, (base >> 8) & 3, (limit >> 8) & 3);
-                       return -1;
+                       return -EINVAL;
                }
-               if (node_isset(nodeid, nodes_parsed)) {
+               if (node_isset(nodeid, numa_nodes_parsed)) {
                        pr_info("Node %d already present, skipping\n",
                                nodeid);
                        continue;
@@ -160,117 +159,28 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
                if (prevbase > base) {
                        pr_err("Node map not sorted %lx,%lx\n",
                               prevbase, base);
-                       return -1;
+                       return -EINVAL;
                }
 
                pr_info("Node %d MemBase %016lx Limit %016lx\n",
                        nodeid, base, limit);
 
-               found++;
-
-               nodes[nodeid].start = base;
-               nodes[nodeid].end = limit;
-
                prevbase = base;
-
-               node_set(nodeid, nodes_parsed);
-       }
-
-       if (!found)
-               return -1;
-       return 0;
-}
-
-#ifdef CONFIG_NUMA_EMU
-static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
-       [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
-};
-
-void __init amd_get_nodes(struct bootnode *physnodes)
-{
-       int i;
-
-       for_each_node_mask(i, nodes_parsed) {
-               physnodes[i].start = nodes[i].start;
-               physnodes[i].end = nodes[i].end;
+               numa_add_memblk(nodeid, base, limit);
+               node_set(nodeid, numa_nodes_parsed);
        }
-}
-
-static int __init find_node_by_addr(unsigned long addr)
-{
-       int ret = NUMA_NO_NODE;
-       int i;
-
-       for (i = 0; i < 8; i++)
-               if (addr >= nodes[i].start && addr < nodes[i].end) {
-                       ret = i;
-                       break;
-               }
-       return ret;
-}
 
-/*
- * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be
- * setup to represent the physical topology but reflect the emulated
- * environment.  For each emulated node, the real node which it appears on is
- * found and a fake pxm to nid mapping is created which mirrors the actual
- * locality.  node_distance() then represents the correct distances between
- * emulated nodes by using the fake acpi mappings to pxms.
- */
-void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes)
-{
-       unsigned int bits;
-       unsigned int cores;
-       unsigned int apicid_base = 0;
-       int i;
+       if (!nodes_weight(numa_nodes_parsed))
+               return -ENOENT;
 
+       /*
+        * We seem to have valid NUMA configuration.  Map apicids to nodes
+        * using the coreid bits from early_identify_cpu.
+        */
        bits = boot_cpu_data.x86_coreid_bits;
        cores = 1 << bits;
-       early_get_boot_cpu_id();
-       if (boot_cpu_physical_apicid > 0)
-               apicid_base = boot_cpu_physical_apicid;
-
-       for (i = 0; i < nr_nodes; i++) {
-               int index;
-               int nid;
-               int j;
-
-               nid = find_node_by_addr(nodes[i].start);
-               if (nid == NUMA_NO_NODE)
-                       continue;
-
-               index = nodeids[nid] << bits;
-               if (fake_apicid_to_node[index + apicid_base] == NUMA_NO_NODE)
-                       for (j = apicid_base; j < cores + apicid_base; j++)
-                               fake_apicid_to_node[index + j] = i;
-#ifdef CONFIG_ACPI_NUMA
-               __acpi_map_pxm_to_node(nid, i);
-#endif
-       }
-       memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
-}
-#endif /* CONFIG_NUMA_EMU */
-
-int __init amd_scan_nodes(void)
-{
-       unsigned int bits;
-       unsigned int cores;
-       unsigned int apicid_base;
-       int i;
-
-       BUG_ON(nodes_empty(nodes_parsed));
-       node_possible_map = nodes_parsed;
-       memnode_shift = compute_hash_shift(nodes, 8, NULL);
-       if (memnode_shift < 0) {
-               pr_err("No NUMA node hash function found. Contact maintainer\n");
-               return -1;
-       }
-       pr_info("Using node hash shift of %d\n", memnode_shift);
-
-       /* use the coreid bits from early_identify_cpu */
-       bits = boot_cpu_data.x86_coreid_bits;
-       cores = (1<<bits);
        apicid_base = 0;
+
        /* get the APIC ID of the BSP early for systems with apicid lifting */
        early_get_boot_cpu_id();
        if (boot_cpu_physical_apicid > 0) {
@@ -278,17 +188,9 @@ int __init amd_scan_nodes(void)
                apicid_base = boot_cpu_physical_apicid;
        }
 
-       for_each_node_mask(i, node_possible_map) {
-               int j;
-
-               memblock_x86_register_active_regions(i,
-                               nodes[i].start >> PAGE_SHIFT,
-                               nodes[i].end >> PAGE_SHIFT);
+       for_each_node_mask(i, numa_nodes_parsed)
                for (j = apicid_base; j < cores + apicid_base; j++)
-                       apicid_to_node[(i << bits) + j] = i;
-               setup_node_bootmem(i, nodes[i].start, nodes[i].end);
-       }
+                       set_apicid_to_node((i << bits) + j, i);
 
-       numa_init_array();
        return 0;
 }
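
The closing loop encodes a fixed APIC ID layout: each node owns a contiguous
block of cores = 1 << x86_coreid_bits IDs, offset by the BSP's APIC ID on
systems with apicid lifting. The same arithmetic in isolation, with
hypothetical values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int bits = 2;			/* x86_coreid_bits */
		unsigned int cores = 1u << bits;	/* 4 cores per node */
		unsigned int apicid_base = 0;		/* BSP's APIC ID */
		unsigned int node, j;

		for (node = 0; node < 2; node++)
			for (j = apicid_base; j < cores + apicid_base; j++)
				printf("apicid %u -> node %u\n",
				       (node << bits) + j, node);
		return 0;
	}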
index 7d90ceb882a41ec55f0d8aea7b2c40cd806afd81..20e3f8702d1e5701b130d2891e23bf39fbe81663 100644 (file)
@@ -229,15 +229,14 @@ void vmalloc_sync_all(void)
        for (address = VMALLOC_START & PMD_MASK;
             address >= TASK_SIZE && address < FIXADDR_TOP;
             address += PMD_SIZE) {
-
-               unsigned long flags;
                struct page *page;
 
-               spin_lock_irqsave(&pgd_lock, flags);
+               spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        spinlock_t *pgt_lock;
                        pmd_t *ret;
 
+                       /* the pgt_lock only for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
                        spin_lock(pgt_lock);
@@ -247,7 +246,7 @@ void vmalloc_sync_all(void)
                        if (!ret)
                                break;
                }
-               spin_unlock_irqrestore(&pgd_lock, flags);
+               spin_unlock(&pgd_lock);
        }
 }
 
@@ -828,6 +827,13 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
               unsigned long address, unsigned int fault)
 {
        if (fault & VM_FAULT_OOM) {
+               /* Kernel mode? Handle exceptions or die: */
+               if (!(error_code & PF_USER)) {
+                       up_read(&current->mm->mmap_sem);
+                       no_context(regs, error_code, address);
+                       return;
+               }
+
                out_of_memory(regs, error_code, address);
        } else {
                if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
index 947f42abe820eed9e47388ff3fcfd6fc937bb96a..286d289b039b876ef046565d29552827d76f243c 100644 (file)
@@ -18,9 +18,9 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-unsigned long __initdata e820_table_start;
-unsigned long __meminitdata e820_table_end;
-unsigned long __meminitdata e820_table_top;
+unsigned long __initdata pgt_buf_start;
+unsigned long __meminitdata pgt_buf_end;
+unsigned long __meminitdata pgt_buf_top;
 
 int after_bootmem;
 
@@ -33,7 +33,7 @@ int direct_gbpages
 static void __init find_early_table_space(unsigned long end, int use_pse,
                                          int use_gbpages)
 {
-       unsigned long puds, pmds, ptes, tables, start;
+       unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
        phys_addr_t base;
 
        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
@@ -65,29 +65,20 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 #ifdef CONFIG_X86_32
        /* for fixmap */
        tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
-#endif
 
-       /*
-        * RED-PEN putting page tables only on node 0 could
-        * cause a hotspot and fill up ZONE_DMA. The page tables
-        * need roughly 0.5KB per GB.
-        */
-#ifdef CONFIG_X86_32
-       start = 0x7000;
-#else
-       start = 0x8000;
+       good_end = max_pfn_mapped << PAGE_SHIFT;
 #endif
-       base = memblock_find_in_range(start, max_pfn_mapped<<PAGE_SHIFT,
-                                       tables, PAGE_SIZE);
+
+       base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
        if (base == MEMBLOCK_ERROR)
                panic("Cannot find space for the kernel page tables");
 
-       e820_table_start = base >> PAGE_SHIFT;
-       e820_table_end = e820_table_start;
-       e820_table_top = e820_table_start + (tables >> PAGE_SHIFT);
+       pgt_buf_start = base >> PAGE_SHIFT;
+       pgt_buf_end = pgt_buf_start;
+       pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
 
        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-               end, e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT);
+               end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
 }
 
 struct map_range {
@@ -279,30 +270,11 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
        load_cr3(swapper_pg_dir);
 #endif
 
-#ifdef CONFIG_X86_64
-       if (!after_bootmem && !start) {
-               pud_t *pud;
-               pmd_t *pmd;
-
-               mmu_cr4_features = read_cr4();
-
-               /*
-                * _brk_end cannot change anymore, but it and _end may be
-                * located on different 2M pages. cleanup_highmap(), however,
-                * can only consider _end when it runs, so destroy any
-                * mappings beyond _brk_end here.
-                */
-               pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
-               pmd = pmd_offset(pud, _brk_end - 1);
-               while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
-                       pmd_clear(pmd);
-       }
-#endif
        __flush_tlb_all();
 
-       if (!after_bootmem && e820_table_end > e820_table_start)
-               memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT,
-                                e820_table_end << PAGE_SHIFT, "PGTABLE");
+       if (!after_bootmem && pgt_buf_end > pgt_buf_start)
+               memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT,
+                                pgt_buf_end << PAGE_SHIFT, "PGTABLE");
 
        if (!after_bootmem)
                early_memtest(start, end);
index c821074b7f0bcfaa0cdf2d25cd78835df099faee..73ad7ebd6e9cf6faa74a77879fc427bfc1ea6382 100644 (file)
@@ -62,10 +62,10 @@ bool __read_mostly __vmalloc_start_set = false;
 
 static __init void *alloc_low_page(void)
 {
-       unsigned long pfn = e820_table_end++;
+       unsigned long pfn = pgt_buf_end++;
        void *adr;
 
-       if (pfn >= e820_table_top)
+       if (pfn >= pgt_buf_top)
                panic("alloc_low_page: ran out of memory");
 
        adr = __va(pfn * PAGE_SIZE);
@@ -163,8 +163,8 @@ static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
-           && ((__pa(pte) >> PAGE_SHIFT) < e820_table_start
-               || (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) {
+           && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start
+               || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) {
                pte_t *newpte;
                int i;
 
@@ -644,8 +644,7 @@ void __init find_low_pfn_range(void)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
-                               int acpi, int k8)
+void __init initmem_init(void)
 {
 #ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
index 71a59296af80779f56d3f31c98f0ce2d790fe373..a08a62cb136e409892701924732d8791bde4cff7 100644 (file)
@@ -105,18 +105,18 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 
        for (address = start; address <= end; address += PGDIR_SIZE) {
                const pgd_t *pgd_ref = pgd_offset_k(address);
-               unsigned long flags;
                struct page *page;
 
                if (pgd_none(*pgd_ref))
                        continue;
 
-               spin_lock_irqsave(&pgd_lock, flags);
+               spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        spinlock_t *pgt_lock;
 
                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
+                       /* the pgt_lock is only needed for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
                        spin_lock(pgt_lock);
 
@@ -128,7 +128,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 
                        spin_unlock(pgt_lock);
                }
-               spin_unlock_irqrestore(&pgd_lock, flags);
+               spin_unlock(&pgd_lock);
        }
 }
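
Both pgd_lock hunks drop the irqsave/irqrestore variants because pgd_lock
is no longer taken from interrupt context, so a plain spin_lock() suffices;
the per-mm pgt_lock stays nested inside it. A user-space analogue of that
lock nesting, with pthread mutexes standing in for kernel spinlocks
(sketch only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pgd_lock = PTHREAD_MUTEX_INITIALIZER; /* global list */
static pthread_mutex_t pgt_lock = PTHREAD_MUTEX_INITIALIZER; /* one mm */

int main(void)
{
        pthread_mutex_lock(&pgd_lock);          /* outer: walk pgd_list */
        pthread_mutex_lock(&pgt_lock);          /* inner: sync one mm */
        puts("set pgd entry");
        pthread_mutex_unlock(&pgt_lock);
        pthread_mutex_unlock(&pgd_lock);        /* no IRQ flags to restore */
        return 0;
}
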
 
@@ -314,7 +314,7 @@ void __init cleanup_highmap(void)
 
 static __ref void *alloc_low_page(unsigned long *phys)
 {
-       unsigned long pfn = e820_table_end++;
+       unsigned long pfn = pgt_buf_end++;
        void *adr;
 
        if (after_bootmem) {
@@ -324,7 +324,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
                return adr;
        }
 
-       if (pfn >= e820_table_top)
+       if (pfn >= pgt_buf_top)
                panic("alloc_low_page: ran out of memory");
 
        adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
@@ -333,12 +333,28 @@ static __ref void *alloc_low_page(unsigned long *phys)
        return adr;
 }
 
+static __ref void *map_low_page(void *virt)
+{
+       void *adr;
+       unsigned long phys, left;
+
+       if (after_bootmem)
+               return virt;
+
+       phys = __pa(virt);
+       left = phys & (PAGE_SIZE - 1);
+       adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
+       adr = (void *)(((unsigned long)adr) | left);
+
+       return adr;
+}
+
 static __ref void unmap_low_page(void *adr)
 {
        if (after_bootmem)
                return;
 
-       early_iounmap(adr, PAGE_SIZE);
+       early_iounmap((void *)((unsigned long)adr & PAGE_MASK), PAGE_SIZE);
 }
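
The new map_low_page() maps the page containing an arbitrary physical
address and hands back a pointer carrying the same in-page offset, which
unmap_low_page() now masks off again before unmapping. The trick in
isolation, as a user-space sketch with invented addresses (assumes 4 KiB
pages and a 64-bit host):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long phys = 0x12345678UL;      /* arbitrary physical addr */
        unsigned long left = phys & (PAGE_SIZE - 1);
        unsigned long base = 0x700000000000UL;  /* pretend mapping result */
        unsigned long adr  = base | left;       /* offset is preserved */

        printf("phys %#lx -> virt %#lx, unmap uses %#lx\n",
               phys, adr, adr & PAGE_MASK);
        return 0;
}
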
 
 static unsigned long __meminit
@@ -385,15 +401,6 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
        return last_map_addr;
 }
 
-static unsigned long __meminit
-phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end,
-               pgprot_t prot)
-{
-       pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
-
-       return phys_pte_init(pte, address, end, prot);
-}
-
 static unsigned long __meminit
 phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
              unsigned long page_size_mask, pgprot_t prot)
@@ -420,8 +427,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                if (pmd_val(*pmd)) {
                        if (!pmd_large(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
-                               last_map_addr = phys_pte_update(pmd, address,
+                               pte = map_low_page((pte_t *)pmd_page_vaddr(*pmd));
+                               last_map_addr = phys_pte_init(pte, address,
                                                                end, prot);
+                               unmap_low_page(pte);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
@@ -467,18 +476,6 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
        return last_map_addr;
 }
 
-static unsigned long __meminit
-phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
-               unsigned long page_size_mask, pgprot_t prot)
-{
-       pmd_t *pmd = pmd_offset(pud, 0);
-       unsigned long last_map_addr;
-
-       last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask, prot);
-       __flush_tlb_all();
-       return last_map_addr;
-}
-
 static unsigned long __meminit
 phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                         unsigned long page_size_mask)
@@ -504,8 +501,11 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
                if (pud_val(*pud)) {
                        if (!pud_large(*pud)) {
-                               last_map_addr = phys_pmd_update(pud, addr, end,
+                               pmd = map_low_page(pmd_offset(pud, 0));
+                               last_map_addr = phys_pmd_init(pmd, addr, end,
                                                         page_size_mask, prot);
+                               unmap_low_page(pmd);
+                               __flush_tlb_all();
                                continue;
                        }
                        /*
@@ -553,17 +553,6 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
        return last_map_addr;
 }
 
-static unsigned long __meminit
-phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
-                unsigned long page_size_mask)
-{
-       pud_t *pud;
-
-       pud = (pud_t *)pgd_page_vaddr(*pgd);
-
-       return phys_pud_init(pud, addr, end, page_size_mask);
-}
-
 unsigned long __meminit
 kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
@@ -587,8 +576,10 @@ kernel_physical_mapping_init(unsigned long start,
                        next = end;
 
                if (pgd_val(*pgd)) {
-                       last_map_addr = phys_pud_update(pgd, __pa(start),
+                       pud = map_low_page((pud_t *)pgd_page_vaddr(*pgd));
+                       last_map_addr = phys_pud_init(pud, __pa(start),
                                                 __pa(end), page_size_mask);
+                       unmap_low_page(pud);
                        continue;
                }
 
@@ -612,10 +603,9 @@ kernel_physical_mapping_init(unsigned long start,
 }
 
 #ifndef CONFIG_NUMA
-void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
-                               int acpi, int k8)
+void __init initmem_init(void)
 {
-       memblock_x86_register_active_regions(0, start_pfn, end_pfn);
+       memblock_x86_register_active_regions(0, 0, max_pfn);
 }
 #endif
 
index ebf6d7887a38fa54481b6b7645e42bb7c59499a7..9559d360fde79b8373d042fb48e24c98b836684d 100644 (file)
@@ -26,11 +26,49 @@ static __init int numa_setup(char *opt)
 early_param("numa", numa_setup);
 
 /*
- * Which logical CPUs are on which nodes
+ * apicid, cpu, node mappings
  */
+s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
+       [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
+};
+
 cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
 EXPORT_SYMBOL(node_to_cpumask_map);
 
+/*
+ * Map cpu index to node index
+ */
+DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
+
+void __cpuinit numa_set_node(int cpu, int node)
+{
+       int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
+
+       /* early setting, no percpu area yet */
+       if (cpu_to_node_map) {
+               cpu_to_node_map[cpu] = node;
+               return;
+       }
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+       if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
+               printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
+               dump_stack();
+               return;
+       }
+#endif
+       per_cpu(x86_cpu_to_node_map, cpu) = node;
+
+       if (node != NUMA_NO_NODE)
+               set_cpu_numa_node(cpu, node);
+}
+
+void __cpuinit numa_clear_node(int cpu)
+{
+       numa_set_node(cpu, NUMA_NO_NODE);
+}
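
numa_set_node() above is two-phase: before the per-cpu areas exist, writes
go through the early_per_cpu pointer into a static array; once that pointer
is retired, per-CPU storage is used. A minimal user-space model of the same
idea (array sizes and values are made up):

#include <stdio.h>
#include <stddef.h>

#define NR_CPUS 4

static int early_map[NR_CPUS] = { -1, -1, -1, -1 };
static int *early_ptr = early_map;      /* NULLed once per-cpu is up */
static int percpu_map[NR_CPUS];

static void set_node(int cpu, int node)
{
        if (early_ptr) {                /* early setting, no percpu yet */
                early_ptr[cpu] = node;
                return;
        }
        percpu_map[cpu] = node;
}

int main(void)
{
        set_node(0, 1);                 /* lands in the early array */
        early_ptr = NULL;               /* per-cpu areas now set up */
        set_node(1, 0);                 /* lands in per-cpu storage */
        printf("early: cpu0->%d, percpu: cpu1->%d\n",
               early_map[0], percpu_map[1]);
        return 0;
}
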
+
 /*
  * Allocate node_to_cpumask_map based on number of available nodes
  * Requires node_possible_map to be valid.
@@ -57,7 +95,174 @@ void __init setup_node_to_cpumask_map(void)
        pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
 }
 
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+/*
+ * There are unfortunately some poorly designed mainboards around that
+ * only connect memory to a single CPU. This breaks the 1:1 cpu->node
+ * mapping. To avoid this, fill in the mapping for all possible CPUs,
+ * as the number of CPUs is not known yet. We round-robin over the
+ * existing nodes.
+ */
+void __init numa_init_array(void)
+{
+       int rr, i;
+
+       rr = first_node(node_online_map);
+       for (i = 0; i < nr_cpu_ids; i++) {
+               if (early_cpu_to_node(i) != NUMA_NO_NODE)
+                       continue;
+               numa_set_node(i, rr);
+               rr = next_node(rr, node_online_map);
+               if (rr == MAX_NUMNODES)
+                       rr = first_node(node_online_map);
+       }
+}
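
The round-robin fallback is easy to see in isolation: CPUs that end up
without a detected node are spread evenly over whatever nodes are online.
A simplified stand-alone model (it skips the "already has a node" check
and uses a plain array instead of a nodemask):

#include <stdio.h>

int main(void)
{
        int online_nodes[] = { 0, 2 };  /* pretend nodes 0 and 2 are online */
        int nr_nodes = 2, nr_cpus = 5, rr = 0;

        for (int cpu = 0; cpu < nr_cpus; cpu++) {
                printf("cpu %d -> node %d\n", cpu, online_nodes[rr]);
                rr = (rr + 1) % nr_nodes;
        }
        return 0;
}
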
+
+static __init int find_near_online_node(int node)
+{
+       int n, val;
+       int min_val = INT_MAX;
+       int best_node = -1;
+
+       for_each_online_node(n) {
+               val = node_distance(node, n);
+
+               if (val < min_val) {
+                       min_val = val;
+                       best_node = n;
+               }
+       }
+
+       return best_node;
+}
+
+/*
+ * Setup early cpu_to_node.
+ *
+ * Populate cpu_to_node[] only if the x86_cpu_to_apicid[]
+ * and apicid_to_node[] tables have valid entries for a CPU.
+ * This means we skip cpu_to_node[] initialisation for NUMA
+ * emulation and the fake-node case (when running a kernel compiled
+ * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
+ * is already initialized in a round-robin manner by numa_init_array()
+ * prior to this call, and that initialization is good enough
+ * for the fake NUMA cases.
+ *
+ * Called before the per_cpu areas are setup.
+ */
+void __init init_cpu_to_node(void)
+{
+       int cpu;
+       u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
+
+       BUG_ON(cpu_to_apicid == NULL);
+
+       for_each_possible_cpu(cpu) {
+               int node = numa_cpu_node(cpu);
+
+               if (node == NUMA_NO_NODE)
+                       continue;
+               if (!node_online(node))
+                       node = find_near_online_node(node);
+               numa_set_node(cpu, node);
+       }
+}
+
+#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+
+# ifndef CONFIG_NUMA_EMU
+void __cpuinit numa_add_cpu(int cpu)
+{
+       cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+       cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+# endif        /* !CONFIG_NUMA_EMU */
+
+#else  /* !CONFIG_DEBUG_PER_CPU_MAPS */
+
+int __cpu_to_node(int cpu)
+{
+       if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
+               printk(KERN_WARNING
+                       "cpu_to_node(%d): usage too early!\n", cpu);
+               dump_stack();
+               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+       }
+       return per_cpu(x86_cpu_to_node_map, cpu);
+}
+EXPORT_SYMBOL(__cpu_to_node);
+
+/*
+ * Same function as cpu_to_node() but used if called before the
+ * per_cpu areas are setup.
+ */
+int early_cpu_to_node(int cpu)
+{
+       if (early_per_cpu_ptr(x86_cpu_to_node_map))
+               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+
+       if (!cpu_possible(cpu)) {
+               printk(KERN_WARNING
+                       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
+               dump_stack();
+               return NUMA_NO_NODE;
+       }
+       return per_cpu(x86_cpu_to_node_map, cpu);
+}
+
+struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
+{
+       int node = early_cpu_to_node(cpu);
+       struct cpumask *mask;
+       char buf[64];
+
+       if (node == NUMA_NO_NODE) {
+               /* early_cpu_to_node() already emits a warning and trace */
+               return NULL;
+       }
+       mask = node_to_cpumask_map[node];
+       if (!mask) {
+               pr_err("node_to_cpumask_map[%i] NULL\n", node);
+               dump_stack();
+               return NULL;
+       }
+
+       cpulist_scnprintf(buf, sizeof(buf), mask);
+       printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+               enable ? "numa_add_cpu" : "numa_remove_cpu",
+               cpu, node, buf);
+       return mask;
+}
+
+# ifndef CONFIG_NUMA_EMU
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+       struct cpumask *mask;
+
+       mask = debug_cpumask_set_cpu(cpu, enable);
+       if (!mask)
+               return;
+
+       if (enable)
+               cpumask_set_cpu(cpu, mask);
+       else
+               cpumask_clear_cpu(cpu, mask);
+}
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+       numa_set_cpumask(cpu, 1);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+       numa_set_cpumask(cpu, 0);
+}
+# endif        /* !CONFIG_NUMA_EMU */
+
 /*
  * Returns a pointer to the bitmask of CPUs on Node 'node'.
  */
@@ -80,4 +285,5 @@ const struct cpumask *cpumask_of_node(int node)
        return node_to_cpumask_map[node];
 }
 EXPORT_SYMBOL(cpumask_of_node);
-#endif
+
+#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
index 84a3e4c9f277d82175b1603dae6b0c9c40dc0aef..bde3906420df74559bd49a52aad171216be52108 100644 (file)
@@ -110,6 +110,12 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
 static unsigned long kva_start_pfn;
 static unsigned long kva_pages;
+
+int __cpuinit numa_cpu_node(int cpu)
+{
+       return apic->x86_32_numa_cpu_node(cpu);
+}
+
 /*
  * FLAT - support for basic PC memory model with discontig enabled, essentially
  *        a single node with all available processors in it with a flat
@@ -346,8 +352,7 @@ static void init_remap_allocator(int nid)
                (ulong) node_remap_end_vaddr[nid]);
 }
 
-void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
-                               int acpi, int k8)
+void __init initmem_init(void)
 {
        int nid;
        long kva_target_pfn;
@@ -361,6 +366,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
         */
 
        get_memcfg_numa();
+       numa_init_array();
 
        kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
 
index 95ea1551eebca344bee38ebc160ba8f3057c1734..9ec0f209a6a4e3fd1a7390d206bfdad33e24c85b 100644 (file)
 #include <linux/module.h>
 #include <linux/nodemask.h>
 #include <linux/sched.h>
+#include <linux/acpi.h>
 
 #include <asm/e820.h>
 #include <asm/proto.h>
 #include <asm/dma.h>
-#include <asm/numa.h>
 #include <asm/acpi.h>
 #include <asm/amd_nb.h>
 
+#include "numa_internal.h"
+
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
-struct memnode memnode;
+nodemask_t numa_nodes_parsed __initdata;
 
-s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
-       [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
-};
+struct memnode memnode;
 
 static unsigned long __initdata nodemap_addr;
 static unsigned long __initdata nodemap_size;
 
-/*
- * Map cpu index to node index
- */
-DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
+static struct numa_meminfo numa_meminfo __initdata;
+
+static int numa_distance_cnt;
+static u8 *numa_distance;
 
 /*
  * Given a shift value, try to populate memnodemap[]
@@ -46,16 +45,15 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
  * 0 if memnodemap[] is too small (or shift too small)
  * -1 if nodes overlap or RAM is lost (shift too big)
  */
-static int __init populate_memnodemap(const struct bootnode *nodes,
-                                     int numnodes, int shift, int *nodeids)
+static int __init populate_memnodemap(const struct numa_meminfo *mi, int shift)
 {
        unsigned long addr, end;
        int i, res = -1;
 
        memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
-       for (i = 0; i < numnodes; i++) {
-               addr = nodes[i].start;
-               end = nodes[i].end;
+       for (i = 0; i < mi->nr_blks; i++) {
+               addr = mi->blk[i].start;
+               end = mi->blk[i].end;
                if (addr >= end)
                        continue;
                if ((end >> shift) >= memnodemapsize)
@@ -63,12 +61,7 @@ static int __init populate_memnodemap(const struct bootnode *nodes,
                do {
                        if (memnodemap[addr >> shift] != NUMA_NO_NODE)
                                return -1;
-
-                       if (!nodeids)
-                               memnodemap[addr >> shift] = i;
-                       else
-                               memnodemap[addr >> shift] = nodeids[i];
-
+                       memnodemap[addr >> shift] = mi->blk[i].nid;
                        addr += (1UL << shift);
                } while (addr < end);
                res = 1;
@@ -86,7 +79,7 @@ static int __init allocate_cachealigned_memnodemap(void)
 
        addr = 0x8000;
        nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
-       nodemap_addr = memblock_find_in_range(addr, max_pfn<<PAGE_SHIFT,
+       nodemap_addr = memblock_find_in_range(addr, get_max_mapped(),
                                      nodemap_size, L1_CACHE_BYTES);
        if (nodemap_addr == MEMBLOCK_ERROR) {
                printk(KERN_ERR
@@ -106,16 +99,15 @@ static int __init allocate_cachealigned_memnodemap(void)
  * The LSB of all start and end addresses in the node map is the value of the
  * maximum possible shift.
  */
-static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
-                                        int numnodes)
+static int __init extract_lsb_from_nodes(const struct numa_meminfo *mi)
 {
        int i, nodes_used = 0;
        unsigned long start, end;
        unsigned long bitfield = 0, memtop = 0;
 
-       for (i = 0; i < numnodes; i++) {
-               start = nodes[i].start;
-               end = nodes[i].end;
+       for (i = 0; i < mi->nr_blks; i++) {
+               start = mi->blk[i].start;
+               end = mi->blk[i].end;
                if (start >= end)
                        continue;
                bitfield |= start;
@@ -131,18 +123,17 @@ static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
        return i;
 }
 
-int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
-                             int *nodeids)
+static int __init compute_hash_shift(const struct numa_meminfo *mi)
 {
        int shift;
 
-       shift = extract_lsb_from_nodes(nodes, numnodes);
+       shift = extract_lsb_from_nodes(mi);
        if (allocate_cachealigned_memnodemap())
                return -1;
        printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
                shift);
 
-       if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
+       if (populate_memnodemap(mi, shift) != 1) {
                printk(KERN_INFO "Your memory is not aligned you need to "
                       "rebuild your kernel with a bigger NODEMAPSIZE "
                       "shift=%d\n", shift);
@@ -188,6 +179,63 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
        return NULL;
 }
 
+static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
+                                    struct numa_meminfo *mi)
+{
+       /* ignore zero-length blks */
+       if (start == end)
+               return 0;
+
+       /* whine about and ignore invalid blks */
+       if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
+               pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
+                          nid, start, end);
+               return 0;
+       }
+
+       if (mi->nr_blks >= NR_NODE_MEMBLKS) {
+               pr_err("NUMA: too many memblk ranges\n");
+               return -EINVAL;
+       }
+
+       mi->blk[mi->nr_blks].start = start;
+       mi->blk[mi->nr_blks].end = end;
+       mi->blk[mi->nr_blks].nid = nid;
+       mi->nr_blks++;
+       return 0;
+}
+
+/**
+ * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
+ * @idx: Index of memblk to remove
+ * @mi: numa_meminfo to remove memblk from
+ *
+ * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
+ * decrementing @mi->nr_blks.
+ */
+void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
+{
+       mi->nr_blks--;
+       memmove(&mi->blk[idx], &mi->blk[idx + 1],
+               (mi->nr_blks - idx) * sizeof(mi->blk[0]));
+}
+
+/**
+ * numa_add_memblk - Add one numa_memblk to numa_meminfo
+ * @nid: NUMA node ID of the new memblk
+ * @start: Start address of the new memblk
+ * @end: End address of the new memblk
+ *
+ * Add a new memblk to the default numa_meminfo.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init numa_add_memblk(int nid, u64 start, u64 end)
+{
+       return numa_add_memblk_to(nid, start, end, &numa_meminfo);
+}
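
The numa_meminfo helpers above amount to a small fixed-capacity array of
(start, end, nid) ranges. A user-space model of the add/remove pair, with
the capacity shrunk and the two-node layout invented for illustration:

#include <stdio.h>
#include <string.h>

#define NR_BLKS 4

struct blk { unsigned long long start, end; int nid; };
struct meminfo { int nr_blks; struct blk blk[NR_BLKS]; };

static int add_blk(struct meminfo *mi, int nid,
                   unsigned long long s, unsigned long long e)
{
        if (s == e)                     /* ignore zero-length blocks */
                return 0;
        if (s > e || mi->nr_blks >= NR_BLKS)
                return -1;
        mi->blk[mi->nr_blks++] = (struct blk){ s, e, nid };
        return 0;
}

static void remove_blk(struct meminfo *mi, int idx)
{
        mi->nr_blks--;
        memmove(&mi->blk[idx], &mi->blk[idx + 1],
                (mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

int main(void)
{
        struct meminfo mi = { 0 };

        add_blk(&mi, 0, 0x0ULL, 0x80000000ULL);          /* node 0: 0-2G */
        add_blk(&mi, 1, 0x80000000ULL, 0x100000000ULL);  /* node 1: 2-4G */
        remove_blk(&mi, 0);
        printf("%d block(s) left, first nid=%d\n", mi.nr_blks, mi.blk[0].nid);
        return 0;
}
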
+
 /* Initialize bootmem allocator for a node */
 void __init
 setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
@@ -234,696 +282,386 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
        node_set_online(nodeid);
 }
 
-/*
- * There are unfortunately some poorly designed mainboards around that
- * only connect memory to a single CPU. This breaks the 1:1 cpu->node
- * mapping. To avoid this fill in the mapping for all possible CPUs,
- * as the number of CPUs is not known yet. We round robin the existing
- * nodes.
+/**
+ * numa_cleanup_meminfo - Cleanup a numa_meminfo
+ * @mi: numa_meminfo to clean up
+ *
+ * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
+ * conflicts and clear unused memblks.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
  */
-void __init numa_init_array(void)
+int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
 {
-       int rr, i;
+       const u64 low = 0;
+       const u64 high = (u64)max_pfn << PAGE_SHIFT;
+       int i, j, k;
 
-       rr = first_node(node_online_map);
-       for (i = 0; i < nr_cpu_ids; i++) {
-               if (early_cpu_to_node(i) != NUMA_NO_NODE)
-                       continue;
-               numa_set_node(i, rr);
-               rr = next_node(rr, node_online_map);
-               if (rr == MAX_NUMNODES)
-                       rr = first_node(node_online_map);
-       }
-}
-
-#ifdef CONFIG_NUMA_EMU
-/* Numa emulation */
-static struct bootnode nodes[MAX_NUMNODES] __initdata;
-static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
-static char *cmdline __initdata;
+       for (i = 0; i < mi->nr_blks; i++) {
+               struct numa_memblk *bi = &mi->blk[i];
 
-void __init numa_emu_cmdline(char *str)
-{
-       cmdline = str;
-}
+               /* make sure all blocks are inside the limits */
+               bi->start = max(bi->start, low);
+               bi->end = min(bi->end, high);
 
-static int __init setup_physnodes(unsigned long start, unsigned long end,
-                                       int acpi, int amd)
-{
-       int ret = 0;
-       int i;
-
-       memset(physnodes, 0, sizeof(physnodes));
-#ifdef CONFIG_ACPI_NUMA
-       if (acpi)
-               acpi_get_nodes(physnodes, start, end);
-#endif
-#ifdef CONFIG_AMD_NUMA
-       if (amd)
-               amd_get_nodes(physnodes);
-#endif
-       /*
-        * Basic sanity checking on the physical node map: there may be errors
-        * if the SRAT or AMD code incorrectly reported the topology or the mem=
-        * kernel parameter is used.
-        */
-       for (i = 0; i < MAX_NUMNODES; i++) {
-               if (physnodes[i].start == physnodes[i].end)
-                       continue;
-               if (physnodes[i].start > end) {
-                       physnodes[i].end = physnodes[i].start;
-                       continue;
-               }
-               if (physnodes[i].end < start) {
-                       physnodes[i].start = physnodes[i].end;
+               /* and there's no empty block */
+               if (bi->start == bi->end) {
+                       numa_remove_memblk_from(i--, mi);
                        continue;
                }
-               if (physnodes[i].start < start)
-                       physnodes[i].start = start;
-               if (physnodes[i].end > end)
-                       physnodes[i].end = end;
-               ret++;
-       }
-
-       /*
-        * If no physical topology was detected, a single node is faked to cover
-        * the entire address space.
-        */
-       if (!ret) {
-               physnodes[ret].start = start;
-               physnodes[ret].end = end;
-               ret = 1;
-       }
-       return ret;
-}
-
-static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
-{
-       int i;
-
-       BUG_ON(acpi && amd);
-#ifdef CONFIG_ACPI_NUMA
-       if (acpi)
-               acpi_fake_nodes(nodes, nr_nodes);
-#endif
-#ifdef CONFIG_AMD_NUMA
-       if (amd)
-               amd_fake_nodes(nodes, nr_nodes);
-#endif
-       if (!acpi && !amd)
-               for (i = 0; i < nr_cpu_ids; i++)
-                       numa_set_node(i, 0);
-}
-
-/*
- * Setups up nid to range from addr to addr + size.  If the end
- * boundary is greater than max_addr, then max_addr is used instead.
- * The return value is 0 if there is additional memory left for
- * allocation past addr and -1 otherwise.  addr is adjusted to be at
- * the end of the node.
- */
-static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
-{
-       int ret = 0;
-       nodes[nid].start = *addr;
-       *addr += size;
-       if (*addr >= max_addr) {
-               *addr = max_addr;
-               ret = -1;
-       }
-       nodes[nid].end = *addr;
-       node_set(nid, node_possible_map);
-       printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
-              nodes[nid].start, nodes[nid].end,
-              (nodes[nid].end - nodes[nid].start) >> 20);
-       return ret;
-}
-
-/*
- * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
- * to max_addr.  The return value is the number of nodes allocated.
- */
-static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
-{
-       nodemask_t physnode_mask = NODE_MASK_NONE;
-       u64 size;
-       int big;
-       int ret = 0;
-       int i;
-
-       if (nr_nodes <= 0)
-               return -1;
-       if (nr_nodes > MAX_NUMNODES) {
-               pr_info("numa=fake=%d too large, reducing to %d\n",
-                       nr_nodes, MAX_NUMNODES);
-               nr_nodes = MAX_NUMNODES;
-       }
-
-       size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
-       /*
-        * Calculate the number of big nodes that can be allocated as a result
-        * of consolidating the remainder.
-        */
-       big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
-               FAKE_NODE_MIN_SIZE;
-
-       size &= FAKE_NODE_MIN_HASH_MASK;
-       if (!size) {
-               pr_err("Not enough memory for each node.  "
-                       "NUMA emulation disabled.\n");
-               return -1;
-       }
 
-       for (i = 0; i < MAX_NUMNODES; i++)
-               if (physnodes[i].start != physnodes[i].end)
-                       node_set(i, physnode_mask);
-
-       /*
-        * Continue to fill physical nodes with fake nodes until there is no
-        * memory left on any of them.
-        */
-       while (nodes_weight(physnode_mask)) {
-               for_each_node_mask(i, physnode_mask) {
-                       u64 end = physnodes[i].start + size;
-                       u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
-
-                       if (ret < big)
-                               end += FAKE_NODE_MIN_SIZE;
+               for (j = i + 1; j < mi->nr_blks; j++) {
+                       struct numa_memblk *bj = &mi->blk[j];
+                       unsigned long start, end;
 
                        /*
-                        * Continue to add memory to this fake node if its
-                        * non-reserved memory is less than the per-node size.
+                        * See whether there are overlapping blocks.  Whine
+                        * about but allow overlaps of the same nid.  They
+                        * will be merged below.
                         */
-                       while (end - physnodes[i].start -
-                               memblock_x86_hole_size(physnodes[i].start, end) < size) {
-                               end += FAKE_NODE_MIN_SIZE;
-                               if (end > physnodes[i].end) {
-                                       end = physnodes[i].end;
-                                       break;
+                       if (bi->end > bj->start && bi->start < bj->end) {
+                               if (bi->nid != bj->nid) {
+                                       pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
+                                              bi->nid, bi->start, bi->end,
+                                              bj->nid, bj->start, bj->end);
+                                       return -EINVAL;
                                }
+                               pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
+                                          bi->nid, bi->start, bi->end,
+                                          bj->start, bj->end);
                        }
 
                        /*
-                        * If there won't be at least FAKE_NODE_MIN_SIZE of
-                        * non-reserved memory in ZONE_DMA32 for the next node,
-                        * this one must extend to the boundary.
+                        * Join blocks on the same node when the hole
+                        * between them doesn't overlap with memory on
+                        * any other node.
                         */
-                       if (end < dma32_end && dma32_end - end -
-                           memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
-                               end = dma32_end;
-
-                       /*
-                        * If there won't be enough non-reserved memory for the
-                        * next node, this one must extend to the end of the
-                        * physical node.
-                        */
-                       if (physnodes[i].end - end -
-                           memblock_x86_hole_size(end, physnodes[i].end) < size)
-                               end = physnodes[i].end;
-
-                       /*
-                        * Avoid allocating more nodes than requested, which can
-                        * happen as a result of rounding down each node's size
-                        * to FAKE_NODE_MIN_SIZE.
-                        */
-                       if (nodes_weight(physnode_mask) + ret >= nr_nodes)
-                               end = physnodes[i].end;
-
-                       if (setup_node_range(ret++, &physnodes[i].start,
-                                               end - physnodes[i].start,
-                                               physnodes[i].end) < 0)
-                               node_clear(i, physnode_mask);
+                       if (bi->nid != bj->nid)
+                               continue;
+                       start = max(min(bi->start, bj->start), low);
+                       end = min(max(bi->end, bj->end), high);
+                       for (k = 0; k < mi->nr_blks; k++) {
+                               struct numa_memblk *bk = &mi->blk[k];
+
+                               if (bi->nid == bk->nid)
+                                       continue;
+                               if (start < bk->end && end > bk->start)
+                                       break;
+                       }
+                       if (k < mi->nr_blks)
+                               continue;
+                       printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
+                              bi->nid, bi->start, bi->end, bj->start, bj->end,
+                              start, end);
+                       bi->start = start;
+                       bi->end = end;
+                       numa_remove_memblk_from(j--, mi);
                }
        }
-       return ret;
-}
 
-/*
- * Returns the end address of a node so that there is at least `size' amount of
- * non-reserved memory or `max_addr' is reached.
- */
-static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
-{
-       u64 end = start + size;
-
-       while (end - start - memblock_x86_hole_size(start, end) < size) {
-               end += FAKE_NODE_MIN_SIZE;
-               if (end > max_addr) {
-                       end = max_addr;
-                       break;
-               }
+       for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
+               mi->blk[i].start = mi->blk[i].end = 0;
+               mi->blk[i].nid = NUMA_NO_NODE;
        }
-       return end;
+
+       return 0;
 }
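
The cleanup above treats blocks as half-open [start, end) ranges; two
blocks overlap exactly when each starts before the other ends. That test,
pulled out as a stand-alone predicate (sketch):

#include <stdio.h>

static int ranges_overlap(unsigned long long s1, unsigned long long e1,
                          unsigned long long s2, unsigned long long e2)
{
        /* mirrors "bi->end > bj->start && bi->start < bj->end" above */
        return e1 > s2 && s1 < e2;
}

int main(void)
{
        printf("%d %d\n",
               ranges_overlap(0, 0x1000, 0x800, 0x2000),   /* 1: overlap  */
               ranges_overlap(0, 0x1000, 0x1000, 0x2000)); /* 0: adjacent */
        return 0;
}
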
 
 /*
- * Sets up fake nodes of `size' interleaved over physical nodes ranging from
- * `addr' to `max_addr'.  The return value is the number of nodes allocated.
+ * Set nodes, which have memory in @mi, in *@nodemask.
  */
-static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
+static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
+                                             const struct numa_meminfo *mi)
 {
-       nodemask_t physnode_mask = NODE_MASK_NONE;
-       u64 min_size;
-       int ret = 0;
        int i;
 
-       if (!size)
-               return -1;
-       /*
-        * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
-        * increased accordingly if the requested size is too small.  This
-        * creates a uniform distribution of node sizes across the entire
-        * machine (but not necessarily over physical nodes).
-        */
-       min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
-                                               MAX_NUMNODES;
-       min_size = max(min_size, FAKE_NODE_MIN_SIZE);
-       if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
-               min_size = (min_size + FAKE_NODE_MIN_SIZE) &
-                                               FAKE_NODE_MIN_HASH_MASK;
-       if (size < min_size) {
-               pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
-                       size >> 20, min_size >> 20);
-               size = min_size;
-       }
-       size &= FAKE_NODE_MIN_HASH_MASK;
-
-       for (i = 0; i < MAX_NUMNODES; i++)
-               if (physnodes[i].start != physnodes[i].end)
-                       node_set(i, physnode_mask);
-       /*
-        * Fill physical nodes with fake nodes of size until there is no memory
-        * left on any of them.
-        */
-       while (nodes_weight(physnode_mask)) {
-               for_each_node_mask(i, physnode_mask) {
-                       u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
-                       u64 end;
-
-                       end = find_end_of_node(physnodes[i].start,
-                                               physnodes[i].end, size);
-                       /*
-                        * If there won't be at least FAKE_NODE_MIN_SIZE of
-                        * non-reserved memory in ZONE_DMA32 for the next node,
-                        * this one must extend to the boundary.
-                        */
-                       if (end < dma32_end && dma32_end - end -
-                           memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
-                               end = dma32_end;
+       for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
+               if (mi->blk[i].start != mi->blk[i].end &&
+                   mi->blk[i].nid != NUMA_NO_NODE)
+                       node_set(mi->blk[i].nid, *nodemask);
+}
 
-                       /*
-                        * If there won't be enough non-reserved memory for the
-                        * next node, this one must extend to the end of the
-                        * physical node.
-                        */
-                       if (physnodes[i].end - end -
-                           memblock_x86_hole_size(end, physnodes[i].end) < size)
-                               end = physnodes[i].end;
+/**
+ * numa_reset_distance - Reset NUMA distance table
+ *
+ * The current table is freed.  The next numa_set_distance() call will
+ * create a new one.
+ */
+void __init numa_reset_distance(void)
+{
+       size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);
 
-                       /*
-                        * Setup the fake node that will be allocated as bootmem
-                        * later.  If setup_node_range() returns non-zero, there
-                        * is no more memory available on this physical node.
-                        */
-                       if (setup_node_range(ret++, &physnodes[i].start,
-                                               end - physnodes[i].start,
-                                               physnodes[i].end) < 0)
-                               node_clear(i, physnode_mask);
-               }
-       }
-       return ret;
+       /* numa_distance may hold the (void *)1LU failure marker, so test cnt */
+       if (numa_distance_cnt)
+               memblock_x86_free_range(__pa(numa_distance),
+                                       __pa(numa_distance) + size);
+       numa_distance_cnt = 0;
+       numa_distance = NULL;   /* enable table creation */
 }
 
-/*
- * Sets up the system RAM area from start_pfn to last_pfn according to the
- * numa=fake command-line option.
- */
-static int __init numa_emulation(unsigned long start_pfn,
-                       unsigned long last_pfn, int acpi, int amd)
+static int __init numa_alloc_distance(void)
 {
-       u64 addr = start_pfn << PAGE_SHIFT;
-       u64 max_addr = last_pfn << PAGE_SHIFT;
-       int num_nodes;
-       int i;
+       nodemask_t nodes_parsed;
+       size_t size;
+       int i, j, cnt = 0;
+       u64 phys;
 
-       /*
-        * If the numa=fake command-line contains a 'M' or 'G', it represents
-        * the fixed node size.  Otherwise, if it is just a single number N,
-        * split the system RAM into N fake nodes.
-        */
-       if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
-               u64 size;
+       /* size the new table and allocate it */
+       nodes_parsed = numa_nodes_parsed;
+       numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);
 
-               size = memparse(cmdline, &cmdline);
-               num_nodes = split_nodes_size_interleave(addr, max_addr, size);
-       } else {
-               unsigned long n;
+       for_each_node_mask(i, nodes_parsed)
+               cnt = i;
+       cnt++;
+       size = cnt * cnt * sizeof(numa_distance[0]);
 
-               n = simple_strtoul(cmdline, NULL, 0);
-               num_nodes = split_nodes_interleave(addr, max_addr, n);
+       phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT,
+                                     size, PAGE_SIZE);
+       if (phys == MEMBLOCK_ERROR) {
+               pr_warning("NUMA: Warning: can't allocate distance table!\n");
+               /* don't retry until explicitly reset */
+               numa_distance = (void *)1LU;
+               return -ENOMEM;
        }
+       memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");
 
-       if (num_nodes < 0)
-               return num_nodes;
-       memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
-       if (memnode_shift < 0) {
-               memnode_shift = 0;
-               printk(KERN_ERR "No NUMA hash function found.  NUMA emulation "
-                      "disabled.\n");
-               return -1;
-       }
+       numa_distance = __va(phys);
+       numa_distance_cnt = cnt;
+
+       /* fill with the default distances */
+       for (i = 0; i < cnt; i++)
+               for (j = 0; j < cnt; j++)
+                       numa_distance[i * cnt + j] = i == j ?
+                               LOCAL_DISTANCE : REMOTE_DISTANCE;
+       printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
 
-       /*
-        * We need to vacate all active ranges that may have been registered for
-        * the e820 memory map.
-        */
-       remove_all_active_ranges();
-       for_each_node_mask(i, node_possible_map) {
-               memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
-                                               nodes[i].end >> PAGE_SHIFT);
-               setup_node_bootmem(i, nodes[i].start, nodes[i].end);
-       }
-       setup_physnodes(addr, max_addr, acpi, amd);
-       fake_physnodes(acpi, amd, num_nodes);
-       numa_init_array();
        return 0;
 }
-#endif /* CONFIG_NUMA_EMU */
 
-void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
-                               int acpi, int amd)
+/**
+ * numa_set_distance - Set NUMA distance from one NUMA node to another
+ * @from: the 'from' node to set distance
+ * @to: the 'to' node to set distance
+ * @distance: NUMA distance
+ *
+ * Set the distance from node @from to @to to @distance.  If the distance
+ * table doesn't exist, one large enough to accommodate all the currently
+ * known nodes will be created.
+ *
+ * If such a table cannot be allocated, a warning is printed and further
+ * calls are ignored until the distance table is reset with
+ * numa_reset_distance().
+ *
+ * If @from or @to is higher than the highest known node at the time of
+ * table creation or @distance doesn't make sense, the call is ignored.
+ * This is to allow simplification of specific NUMA config implementations.
+ */
+void __init numa_set_distance(int from, int to, int distance)
 {
-       int i;
-
-       nodes_clear(node_possible_map);
-       nodes_clear(node_online_map);
-
-#ifdef CONFIG_NUMA_EMU
-       setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
-                       acpi, amd);
-       if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
+       if (!numa_distance && numa_alloc_distance() < 0)
                return;
-       setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
-                       acpi, amd);
-       nodes_clear(node_possible_map);
-       nodes_clear(node_online_map);
-#endif
 
-#ifdef CONFIG_ACPI_NUMA
-       if (!numa_off && acpi && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
-                                                 last_pfn << PAGE_SHIFT))
+       if (from >= numa_distance_cnt || to >= numa_distance_cnt) {
+               printk_once(KERN_DEBUG "NUMA: Debug: distance out of bounds, from=%d to=%d distance=%d\n",
+                           from, to, distance);
                return;
-       nodes_clear(node_possible_map);
-       nodes_clear(node_online_map);
-#endif
+       }
 
-#ifdef CONFIG_AMD_NUMA
-       if (!numa_off && amd && !amd_scan_nodes())
+       if ((u8)distance != distance ||
+           (from == to && distance != LOCAL_DISTANCE)) {
+               pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
+                            from, to, distance);
                return;
-       nodes_clear(node_possible_map);
-       nodes_clear(node_online_map);
-#endif
-       printk(KERN_INFO "%s\n",
-              numa_off ? "NUMA turned off" : "No NUMA configuration found");
+       }
 
-       printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
-              start_pfn << PAGE_SHIFT,
-              last_pfn << PAGE_SHIFT);
-       /* setup dummy node covering all memory */
-       memnode_shift = 63;
-       memnodemap = memnode.embedded_map;
-       memnodemap[0] = 0;
-       node_set_online(0);
-       node_set(0, node_possible_map);
-       for (i = 0; i < nr_cpu_ids; i++)
-               numa_set_node(i, 0);
-       memblock_x86_register_active_regions(0, start_pfn, last_pfn);
-       setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
+       numa_distance[from * numa_distance_cnt + to] = distance;
 }
 
-unsigned long __init numa_free_all_bootmem(void)
+int __node_distance(int from, int to)
 {
-       unsigned long pages = 0;
-       int i;
+       if (from >= numa_distance_cnt || to >= numa_distance_cnt)
+               return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
+       return numa_distance[from * numa_distance_cnt + to];
+}
+EXPORT_SYMBOL(__node_distance);
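
The distance table is a flat cnt x cnt byte array indexed as
[from * cnt + to], pre-filled with the conventional ACPI SLIT values
(LOCAL_DISTANCE = 10 on the diagonal, REMOTE_DISTANCE = 20 elsewhere).
A user-space model with cnt fixed at 2 and one invented override:

#include <stdio.h>

#define LOCAL_DISTANCE  10
#define REMOTE_DISTANCE 20

int main(void)
{
        int cnt = 2;
        unsigned char dist[2 * 2];

        for (int i = 0; i < cnt; i++)
                for (int j = 0; j < cnt; j++)
                        dist[i * cnt + j] = (i == j) ? LOCAL_DISTANCE
                                                     : REMOTE_DISTANCE;

        dist[0 * cnt + 1] = 21;         /* like numa_set_distance(0, 1, 21) */
        printf("node0 -> node1 distance: %d\n", dist[0 * cnt + 1]);
        return 0;
}
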
 
-       for_each_online_node(i)
-               pages += free_all_bootmem_node(NODE_DATA(i));
+/*
+ * Sanity check to catch more bad NUMA configurations (they are amazingly
+ * common).  Make sure the nodes cover all memory.
+ */
+static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
+{
+       unsigned long numaram, e820ram;
+       int i;
 
-       pages += free_all_memory_core_early(MAX_NUMNODES);
+       numaram = 0;
+       for (i = 0; i < mi->nr_blks; i++) {
+               unsigned long s = mi->blk[i].start >> PAGE_SHIFT;
+               unsigned long e = mi->blk[i].end >> PAGE_SHIFT;
+               numaram += e - s;
+               numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
+               if ((long)numaram < 0)
+                       numaram = 0;
+       }
 
-       return pages;
+       e820ram = max_pfn - (memblock_x86_hole_size(0,
+                                       max_pfn << PAGE_SHIFT) >> PAGE_SHIFT);
+       /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
+       if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
+               printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n",
+                      (numaram << PAGE_SHIFT) >> 20,
+                      (e820ram << PAGE_SHIFT) >> 20);
+               return false;
+       }
+       return true;
 }
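
The slack allowance above, "1 << (20 - PAGE_SHIFT)", is simply 1 MiB
expressed in pages; with 4 KiB pages that is 256 pages. A one-line check
(PAGE_SHIFT = 12 assumed):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        printf("1 MiB = %d pages of %d bytes\n",
               1 << (20 - PAGE_SHIFT), 1 << PAGE_SHIFT);
        return 0;
}
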
 
-#ifdef CONFIG_NUMA
-
-static __init int find_near_online_node(int node)
+static int __init numa_register_memblks(struct numa_meminfo *mi)
 {
-       int n, val;
-       int min_val = INT_MAX;
-       int best_node = -1;
+       int i, nid;
 
-       for_each_online_node(n) {
-               val = node_distance(node, n);
+       /* Account for nodes with cpus and no memory */
+       node_possible_map = numa_nodes_parsed;
+       numa_nodemask_from_meminfo(&node_possible_map, mi);
+       if (WARN_ON(nodes_empty(node_possible_map)))
+               return -EINVAL;
+
+       memnode_shift = compute_hash_shift(mi);
+       if (memnode_shift < 0) {
+               printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n");
+               return -EINVAL;
+       }
 
-               if (val < min_val) {
-                       min_val = val;
-                       best_node = n;
+       for (i = 0; i < mi->nr_blks; i++)
+               memblock_x86_register_active_regions(mi->blk[i].nid,
+                                       mi->blk[i].start >> PAGE_SHIFT,
+                                       mi->blk[i].end >> PAGE_SHIFT);
+
+       /* for out-of-order entries */
+       sort_node_map();
+       if (!numa_meminfo_cover_memory(mi))
+               return -EINVAL;
+
+       /* Finally register nodes. */
+       for_each_node_mask(nid, node_possible_map) {
+               u64 start = (u64)max_pfn << PAGE_SHIFT;
+               u64 end = 0;
+
+               for (i = 0; i < mi->nr_blks; i++) {
+                       if (nid != mi->blk[i].nid)
+                               continue;
+                       start = min(mi->blk[i].start, start);
+                       end = max(mi->blk[i].end, end);
                }
+
+               if (start < end)
+                       setup_node_bootmem(nid, start, end);
        }
 
-       return best_node;
+       return 0;
 }
 
-/*
- * Setup early cpu_to_node.
+/**
+ * dummy_numa_init - Fallback dummy NUMA init
  *
- * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
- * and apicid_to_node[] tables have valid entries for a CPU.
- * This means we skip cpu_to_node[] initialisation for NUMA
- * emulation and faking node case (when running a kernel compiled
- * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
- * is already initialized in a round robin manner at numa_init_array,
- * prior to this call, and this initialization is good enough
- * for the fake NUMA cases.
+ * Used if there's no underlying NUMA architecture, NUMA initialization
+ * fails, or NUMA is disabled on the command line.
  *
- * Called before the per_cpu areas are setup.
+ * Must online at least one node and add memory blocks that cover all
+ * allowed memory.  This function must not fail.
  */
-void __init init_cpu_to_node(void)
+static int __init dummy_numa_init(void)
 {
-       int cpu;
-       u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
-
-       BUG_ON(cpu_to_apicid == NULL);
+       printk(KERN_INFO "%s\n",
+              numa_off ? "NUMA turned off" : "No NUMA configuration found");
+       printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
+              0LU, max_pfn << PAGE_SHIFT);
 
-       for_each_possible_cpu(cpu) {
-               int node;
-               u16 apicid = cpu_to_apicid[cpu];
+       node_set(0, numa_nodes_parsed);
+       numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT);
 
-               if (apicid == BAD_APICID)
-                       continue;
-               node = apicid_to_node[apicid];
-               if (node == NUMA_NO_NODE)
-                       continue;
-               if (!node_online(node))
-                       node = find_near_online_node(node);
-               numa_set_node(cpu, node);
-       }
+       return 0;
 }
-#endif
 
-
-void __cpuinit numa_set_node(int cpu, int node)
+static int __init numa_init(int (*init_func)(void))
 {
-       int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
-
-       /* early setting, no percpu area yet */
-       if (cpu_to_node_map) {
-               cpu_to_node_map[cpu] = node;
-               return;
-       }
-
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-       if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
-               printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
-               dump_stack();
-               return;
-       }
-#endif
-       per_cpu(x86_cpu_to_node_map, cpu) = node;
+       int i;
+       int ret;
 
-       if (node != NUMA_NO_NODE)
-               set_cpu_numa_node(cpu, node);
-}
+       for (i = 0; i < MAX_LOCAL_APIC; i++)
+               set_apicid_to_node(i, NUMA_NO_NODE);
 
-void __cpuinit numa_clear_node(int cpu)
-{
-       numa_set_node(cpu, NUMA_NO_NODE);
-}
+       nodes_clear(numa_nodes_parsed);
+       nodes_clear(node_possible_map);
+       nodes_clear(node_online_map);
+       memset(&numa_meminfo, 0, sizeof(numa_meminfo));
+       remove_all_active_ranges();
+       numa_reset_distance();
 
-#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+       ret = init_func();
+       if (ret < 0)
+               return ret;
+       ret = numa_cleanup_meminfo(&numa_meminfo);
+       if (ret < 0)
+               return ret;
 
-#ifndef CONFIG_NUMA_EMU
-void __cpuinit numa_add_cpu(int cpu)
-{
-       cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
-}
+       numa_emulation(&numa_meminfo, numa_distance_cnt);
 
-void __cpuinit numa_remove_cpu(int cpu)
-{
-       cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
-}
-#else
-void __cpuinit numa_add_cpu(int cpu)
-{
-       unsigned long addr;
-       u16 apicid;
-       int physnid;
-       int nid = NUMA_NO_NODE;
+       ret = numa_register_memblks(&numa_meminfo);
+       if (ret < 0)
+               return ret;
 
-       apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
-       if (apicid != BAD_APICID)
-               nid = apicid_to_node[apicid];
-       if (nid == NUMA_NO_NODE)
-               nid = early_cpu_to_node(cpu);
-       BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
-
-       /*
-        * Use the starting address of the emulated node to find which physical
-        * node it is allocated on.
-        */
-       addr = node_start_pfn(nid) << PAGE_SHIFT;
-       for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
-               if (addr >= physnodes[physnid].start &&
-                   addr < physnodes[physnid].end)
-                       break;
+       for (i = 0; i < nr_cpu_ids; i++) {
+               int nid = early_cpu_to_node(i);
 
-       /*
-        * Map the cpu to each emulated node that is allocated on the physical
-        * node of the cpu's apic id.
-        */
-       for_each_online_node(nid) {
-               addr = node_start_pfn(nid) << PAGE_SHIFT;
-               if (addr >= physnodes[physnid].start &&
-                   addr < physnodes[physnid].end)
-                       cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
+               if (nid == NUMA_NO_NODE)
+                       continue;
+               if (!node_online(nid))
+                       numa_clear_node(i);
        }
+       numa_init_array();
+       return 0;
 }
 
-void __cpuinit numa_remove_cpu(int cpu)
+void __init initmem_init(void)
 {
-       int i;
+       int ret;
 
-       for_each_online_node(i)
-               cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
-}
-#endif /* !CONFIG_NUMA_EMU */
-
-#else /* CONFIG_DEBUG_PER_CPU_MAPS */
-static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
-{
-       int node = early_cpu_to_node(cpu);
-       struct cpumask *mask;
-       char buf[64];
-
-       mask = node_to_cpumask_map[node];
-       if (!mask) {
-               pr_err("node_to_cpumask_map[%i] NULL\n", node);
-               dump_stack();
-               return NULL;
+       if (!numa_off) {
+#ifdef CONFIG_ACPI_NUMA
+               ret = numa_init(x86_acpi_numa_init);
+               if (!ret)
+                       return;
+#endif
+#ifdef CONFIG_AMD_NUMA
+               ret = numa_init(amd_numa_init);
+               if (!ret)
+                       return;
+#endif
        }
 
-       cpulist_scnprintf(buf, sizeof(buf), mask);
-       printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-               enable ? "numa_add_cpu" : "numa_remove_cpu",
-               cpu, node, buf);
-       return mask;
+       numa_init(dummy_numa_init);
 }
 
-/*
- * --------- debug versions of the numa functions ---------
- */
-#ifndef CONFIG_NUMA_EMU
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
-{
-       struct cpumask *mask;
-
-       mask = debug_cpumask_set_cpu(cpu, enable);
-       if (!mask)
-               return;
-
-       if (enable)
-               cpumask_set_cpu(cpu, mask);
-       else
-               cpumask_clear_cpu(cpu, mask);
-}
-#else
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+unsigned long __init numa_free_all_bootmem(void)
 {
-       int node = early_cpu_to_node(cpu);
-       struct cpumask *mask;
+       unsigned long pages = 0;
        int i;
 
-       for_each_online_node(i) {
-               unsigned long addr;
-
-               addr = node_start_pfn(i) << PAGE_SHIFT;
-               if (addr < physnodes[node].start ||
-                                       addr >= physnodes[node].end)
-                       continue;
-               mask = debug_cpumask_set_cpu(cpu, enable);
-               if (!mask)
-                       return;
-
-               if (enable)
-                       cpumask_set_cpu(cpu, mask);
-               else
-                       cpumask_clear_cpu(cpu, mask);
-       }
-}
-#endif /* CONFIG_NUMA_EMU */
+       for_each_online_node(i)
+               pages += free_all_bootmem_node(NODE_DATA(i));
 
-void __cpuinit numa_add_cpu(int cpu)
-{
-       numa_set_cpumask(cpu, 1);
-}
+       pages += free_all_memory_core_early(MAX_NUMNODES);
 
-void __cpuinit numa_remove_cpu(int cpu)
-{
-       numa_set_cpumask(cpu, 0);
+       return pages;
 }
 
-int __cpu_to_node(int cpu)
+int __cpuinit numa_cpu_node(int cpu)
 {
-       if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
-               printk(KERN_WARNING
-                       "cpu_to_node(%d): usage too early!\n", cpu);
-               dump_stack();
-               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-       }
-       return per_cpu(x86_cpu_to_node_map, cpu);
-}
-EXPORT_SYMBOL(__cpu_to_node);
+       int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 
-/*
- * Same function as cpu_to_node() but used if called before the
- * per_cpu areas are setup.
- */
-int early_cpu_to_node(int cpu)
-{
-       if (early_per_cpu_ptr(x86_cpu_to_node_map))
-               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
-       if (!cpu_possible(cpu)) {
-               printk(KERN_WARNING
-                       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
-               dump_stack();
-               return NUMA_NO_NODE;
-       }
-       return per_cpu(x86_cpu_to_node_map, cpu);
+       if (apicid != BAD_APICID)
+               return __apicid_to_node[apicid];
+       return NUMA_NO_NODE;
 }
-
-/*
- * --------- end of debug versions of the numa functions ---------
- */
-
-#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
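
The init_func chain used by initmem_init() above tries the firmware parsers in order (ACPI SRAT, then AMD northbridge info) before falling back to a single flat node. A minimal sketch of the contract an init_func must satisfy — hypothetical, modeled on what dummy_numa_init has to do: mark the parsed nodes in numa_nodes_parsed, register memory via numa_add_memblk(), and return 0 on success or a negative error code:

	static int __init example_numa_init(void)
	{
		/* claim node 0 and hand all of RAM to it */
		node_set(0, numa_nodes_parsed);
		return numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT);
	}
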
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
new file mode 100644 (file)
index 0000000..ad091e4
--- /dev/null
@@ -0,0 +1,494 @@
+/*
+ * NUMA emulation
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/topology.h>
+#include <linux/memblock.h>
+#include <asm/dma.h>
+
+#include "numa_internal.h"
+
+static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata;
+static char *emu_cmdline __initdata;
+
+void __init numa_emu_cmdline(char *str)
+{
+       emu_cmdline = str;
+}
+
+static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
+{
+       int i;
+
+       for (i = 0; i < mi->nr_blks; i++)
+               if (mi->blk[i].nid == nid)
+                       return i;
+       return -ENOENT;
+}
+
+/*
+ * Sets up node @nid with @size bytes carved from physical block @phys_blk
+ * of @pi.  The return value is -errno if something went wrong, 0 otherwise.
+ */
+static int __init emu_setup_memblk(struct numa_meminfo *ei,
+                                  struct numa_meminfo *pi,
+                                  int nid, int phys_blk, u64 size)
+{
+       struct numa_memblk *eb = &ei->blk[ei->nr_blks];
+       struct numa_memblk *pb = &pi->blk[phys_blk];
+
+       if (ei->nr_blks >= NR_NODE_MEMBLKS) {
+               pr_err("NUMA: Too many emulated memblks, failing emulation\n");
+               return -EINVAL;
+       }
+
+       ei->nr_blks++;
+       eb->start = pb->start;
+       eb->end = pb->start + size;
+       eb->nid = nid;
+
+       if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
+               emu_nid_to_phys[nid] = pb->nid;
+
+       pb->start += size;
+       if (pb->start >= pb->end) {
+               WARN_ON_ONCE(pb->start > pb->end);
+               numa_remove_memblk_from(phys_blk, pi);
+       }
+
+       printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
+              eb->start, eb->end, (eb->end - eb->start) >> 20);
+       return 0;
+}
+
+/*
+ * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from
+ * addr to max_addr.  The return value is 0 on success, negative on failure.
+ */
+static int __init split_nodes_interleave(struct numa_meminfo *ei,
+                                        struct numa_meminfo *pi,
+                                        u64 addr, u64 max_addr, int nr_nodes)
+{
+       nodemask_t physnode_mask = NODE_MASK_NONE;
+       u64 size;
+       int big;
+       int nid = 0;
+       int i, ret;
+
+       if (nr_nodes <= 0)
+               return -1;
+       if (nr_nodes > MAX_NUMNODES) {
+               pr_info("numa=fake=%d too large, reducing to %d\n",
+                       nr_nodes, MAX_NUMNODES);
+               nr_nodes = MAX_NUMNODES;
+       }
+
+       size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
+       /*
+        * Calculate the number of big nodes that can be allocated as a result
+        * of consolidating the remainder.
+        */
+       big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
+               FAKE_NODE_MIN_SIZE;
+
+       size &= FAKE_NODE_MIN_HASH_MASK;
+       if (!size) {
+               pr_err("Not enough memory for each node.  "
+                       "NUMA emulation disabled.\n");
+               return -1;
+       }
+
+       for (i = 0; i < pi->nr_blks; i++)
+               node_set(pi->blk[i].nid, physnode_mask);
+
+       /*
+        * Continue to fill physical nodes with fake nodes until there is no
+        * memory left on any of them.
+        */
+       while (nodes_weight(physnode_mask)) {
+               for_each_node_mask(i, physnode_mask) {
+                       u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
+                       u64 start, limit, end;
+                       int phys_blk;
+
+                       phys_blk = emu_find_memblk_by_nid(i, pi);
+                       if (phys_blk < 0) {
+                               node_clear(i, physnode_mask);
+                               continue;
+                       }
+                       start = pi->blk[phys_blk].start;
+                       limit = pi->blk[phys_blk].end;
+                       end = start + size;
+
+                       if (nid < big)
+                               end += FAKE_NODE_MIN_SIZE;
+
+                       /*
+                        * Keep adding memory to this fake node while its
+                        * non-reserved memory is less than the per-node size.
+                        */
+                       while (end - start -
+                              memblock_x86_hole_size(start, end) < size) {
+                               end += FAKE_NODE_MIN_SIZE;
+                               if (end > limit) {
+                                       end = limit;
+                                       break;
+                               }
+                       }
+
+                       /*
+                        * If there won't be at least FAKE_NODE_MIN_SIZE of
+                        * non-reserved memory in ZONE_DMA32 for the next node,
+                        * this one must extend to the boundary.
+                        */
+                       if (end < dma32_end && dma32_end - end -
+                           memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+                               end = dma32_end;
+
+                       /*
+                        * If there won't be enough non-reserved memory for the
+                        * next node, this one must extend to the end of the
+                        * physical node.
+                        */
+                       if (limit - end -
+                           memblock_x86_hole_size(end, limit) < size)
+                               end = limit;
+
+                       ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
+                                              phys_blk,
+                                              min(end, limit) - start);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Returns the end address of a node such that it covers at least `size' of
+ * non-reserved memory, or `max_addr' if that is reached first.
+ */
+static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
+{
+       u64 end = start + size;
+
+       while (end - start - memblock_x86_hole_size(start, end) < size) {
+               end += FAKE_NODE_MIN_SIZE;
+               if (end > max_addr) {
+                       end = max_addr;
+                       break;
+               }
+       }
+       return end;
+}
+
+/*
+ * Sets up fake nodes of `size' interleaved over physical nodes ranging from
+ * `addr' to `max_addr'.  The return value is 0 on success, negative on failure.
+ */
+static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+                                             struct numa_meminfo *pi,
+                                             u64 addr, u64 max_addr, u64 size)
+{
+       nodemask_t physnode_mask = NODE_MASK_NONE;
+       u64 min_size;
+       int nid = 0;
+       int i, ret;
+
+       if (!size)
+               return -1;
+       /*
+        * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
+        * increased accordingly if the requested size is too small.  This
+        * creates a uniform distribution of node sizes across the entire
+        * machine (but not necessarily over physical nodes).
+        */
+       min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
+                                               MAX_NUMNODES;
+       min_size = max(min_size, FAKE_NODE_MIN_SIZE);
+       if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
+               min_size = (min_size + FAKE_NODE_MIN_SIZE) &
+                                               FAKE_NODE_MIN_HASH_MASK;
+       if (size < min_size) {
+               pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
+                       size >> 20, min_size >> 20);
+               size = min_size;
+       }
+       size &= FAKE_NODE_MIN_HASH_MASK;
+
+       for (i = 0; i < pi->nr_blks; i++)
+               node_set(pi->blk[i].nid, physnode_mask);
+
+       /*
+        * Fill physical nodes with fake nodes of `size' until there is no
+        * memory left on any of them.
+        */
+       while (nodes_weight(physnode_mask)) {
+               for_each_node_mask(i, physnode_mask) {
+                       u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
+                       u64 start, limit, end;
+                       int phys_blk;
+
+                       phys_blk = emu_find_memblk_by_nid(i, pi);
+                       if (phys_blk < 0) {
+                               node_clear(i, physnode_mask);
+                               continue;
+                       }
+                       start = pi->blk[phys_blk].start;
+                       limit = pi->blk[phys_blk].end;
+
+                       end = find_end_of_node(start, limit, size);
+                       /*
+                        * If there won't be at least FAKE_NODE_MIN_SIZE of
+                        * non-reserved memory in ZONE_DMA32 for the next node,
+                        * this one must extend to the boundary.
+                        */
+                       if (end < dma32_end && dma32_end - end -
+                           memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+                               end = dma32_end;
+
+                       /*
+                        * If there won't be enough non-reserved memory for the
+                        * next node, this one must extend to the end of the
+                        * physical node.
+                        */
+                       if (limit - end -
+                           memblock_x86_hole_size(end, limit) < size)
+                               end = limit;
+
+                       ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
+                                              phys_blk,
+                                              min(end, limit) - start);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+       return 0;
+}
+
+/**
+ * numa_emulation - Emulate NUMA nodes
+ * @numa_meminfo: NUMA configuration to massage
+ * @numa_dist_cnt: The size of the physical NUMA distance table
+ *
+ * Emulate NUMA nodes according to the numa=fake kernel parameter.
+ * @numa_meminfo contains the physical memory configuration and is modified
+ * to reflect the emulated configuration on success.  @numa_dist_cnt is
+ * used to determine the size of the physical distance table.
+ *
+ * On success, the following modifications are made.
+ *
+ * - @numa_meminfo is updated to reflect the emulated nodes.
+ *
+ * - __apicid_to_node[] is updated such that APIC IDs are mapped to the
+ *   emulated nodes.
+ *
+ * - NUMA distance table is rebuilt to represent distances between emulated
+ *   nodes.  The distances are derived from how emulated nodes map to
+ *   physical nodes, and so match the actual physical distances.
+ *
+ * - emu_nid_to_phys[] reflects how emulated nodes are mapped to physical
+ *   nodes.  This is used by numa_add_cpu() and numa_remove_cpu().
+ *
+ * If emulation is not enabled or fails, emu_nid_to_phys[] is filled with
+ * identity mapping and no other modification is made.
+ */
+void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
+{
+       static struct numa_meminfo ei __initdata;
+       static struct numa_meminfo pi __initdata;
+       const u64 max_addr = max_pfn << PAGE_SHIFT;
+       u8 *phys_dist = NULL;
+       size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]);
+       int max_emu_nid, dfl_phys_nid;
+       int i, j, ret;
+
+       if (!emu_cmdline)
+               goto no_emu;
+
+       memset(&ei, 0, sizeof(ei));
+       pi = *numa_meminfo;
+
+       for (i = 0; i < MAX_NUMNODES; i++)
+               emu_nid_to_phys[i] = NUMA_NO_NODE;
+
+       /*
+        * If the numa=fake command-line contains an 'M' or 'G', it represents
+        * the fixed node size.  Otherwise, if it is just a single number N,
+        * split the system RAM into N fake nodes.
+        */
+       if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
+               u64 size;
+
+               size = memparse(emu_cmdline, &emu_cmdline);
+               ret = split_nodes_size_interleave(&ei, &pi, 0, max_addr, size);
+       } else {
+               unsigned long n;
+
+               n = simple_strtoul(emu_cmdline, NULL, 0);
+               ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n);
+       }
+
+       if (ret < 0)
+               goto no_emu;
+
+       if (numa_cleanup_meminfo(&ei) < 0) {
+               pr_warning("NUMA: Warning: constructed meminfo invalid, disabling emulation\n");
+               goto no_emu;
+       }
+
+       /* copy the physical distance table */
+       if (numa_dist_cnt) {
+               u64 phys;
+
+               phys = memblock_find_in_range(0,
+                                             (u64)max_pfn_mapped << PAGE_SHIFT,
+                                             phys_size, PAGE_SIZE);
+               if (phys == MEMBLOCK_ERROR) {
+                       pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
+                       goto no_emu;
+               }
+               memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST");
+               phys_dist = __va(phys);
+
+               for (i = 0; i < numa_dist_cnt; i++)
+                       for (j = 0; j < numa_dist_cnt; j++)
+                               phys_dist[i * numa_dist_cnt + j] =
+                                       node_distance(i, j);
+       }
+
+       /*
+        * Determine the max emulated nid and the default phys nid to use
+        * for unmapped nodes.
+        */
+       max_emu_nid = 0;
+       dfl_phys_nid = NUMA_NO_NODE;
+       for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) {
+               if (emu_nid_to_phys[i] != NUMA_NO_NODE) {
+                       max_emu_nid = i;
+                       if (dfl_phys_nid == NUMA_NO_NODE)
+                               dfl_phys_nid = emu_nid_to_phys[i];
+               }
+       }
+       if (dfl_phys_nid == NUMA_NO_NODE) {
+               pr_warning("NUMA: Warning: can't determine default physical node, disabling emulation\n");
+               goto no_emu;
+       }
+
+       /* commit */
+       *numa_meminfo = ei;
+
+       /*
+        * Transform __apicid_to_node table to use emulated nids by
+        * reverse-mapping phys_nid.  The maps should always exist but fall
+        * back to zero just in case.
+        */
+       for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) {
+               if (__apicid_to_node[i] == NUMA_NO_NODE)
+                       continue;
+               for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
+                       if (__apicid_to_node[i] == emu_nid_to_phys[j])
+                               break;
+               __apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? j : 0;
+       }
+
+       /* make sure all emulated nodes are mapped to a physical node */
+       for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
+               if (emu_nid_to_phys[i] == NUMA_NO_NODE)
+                       emu_nid_to_phys[i] = dfl_phys_nid;
+
+       /* transform distance table */
+       numa_reset_distance();
+       for (i = 0; i < max_emu_nid + 1; i++) {
+               for (j = 0; j < max_emu_nid + 1; j++) {
+                       int physi = emu_nid_to_phys[i];
+                       int physj = emu_nid_to_phys[j];
+                       int dist;
+
+                       if (physi >= numa_dist_cnt || physj >= numa_dist_cnt)
+                               dist = physi == physj ?
+                                       LOCAL_DISTANCE : REMOTE_DISTANCE;
+                       else
+                               dist = phys_dist[physi * numa_dist_cnt + physj];
+
+                       numa_set_distance(i, j, dist);
+               }
+       }
+
+       /* free the copied physical distance table */
+       if (phys_dist)
+               memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size);
+       return;
+
+no_emu:
+       /* No emulation.  Build identity emu_nid_to_phys[] for numa_add_cpu() */
+       for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
+               emu_nid_to_phys[i] = i;
+}
+
+#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+void __cpuinit numa_add_cpu(int cpu)
+{
+       int physnid, nid;
+
+       nid = early_cpu_to_node(cpu);
+       BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
+
+       physnid = emu_nid_to_phys[nid];
+
+       /*
+        * Map the cpu to each emulated node that is allocated on the physical
+        * node of the cpu's apic id.
+        */
+       for_each_online_node(nid)
+               if (emu_nid_to_phys[nid] == physnid)
+                       cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+       int i;
+
+       for_each_online_node(i)
+               cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
+}
+#else  /* !CONFIG_DEBUG_PER_CPU_MAPS */
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+       struct cpumask *mask;
+       int nid, physnid, i;
+
+       nid = early_cpu_to_node(cpu);
+       if (nid == NUMA_NO_NODE) {
+               /* early_cpu_to_node() already emits a warning and trace */
+               return;
+       }
+
+       physnid = emu_nid_to_phys[nid];
+
+       for_each_online_node(i) {
+               if (emu_nid_to_phys[i] != physnid)
+                       continue;
+
+               mask = debug_cpumask_set_cpu(cpu, enable);
+               if (!mask)
+                       return;
+
+               if (enable)
+                       cpumask_set_cpu(cpu, mask);
+               else
+                       cpumask_clear_cpu(cpu, mask);
+       }
+}
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+       numa_set_cpumask(cpu, 1);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+       numa_set_cpumask(cpu, 0);
+}
+#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
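
For reference, the numa=fake parsing in numa_emulation() above accepts either a plain node count or a memparse()-style size; illustrative boot lines (values hypothetical):

	numa=fake=8	split system RAM into 8 interleaved fake nodes
	numa=fake=512M	carve 512MB fake nodes until RAM is exhausted
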
diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h
new file mode 100644 (file)
index 0000000..ef2d973
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __X86_MM_NUMA_INTERNAL_H
+#define __X86_MM_NUMA_INTERNAL_H
+
+#include <linux/types.h>
+#include <asm/numa.h>
+
+struct numa_memblk {
+       u64                     start;
+       u64                     end;
+       int                     nid;
+};
+
+struct numa_meminfo {
+       int                     nr_blks;
+       struct numa_memblk      blk[NR_NODE_MEMBLKS];
+};
+
+void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi);
+int __init numa_cleanup_meminfo(struct numa_meminfo *mi);
+void __init numa_reset_distance(void);
+
+#ifdef CONFIG_NUMA_EMU
+void __init numa_emulation(struct numa_meminfo *numa_meminfo,
+                          int numa_dist_cnt);
+#else
+static inline void numa_emulation(struct numa_meminfo *numa_meminfo,
+                                 int numa_dist_cnt)
+{ }
+#endif
+
+#endif /* __X86_MM_NUMA_INTERNAL_H */
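
The two structures above are the entire interface between the NUMA parsers and the registration code. A small sketch of walking one — hypothetical helper, assuming a populated instance:

	static u64 __init meminfo_total_bytes(const struct numa_meminfo *mi)
	{
		u64 total = 0;
		int i;

		/* blk[0..nr_blks) holds [start, end) ranges tagged with a nid */
		for (i = 0; i < mi->nr_blks; i++)
			total += mi->blk[i].end - mi->blk[i].start;
		return total;
	}
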
index d343b3c81f3c1e79a7a4f21eb51dc4672f7a1c9b..90825f2eb0f4ef25c9f4d474b29d6a986014f012 100644 (file)
@@ -57,12 +57,10 @@ static unsigned long direct_pages_count[PG_LEVEL_NUM];
 
 void update_page_count(int level, unsigned long pages)
 {
-       unsigned long flags;
-
        /* Protect against CPA */
-       spin_lock_irqsave(&pgd_lock, flags);
+       spin_lock(&pgd_lock);
        direct_pages_count[level] += pages;
-       spin_unlock_irqrestore(&pgd_lock, flags);
+       spin_unlock(&pgd_lock);
 }
 
 static void split_page_count(int level)
@@ -394,7 +392,7 @@ static int
 try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
 {
-       unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
+       unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot, req_prot;
        int i, do_split = 1;
@@ -403,7 +401,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
        if (cpa->force_split)
                return 1;
 
-       spin_lock_irqsave(&pgd_lock, flags);
+       spin_lock(&pgd_lock);
        /*
         * Check for races, another CPU might have split this page
         * up already:
@@ -498,14 +496,14 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
        }
 
 out_unlock:
-       spin_unlock_irqrestore(&pgd_lock, flags);
+       spin_unlock(&pgd_lock);
 
        return do_split;
 }
 
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
-       unsigned long flags, pfn, pfninc = 1;
+       unsigned long pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *pbase, *tmp;
        pgprot_t ref_prot;
@@ -519,7 +517,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
        if (!base)
                return -ENOMEM;
 
-       spin_lock_irqsave(&pgd_lock, flags);
+       spin_lock(&pgd_lock);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
@@ -591,7 +589,7 @@ out_unlock:
         */
        if (base)
                __free_page(base);
-       spin_unlock_irqrestore(&pgd_lock, flags);
+       spin_unlock(&pgd_lock);
 
        return 0;
 }
index 500242d3c96d61741607af926d3675ed9a309c46..0113d19c8aa60985764dfce5bcc4488c8ee57c5c 100644 (file)
@@ -121,14 +121,12 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
 
 static void pgd_dtor(pgd_t *pgd)
 {
-       unsigned long flags; /* can be called from interrupt context */
-
        if (SHARED_KERNEL_PMD)
                return;
 
-       spin_lock_irqsave(&pgd_lock, flags);
+       spin_lock(&pgd_lock);
        pgd_list_del(pgd);
-       spin_unlock_irqrestore(&pgd_lock, flags);
+       spin_unlock(&pgd_lock);
 }
 
 /*
@@ -260,7 +258,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 {
        pgd_t *pgd;
        pmd_t *pmds[PREALLOCATED_PMDS];
-       unsigned long flags;
 
        pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
 
@@ -280,12 +277,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
         * respect to anything walking the pgd_list, so that they
         * never see a partially populated pgd.
         */
-       spin_lock_irqsave(&pgd_lock, flags);
+       spin_lock(&pgd_lock);
 
        pgd_ctor(mm, pgd);
        pgd_prepopulate_pmd(mm, pgd, pmds);
 
-       spin_unlock_irqrestore(&pgd_lock, flags);
+       spin_unlock(&pgd_lock);
 
        return pgd;
 
index ae96e7b8051d0c46e786e0ef80581ae331c3f01a..48651c6f657d58605ef5ab6ac54ec3ff03400ac0 100644 (file)
@@ -57,7 +57,7 @@ struct node_memory_chunk_s {
 static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS];
 
 static int __initdata num_memory_chunks; /* total number of memory chunks */
-static u8 __initdata apicid_to_pxm[MAX_APICID];
+static u8 __initdata apicid_to_pxm[MAX_LOCAL_APIC];
 
 int acpi_numa __initdata;
 
@@ -254,8 +254,8 @@ int __init get_memcfg_from_srat(void)
        printk(KERN_DEBUG "Number of memory chunks in system = %d\n",
                         num_memory_chunks);
 
-       for (i = 0; i < MAX_APICID; i++)
-               apicid_2_node[i] = pxm_to_node(apicid_to_pxm[i]);
+       for (i = 0; i < MAX_LOCAL_APIC; i++)
+               set_apicid_to_node(i, pxm_to_node(apicid_to_pxm[i]));
 
        for (j = 0; j < num_memory_chunks; j++){
                struct node_memory_chunk_s * chunk = &node_memory_chunk[j];
index 603d285d1daa918e90069ff97697ef57ee3160e6..8e9d3394f6d452d97703f2e639e2d0b8f409d473 100644 (file)
 
 int acpi_numa __initdata;
 
-static struct acpi_table_slit *acpi_slit;
-
-static nodemask_t nodes_parsed __initdata;
-static nodemask_t cpu_nodes_parsed __initdata;
-static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode nodes_add[MAX_NUMNODES];
 
-static int num_node_memblks __initdata;
-static struct bootnode node_memblk_range[NR_NODE_MEMBLKS] __initdata;
-static int memblk_nodeid[NR_NODE_MEMBLKS] __initdata;
-
 static __init int setup_node(int pxm)
 {
        return acpi_map_pxm_to_node(pxm);
 }
 
-static __init int conflicting_memblks(unsigned long start, unsigned long end)
-{
-       int i;
-       for (i = 0; i < num_node_memblks; i++) {
-               struct bootnode *nd = &node_memblk_range[i];
-               if (nd->start == nd->end)
-                       continue;
-               if (nd->end > start && nd->start < end)
-                       return memblk_nodeid[i];
-               if (nd->end == end && nd->start == start)
-                       return memblk_nodeid[i];
-       }
-       return -1;
-}
-
-static __init void cutoff_node(int i, unsigned long start, unsigned long end)
-{
-       struct bootnode *nd = &nodes[i];
-
-       if (nd->start < start) {
-               nd->start = start;
-               if (nd->end < nd->start)
-                       nd->start = nd->end;
-       }
-       if (nd->end > end) {
-               nd->end = end;
-               if (nd->start > nd->end)
-                       nd->start = nd->end;
-       }
-}
-
 static __init void bad_srat(void)
 {
-       int i;
        printk(KERN_ERR "SRAT: SRAT not used.\n");
        acpi_numa = -1;
-       for (i = 0; i < MAX_LOCAL_APIC; i++)
-               apicid_to_node[i] = NUMA_NO_NODE;
-       for (i = 0; i < MAX_NUMNODES; i++) {
-               nodes[i].start = nodes[i].end = 0;
-               nodes_add[i].start = nodes_add[i].end = 0;
-       }
-       remove_all_active_ranges();
+       memset(nodes_add, 0, sizeof(nodes_add));
 }
 
 static __init inline int srat_disabled(void)
 {
-       return numa_off || acpi_numa < 0;
+       return acpi_numa < 0;
 }
 
 /* Callback for SLIT parsing */
 void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 {
-       unsigned length;
-       unsigned long phys;
-
-       length = slit->header.length;
-       phys = memblock_find_in_range(0, max_pfn_mapped<<PAGE_SHIFT, length,
-                PAGE_SIZE);
-
-       if (phys == MEMBLOCK_ERROR)
-               panic(" Can not save slit!\n");
+       int i, j;
 
-       acpi_slit = __va(phys);
-       memcpy(acpi_slit, slit, length);
-       memblock_x86_reserve_range(phys, phys + length, "ACPI SLIT");
+       for (i = 0; i < slit->locality_count; i++)
+               for (j = 0; j < slit->locality_count; j++)
+                       numa_set_distance(pxm_to_node(i), pxm_to_node(j),
+                               slit->entry[slit->locality_count * i + j]);
 }
 
 /* Callback for Proximity Domain -> x2APIC mapping */
@@ -138,8 +84,8 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
                printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
                return;
        }
-       apicid_to_node[apic_id] = node;
-       node_set(node, cpu_nodes_parsed);
+       set_apicid_to_node(apic_id, node);
+       node_set(node, numa_nodes_parsed);
        acpi_numa = 1;
        printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
               pxm, apic_id, node);
@@ -178,8 +124,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
                return;
        }
 
-       apicid_to_node[apic_id] = node;
-       node_set(node, cpu_nodes_parsed);
+       set_apicid_to_node(apic_id, node);
+       node_set(node, numa_nodes_parsed);
        acpi_numa = 1;
        printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
               pxm, apic_id, node);
@@ -241,7 +187,7 @@ update_nodes_add(int node, unsigned long start, unsigned long end)
        }
 
        if (changed) {
-               node_set(node, cpu_nodes_parsed);
+               node_set(node, numa_nodes_parsed);
                printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n",
                                 nd->start, nd->end);
        }
@@ -251,10 +197,8 @@ update_nodes_add(int node, unsigned long start, unsigned long end)
 void __init
 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
-       struct bootnode *nd, oldnode;
        unsigned long start, end;
        int node, pxm;
-       int i;
 
        if (srat_disabled())
                return;
@@ -276,300 +220,31 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
                bad_srat();
                return;
        }
-       i = conflicting_memblks(start, end);
-       if (i == node) {
-               printk(KERN_WARNING
-               "SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
-                       pxm, start, end, nodes[i].start, nodes[i].end);
-       } else if (i >= 0) {
-               printk(KERN_ERR
-                      "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
-                      pxm, start, end, node_to_pxm(i),
-                       nodes[i].start, nodes[i].end);
+
+       if (numa_add_memblk(node, start, end) < 0) {
                bad_srat();
                return;
        }
-       nd = &nodes[node];
-       oldnode = *nd;
-       if (!node_test_and_set(node, nodes_parsed)) {
-               nd->start = start;
-               nd->end = end;
-       } else {
-               if (start < nd->start)
-                       nd->start = start;
-               if (nd->end < end)
-                       nd->end = end;
-       }
 
        printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm,
               start, end);
 
-       if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) {
+       if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)
                update_nodes_add(node, start, end);
-               /* restore nodes[node] */
-               *nd = oldnode;
-               if ((nd->start | nd->end) == 0)
-                       node_clear(node, nodes_parsed);
-       }
-
-       node_memblk_range[num_node_memblks].start = start;
-       node_memblk_range[num_node_memblks].end = end;
-       memblk_nodeid[num_node_memblks] = node;
-       num_node_memblks++;
-}
-
-/* Sanity check to catch more bad SRATs (they are amazingly common).
-   Make sure the PXMs cover all memory. */
-static int __init nodes_cover_memory(const struct bootnode *nodes)
-{
-       int i;
-       unsigned long pxmram, e820ram;
-
-       pxmram = 0;
-       for_each_node_mask(i, nodes_parsed) {
-               unsigned long s = nodes[i].start >> PAGE_SHIFT;
-               unsigned long e = nodes[i].end >> PAGE_SHIFT;
-               pxmram += e - s;
-               pxmram -= __absent_pages_in_range(i, s, e);
-               if ((long)pxmram < 0)
-                       pxmram = 0;
-       }
-
-       e820ram = max_pfn - (memblock_x86_hole_size(0, max_pfn<<PAGE_SHIFT)>>PAGE_SHIFT);
-       /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
-       if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) {
-               printk(KERN_ERR
-       "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
-                       (pxmram << PAGE_SHIFT) >> 20,
-                       (e820ram << PAGE_SHIFT) >> 20);
-               return 0;
-       }
-       return 1;
 }
 
 void __init acpi_numa_arch_fixup(void) {}
 
-#ifdef CONFIG_NUMA_EMU
-void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
-                               unsigned long end)
-{
-       int i;
-
-       for_each_node_mask(i, nodes_parsed) {
-               cutoff_node(i, start, end);
-               physnodes[i].start = nodes[i].start;
-               physnodes[i].end = nodes[i].end;
-       }
-}
-#endif /* CONFIG_NUMA_EMU */
-
-/* Use the information discovered above to actually set up the nodes. */
-int __init acpi_scan_nodes(unsigned long start, unsigned long end)
+int __init x86_acpi_numa_init(void)
 {
-       int i;
-
-       if (acpi_numa <= 0)
-               return -1;
-
-       /* First clean up the node list */
-       for (i = 0; i < MAX_NUMNODES; i++)
-               cutoff_node(i, start, end);
-
-       /*
-        * Join together blocks on the same node, holes between
-        * which don't overlap with memory on other nodes.
-        */
-       for (i = 0; i < num_node_memblks; ++i) {
-               int j, k;
-
-               for (j = i + 1; j < num_node_memblks; ++j) {
-                       unsigned long start, end;
-
-                       if (memblk_nodeid[i] != memblk_nodeid[j])
-                               continue;
-                       start = min(node_memblk_range[i].end,
-                                   node_memblk_range[j].end);
-                       end = max(node_memblk_range[i].start,
-                                 node_memblk_range[j].start);
-                       for (k = 0; k < num_node_memblks; ++k) {
-                               if (memblk_nodeid[i] == memblk_nodeid[k])
-                                       continue;
-                               if (start < node_memblk_range[k].end &&
-                                   end > node_memblk_range[k].start)
-                                       break;
-                       }
-                       if (k < num_node_memblks)
-                               continue;
-                       start = min(node_memblk_range[i].start,
-                                   node_memblk_range[j].start);
-                       end = max(node_memblk_range[i].end,
-                                 node_memblk_range[j].end);
-                       printk(KERN_INFO "SRAT: Node %d "
-                              "[%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
-                              memblk_nodeid[i],
-                              node_memblk_range[i].start,
-                              node_memblk_range[i].end,
-                              node_memblk_range[j].start,
-                              node_memblk_range[j].end,
-                              start, end);
-                       node_memblk_range[i].start = start;
-                       node_memblk_range[i].end = end;
-                       k = --num_node_memblks - j;
-                       memmove(memblk_nodeid + j, memblk_nodeid + j+1,
-                               k * sizeof(*memblk_nodeid));
-                       memmove(node_memblk_range + j, node_memblk_range + j+1,
-                               k * sizeof(*node_memblk_range));
-                       --j;
-               }
-       }
-
-       memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
-                                          memblk_nodeid);
-       if (memnode_shift < 0) {
-               printk(KERN_ERR
-                    "SRAT: No NUMA node hash function found. Contact maintainer\n");
-               bad_srat();
-               return -1;
-       }
-
-       for (i = 0; i < num_node_memblks; i++)
-               memblock_x86_register_active_regions(memblk_nodeid[i],
-                               node_memblk_range[i].start >> PAGE_SHIFT,
-                               node_memblk_range[i].end >> PAGE_SHIFT);
-
-       /* for out of order entries in SRAT */
-       sort_node_map();
-       if (!nodes_cover_memory(nodes)) {
-               bad_srat();
-               return -1;
-       }
+       int ret;
 
-       /* Account for nodes with cpus and no memory */
-       nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed);
-
-       /* Finally register nodes */
-       for_each_node_mask(i, node_possible_map)
-               setup_node_bootmem(i, nodes[i].start, nodes[i].end);
-       /* Try again in case setup_node_bootmem missed one due
-          to missing bootmem */
-       for_each_node_mask(i, node_possible_map)
-               if (!node_online(i))
-                       setup_node_bootmem(i, nodes[i].start, nodes[i].end);
-
-       for (i = 0; i < nr_cpu_ids; i++) {
-               int node = early_cpu_to_node(i);
-
-               if (node == NUMA_NO_NODE)
-                       continue;
-               if (!node_online(node))
-                       numa_clear_node(i);
-       }
-       numa_init_array();
-       return 0;
-}
-
-#ifdef CONFIG_NUMA_EMU
-static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = {
-       [0 ... MAX_NUMNODES-1] = PXM_INVAL
-};
-static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
-       [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
-};
-static int __init find_node_by_addr(unsigned long addr)
-{
-       int ret = NUMA_NO_NODE;
-       int i;
-
-       for_each_node_mask(i, nodes_parsed) {
-               /*
-                * Find the real node that this emulated node appears on.  For
-                * the sake of simplicity, we only use a real node's starting
-                * address to determine which emulated node it appears on.
-                */
-               if (addr >= nodes[i].start && addr < nodes[i].end) {
-                       ret = i;
-                       break;
-               }
-       }
-       return ret;
+       ret = acpi_numa_init();
+       if (ret < 0)
+               return ret;
+       return srat_disabled() ? -EINVAL : 0;
 }
 
-/*
- * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID
- * mappings that respect the real ACPI topology but reflect our emulated
- * environment.  For each emulated node, we find which real node it appears on
- * and create PXM to NID mappings for those fake nodes which mirror that
- * locality.  SLIT will now represent the correct distances between emulated
- * nodes as a result of the real topology.
- */
-void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
-{
-       int i, j;
-
-       for (i = 0; i < num_nodes; i++) {
-               int nid, pxm;
-
-               nid = find_node_by_addr(fake_nodes[i].start);
-               if (nid == NUMA_NO_NODE)
-                       continue;
-               pxm = node_to_pxm(nid);
-               if (pxm == PXM_INVAL)
-                       continue;
-               fake_node_to_pxm_map[i] = pxm;
-               /*
-                * For each apicid_to_node mapping that exists for this real
-                * node, it must now point to the fake node ID.
-                */
-               for (j = 0; j < MAX_LOCAL_APIC; j++)
-                       if (apicid_to_node[j] == nid &&
-                           fake_apicid_to_node[j] == NUMA_NO_NODE)
-                               fake_apicid_to_node[j] = i;
-       }
-
-       /*
-        * If there are apicid-to-node mappings for physical nodes that do not
-        * have a corresponding emulated node, it should default to a guaranteed
-        * value.
-        */
-       for (i = 0; i < MAX_LOCAL_APIC; i++)
-               if (apicid_to_node[i] != NUMA_NO_NODE &&
-                   fake_apicid_to_node[i] == NUMA_NO_NODE)
-                       fake_apicid_to_node[i] = 0;
-
-       for (i = 0; i < num_nodes; i++)
-               __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
-       memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
-
-       nodes_clear(nodes_parsed);
-       for (i = 0; i < num_nodes; i++)
-               if (fake_nodes[i].start != fake_nodes[i].end)
-                       node_set(i, nodes_parsed);
-}
-
-static int null_slit_node_compare(int a, int b)
-{
-       return node_to_pxm(a) == node_to_pxm(b);
-}
-#else
-static int null_slit_node_compare(int a, int b)
-{
-       return a == b;
-}
-#endif /* CONFIG_NUMA_EMU */
-
-int __node_distance(int a, int b)
-{
-       int index;
-
-       if (!acpi_slit)
-               return null_slit_node_compare(a, b) ? LOCAL_DISTANCE :
-                                                     REMOTE_DISTANCE;
-       index = acpi_slit->locality_count * node_to_pxm(a);
-       return acpi_slit->entry[index + node_to_pxm(b)];
-}
-
-EXPORT_SYMBOL(__node_distance);
-
 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY)
 int memory_add_physaddr_to_nid(u64 start)
 {
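
To make the new acpi_numa_slit_init() concrete: for a hypothetical two-locality SLIT with entries {10, 20, 20, 10}, and assuming pxm_to_node() is the identity mapping, the double loop reduces to four calls:

	numa_set_distance(0, 0, 10);
	numa_set_distance(0, 1, 20);
	numa_set_distance(1, 0, 20);
	numa_set_distance(1, 1, 10);

That is, the flat PXM-indexed matrix is re-keyed by node ID and stored through the generic distance table, instead of reserving memblock space for a private copy of the SLIT as the removed code did.
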
index 6acc724d5d8ff759f93290a2591c84945f9e6bd2..d6c0418c3e4711795158b52e57b2fa337848d03c 100644 (file)
@@ -179,12 +179,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
        sender = this_cpu_read(tlb_vector_offset);
        f = &flush_state[sender];
 
-       /*
-        * Could avoid this lock when
-        * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
-        * probably not worth checking this for a cache-hot lock.
-        */
-       raw_spin_lock(&f->tlbstate_lock);
+       if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
+               raw_spin_lock(&f->tlbstate_lock);
 
        f->flush_mm = mm;
        f->flush_va = va;
@@ -202,7 +198,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 
        f->flush_mm = NULL;
        f->flush_va = 0;
-       raw_spin_unlock(&f->tlbstate_lock);
+       if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
+               raw_spin_unlock(&f->tlbstate_lock);
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
@@ -211,11 +208,10 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
        if (is_uv_system()) {
                unsigned int cpu;
 
-               cpu = get_cpu();
+               cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
                if (cpumask)
                        flush_tlb_others_ipi(cpumask, mm, va);
-               put_cpu();
                return;
        }
        flush_tlb_others_ipi(cpumask, mm, va);
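
The conditional locking above relies on vector assignment: when nr_cpu_ids <= NUM_INVALIDATE_TLB_VECTORS, every sender owns a private flush_state slot, so the lock protects nothing. A sketch of the predicate — hypothetical helper name, logic exactly as in the hunk:

	static inline bool tlbstate_lock_needed(void)
	{
		/* slots are shared only when CPUs outnumber the flush vectors */
		return nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS;
	}
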
index e27dffbbb1a7cafb9ebad2ac8e0d656de534ec09..026e4931d16259fe624eef5b5bb269436ad3e81e 100644 (file)
@@ -350,7 +350,7 @@ static int __init early_fill_mp_bus_info(void)
 
 #define ENABLE_CF8_EXT_CFG      (1ULL << 46)
 
-static void enable_pci_io_ecs(void *unused)
+static void __cpuinit enable_pci_io_ecs(void *unused)
 {
        u64 reg;
        rdmsrl(MSR_AMD64_NB_CFG, reg);
index c63c6d3dd8e81e7d7dabf4309308fe63d0d39b37..67858be4b52b8066201cd5f5cfda657806476e32 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 
+#include <asm/ce4100.h>
 #include <asm/pci_x86.h>
 
 struct sim_reg {
@@ -306,10 +307,10 @@ struct pci_raw_ops ce4100_pci_conf = {
        .write = ce4100_conf_write,
 };
 
-static int __init ce4100_pci_init(void)
+int __init ce4100_pci_init(void)
 {
        init_sim_regs();
        raw_pci_ops = &ce4100_pci_conf;
-       return 0;
+       /* Indicate to the caller that it should invoke pci_legacy_init() */
+       return 1;
 }
-subsys_initcall(ce4100_pci_init);
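
The new return convention deserves a note: an x86_init.pci.init hook now returns nonzero when the generic fallback should still run. A sketch of the call site this assumes (the actual dispatcher lives outside this hunk):

	/* hypothetical caller, mirroring the convention described above */
	if (x86_init.pci.init())
		pci_legacy_init();
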
index 25cd4a07d09f78cbe850733f0d2d36d98fe5d176..8c4085a95ef153354aa7b58ddd4b48a588de5e53 100644 (file)
@@ -20,7 +20,8 @@
 #include <asm/xen/pci.h>
 
 #ifdef CONFIG_ACPI
-static int xen_hvm_register_pirq(u32 gsi, int triggering)
+static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
+                                int trigger, int polarity)
 {
        int rc, irq;
        struct physdev_map_pirq map_irq;
@@ -41,7 +42,7 @@ static int xen_hvm_register_pirq(u32 gsi, int triggering)
                return -1;
        }
 
-       if (triggering == ACPI_EDGE_SENSITIVE) {
+       if (trigger == ACPI_EDGE_SENSITIVE) {
                shareable = 0;
                name = "ioapic-edge";
        } else {
@@ -55,12 +56,6 @@ static int xen_hvm_register_pirq(u32 gsi, int triggering)
 
        return irq;
 }
-
-static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
-                                int trigger, int polarity)
-{
-       return xen_hvm_register_pirq(gsi, trigger);
-}
 #endif
 
 #if defined(CONFIG_PCI_MSI)
@@ -91,7 +86,7 @@ static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
 
 static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
-       int irq, pirq, ret = 0;
+       int irq, pirq;
        struct msi_desc *msidesc;
        struct msi_msg msg;
 
@@ -99,39 +94,32 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
                __read_msi_msg(msidesc, &msg);
                pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
                        ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
-               if (xen_irq_from_pirq(pirq) >= 0 && msg.data == XEN_PIRQ_MSI_DATA) {
-                       xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ?
-                                       "msi-x" : "msi", &irq, &pirq, XEN_ALLOC_IRQ);
-                       if (irq < 0)
+               if (msg.data != XEN_PIRQ_MSI_DATA ||
+                   xen_irq_from_pirq(pirq) < 0) {
+                       pirq = xen_allocate_pirq_msi(dev, msidesc);
+                       if (pirq < 0)
                                goto error;
-                       ret = set_irq_msi(irq, msidesc);
-                       if (ret < 0)
-                               goto error_while;
-                       printk(KERN_DEBUG "xen: msi already setup: msi --> irq=%d"
-                                       " pirq=%d\n", irq, pirq);
-                       return 0;
+                       xen_msi_compose_msg(dev, pirq, &msg);
+                       __write_msi_msg(msidesc, &msg);
+                       dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
+               } else {
+                       dev_dbg(&dev->dev,
+                               "xen: msi already bound to pirq=%d\n", pirq);
                }
-               xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ?
-                               "msi-x" : "msi", &irq, &pirq, (XEN_ALLOC_IRQ | XEN_ALLOC_PIRQ));
-               if (irq < 0 || pirq < 0)
+               irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0,
+                                              (type == PCI_CAP_ID_MSIX) ?
+                                              "msi-x" : "msi");
+               if (irq < 0)
                        goto error;
-               printk(KERN_DEBUG "xen: msi --> irq=%d, pirq=%d\n", irq, pirq);
-               xen_msi_compose_msg(dev, pirq, &msg);
-               ret = set_irq_msi(irq, msidesc);
-               if (ret < 0)
-                       goto error_while;
-               write_msi_msg(irq, &msg);
+               dev_dbg(&dev->dev,
+                       "xen: msi --> pirq=%d --> irq=%d\n", pirq, irq);
        }
        return 0;
 
-error_while:
-       unbind_from_irqhandler(irq, NULL);
 error:
-       if (ret == -ENODEV)
-               dev_err(&dev->dev, "Xen PCI frontend has not registered" \
-                               " MSI/MSI-X support!\n");
-
-       return ret;
+       dev_err(&dev->dev,
+               "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+       return -ENODEV;
 }
 
 /*
@@ -150,35 +138,26 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
                return -ENOMEM;
 
        if (type == PCI_CAP_ID_MSIX)
-               ret = xen_pci_frontend_enable_msix(dev, &v, nvec);
+               ret = xen_pci_frontend_enable_msix(dev, v, nvec);
        else
-               ret = xen_pci_frontend_enable_msi(dev, &v);
+               ret = xen_pci_frontend_enable_msi(dev, v);
        if (ret)
                goto error;
        i = 0;
        list_for_each_entry(msidesc, &dev->msi_list, list) {
-               irq = xen_allocate_pirq(v[i], 0, /* not sharable */
-                       (type == PCI_CAP_ID_MSIX) ?
-                       "pcifront-msi-x" : "pcifront-msi");
-               if (irq < 0) {
-                       ret = -1;
+               irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
+                                              (type == PCI_CAP_ID_MSIX) ?
+                                              "pcifront-msi-x" :
+                                              "pcifront-msi");
+               if (irq < 0)
                        goto free;
-               }
-
-               ret = set_irq_msi(irq, msidesc);
-               if (ret)
-                       goto error_while;
                i++;
        }
        kfree(v);
        return 0;
 
-error_while:
-       unbind_from_irqhandler(irq, NULL);
 error:
-       if (ret == -ENODEV)
-               dev_err(&dev->dev, "Xen PCI frontend has not registered" \
-                       " MSI/MSI-X support!\n");
+       dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
 free:
        kfree(v);
        return ret;
@@ -193,6 +172,9 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev)
                xen_pci_frontend_disable_msix(dev);
        else
                xen_pci_frontend_disable_msi(dev);
+
+       /* Free the IRQs and the msidesc using the generic code. */
+       default_teardown_msi_irqs(dev);
 }
 
 static void xen_teardown_msi_irq(unsigned int irq)
@@ -200,47 +182,82 @@ static void xen_teardown_msi_irq(unsigned int irq)
        xen_destroy_irq(irq);
 }
 
+#ifdef CONFIG_XEN_DOM0
 static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
-       int irq, ret;
+       int ret = 0;
        struct msi_desc *msidesc;
 
        list_for_each_entry(msidesc, &dev->msi_list, list) {
-               irq = xen_create_msi_irq(dev, msidesc, type);
-               if (irq < 0)
-                       return -1;
+               struct physdev_map_pirq map_irq;
 
-               ret = set_irq_msi(irq, msidesc);
-               if (ret)
-                       goto error;
-       }
-       return 0;
+               memset(&map_irq, 0, sizeof(map_irq));
+               map_irq.domid = DOMID_SELF;
+               map_irq.type = MAP_PIRQ_TYPE_MSI;
+               map_irq.index = -1;
+               map_irq.pirq = -1;
+               map_irq.bus = dev->bus->number;
+               map_irq.devfn = dev->devfn;
 
-error:
-       xen_destroy_irq(irq);
+               if (type == PCI_CAP_ID_MSIX) {
+                       int pos;
+                       u32 table_offset, bir;
+
+                       pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+
+                       pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
+                                             &table_offset);
+                       bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
+
+                       map_irq.table_base = pci_resource_start(dev, bir);
+                       map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
+               }
+
+               ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+               if (ret) {
+                       dev_warn(&dev->dev, "xen map irq failed %d\n", ret);
+                       goto out;
+               }
+
+               ret = xen_bind_pirq_msi_to_irq(dev, msidesc,
+                                              map_irq.pirq, map_irq.index,
+                                              (type == PCI_CAP_ID_MSIX) ?
+                                              "msi-x" : "msi");
+               if (ret < 0)
+                       goto out;
+       }
+       ret = 0;
+out:
        return ret;
 }
 #endif
+#endif
 
 static int xen_pcifront_enable_irq(struct pci_dev *dev)
 {
        int rc;
        int share = 1;
+       u8 gsi;
 
-       dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq);
-
-       if (dev->irq < 0)
-               return -EINVAL;
+       rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
+       if (rc < 0) {
+               dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
+                        rc);
+               return rc;
+       }
 
-       if (dev->irq < NR_IRQS_LEGACY)
+       if (gsi < NR_IRQS_LEGACY)
                share = 0;
 
-       rc = xen_allocate_pirq(dev->irq, share, "pcifront");
+       rc = xen_allocate_pirq(gsi, share, "pcifront");
        if (rc < 0) {
-               dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n",
-                        dev->irq, rc);
+               dev_warn(&dev->dev, "Xen PCI: failed to register GSI%d: %d\n",
+                        gsi, rc);
                return rc;
        }
+
+       dev->irq = rc;
+       dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
        return 0;
 }
 
index 68c0dbcc95beda7976fca133324a91ebbc88a023..28071bb31db7bc1512242589dd10ad5184c976dc 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/serial_reg.h>
 #include <linux/serial_8250.h>
 
+#include <asm/ce4100.h>
 #include <asm/prom.h>
 #include <asm/setup.h>
 #include <asm/i8259.h>
@@ -136,6 +137,7 @@ void __init x86_ce4100_early_setup(void)
        x86_init.resources.probe_roms = x86_init_noop;
        x86_init.mpparse.get_smp_config = x86_init_uint_noop;
        x86_init.mpparse.find_smp_config = x86_init_noop;
+       x86_init.pci.init = ce4100_pci_init;
 
 #ifdef CONFIG_X86_IO_APIC
        x86_init.pci.init_irq = sdv_pci_init;
index dab87464753058b10b7b3094c734126dcc862066..044bda5b3174f19c07a0fe9cb8b1ab1d77ca1b60 100644 (file)
@@ -140,8 +140,7 @@ void * __init prom_early_alloc(unsigned long size)
                 * wasted bootmem) and hand off chunks of it to callers.
                 */
                res = alloc_bootmem(chunk_size);
-               if (!res)
-                       return NULL;
+               BUG_ON(!res);
                prom_early_allocated += chunk_size;
                memset(res, 0, chunk_size);
                free_mem = chunk_size;
index df58e9cad96ae9441a4f86f22900a6e0bf05aa64..a7b38d35c29a1ce7578d36e38cfe04616143b39f 100644 (file)
@@ -1364,11 +1364,11 @@ uv_activation_descriptor_init(int node, int pnode)
                memset(bd2, 0, sizeof(struct bau_desc));
                bd2->header.sw_ack_flag = 1;
                /*
-                * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
+                * base_dest_nodeid is the nasid of the first uvhub
                 * in the partition. The bit map will indicate uvhub numbers,
                 * which are 0-N in a partition. Pnodes are unique system-wide.
                 */
-               bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
+               bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode);
                bd2->header.dest_subnodeid = 0x10; /* the LB */
                bd2->header.command = UV_NET_ENDPOINT_INTD;
                bd2->header.int_both = 1;
index 7b24460917d551b540a337a0f7c74dbe4f65e297..374a05d8ad22156b9a82e3ae0a643d9de785599f 100644 (file)
@@ -131,7 +131,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
                       unsigned long mmr_offset, int limit)
 {
        const struct cpumask *eligible_cpu = cpumask_of(cpu);
-       struct irq_cfg *cfg = get_irq_chip_data(irq);
+       struct irq_cfg *cfg = irq_get_chip_data(irq);
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;
        int mmr_pnode, err;
@@ -148,7 +148,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
        else
                irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
 
-       set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
+       irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
                                      irq_name);
 
        mmr_value = 0;
index 63203767174683b3db1ab410be9263aabe280ab0..fe4cf8294878a9e5de7d0773cb8086c22cc8b55a 100644 (file)
@@ -569,11 +569,13 @@ out_unlock:
 static struct irqaction master_action = {
        .handler =      piix4_master_intr,
        .name =         "PIIX4-8259",
+       .flags =        IRQF_NO_THREAD,
 };
 
 static struct irqaction cascade_action = {
        .handler =      no_action,
        .name =         "cascade",
+       .flags =        IRQF_NO_THREAD,
 };
 
 static inline void set_piix4_virtual_irq_type(void)
@@ -606,7 +608,7 @@ static void __init visws_pre_intr_init(void)
                        chip = &cobalt_irq_type;
 
                if (chip)
-                       set_irq_chip(i, chip);
+                       irq_set_chip(i, chip);
        }
 
        setup_irq(CO_IRQ_8259, &master_action);
index 5b54892e4bc3c13b72f05187d8c7ec4d8752081a..e4343fe488ed2768b0db811cf4971ae865240fb8 100644 (file)
@@ -48,3 +48,11 @@ config XEN_DEBUG_FS
        help
          Enable statistics output and various tuning options in debugfs.
          Enabling this option may incur a significant performance overhead.
+
+config XEN_DEBUG
+       bool "Enable Xen debug checks"
+       depends on XEN
+       default n
+       help
+         Enable various WARN_ON checks in the Xen MMU code.
+         Enabling this option WILL incur a significant performance overhead.
index 50542efe45fbbc9313e8d66670d99ce573d682a6..49dbd78ec3cb0d4481e72e96cdb041507b5ae686 100644 (file)
@@ -1284,15 +1284,14 @@ static int init_hvm_pv_info(int *major, int *minor)
 
        xen_setup_features();
 
-       pv_info = xen_info;
-       pv_info.kernel_rpl = 0;
+       pv_info.name = "Xen HVM";
 
        xen_domain_type = XEN_HVM_DOMAIN;
 
        return 0;
 }
 
-void xen_hvm_init_shared_info(void)
+void __ref xen_hvm_init_shared_info(void)
 {
        int cpu;
        struct xen_add_to_physmap xatp;
@@ -1331,6 +1330,8 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
        switch (action) {
        case CPU_UP_PREPARE:
                per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+               if (xen_have_vector_callback)
+                       xen_init_lock_cpu(cpu);
                break;
        default:
                break;
@@ -1355,6 +1356,7 @@ static void __init xen_hvm_guest_init(void)
 
        if (xen_feature(XENFEAT_hvm_callback_vector))
                xen_have_vector_callback = 1;
+       xen_hvm_smp_init();
        register_cpu_notifier(&xen_hvm_cpu_notifier);
        xen_unplug_emulated_devices();
        have_vcpu_info_placement = 0;
index 5e92b61ad574dd6514f09644737dd450d0311dd0..3f6f3347aa17b299165a698e9f878682ed8f8b34 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/module.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
+#include <linux/seq_file.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -416,8 +417,12 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
        if (val & _PAGE_PRESENT) {
                unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
                pteval_t flags = val & PTE_FLAGS_MASK;
-               unsigned long mfn = pfn_to_mfn(pfn);
+               unsigned long mfn;
 
+               if (!xen_feature(XENFEAT_auto_translated_physmap))
+                       mfn = get_phys_to_machine(pfn);
+               else
+                       mfn = pfn;
                /*
                 * If there's no mfn for the pfn, then just create an
                 * empty non-present pte.  Unfortunately this loses
@@ -427,8 +432,18 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
                if (unlikely(mfn == INVALID_P2M_ENTRY)) {
                        mfn = 0;
                        flags = 0;
+               } else {
+                       /*
+                        * Paramount to do this test _after_ the
+                        * It is paramount to do this test _after_ the
+                        * INVALID_P2M_ENTRY check, as INVALID_P2M_ENTRY &
+                        */
+                       mfn &= ~FOREIGN_FRAME_BIT;
+                       if (mfn & IDENTITY_FRAME_BIT) {
+                               mfn &= ~IDENTITY_FRAME_BIT;
+                               flags |= _PAGE_IOMAP;
+                       }
                }
-
                val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
        }
 
@@ -532,6 +547,41 @@ pte_t xen_make_pte(pteval_t pte)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
 
+#ifdef CONFIG_XEN_DEBUG
+pte_t xen_make_pte_debug(pteval_t pte)
+{
+       phys_addr_t addr = (pte & PTE_PFN_MASK);
+       phys_addr_t other_addr;
+       bool io_page = false;
+       pte_t _pte;
+
+       if (pte & _PAGE_IOMAP)
+               io_page = true;
+
+       _pte = xen_make_pte(pte);
+
+       if (!addr)
+               return _pte;
+
+       if (io_page &&
+           (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
+               other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
+               WARN(addr != other_addr,
+                       "0x%lx is using VM_IO, but it is 0x%lx!\n",
+                       (unsigned long)addr, (unsigned long)other_addr);
+       } else {
+               pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
+               other_addr = (_pte.pte & PTE_PFN_MASK);
+               WARN((addr == other_addr) && (!io_page) && (!iomap_set),
+                       "0x%lx is missing VM_IO (and wasn't fixed)!\n",
+                       (unsigned long)addr);
+       }
+
+       return _pte;
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
+#endif
+
 pgd_t xen_make_pgd(pgdval_t pgd)
 {
        pgd = pte_pfn_to_mfn(pgd);
@@ -986,10 +1036,9 @@ static void xen_pgd_pin(struct mm_struct *mm)
  */
 void xen_mm_pin_all(void)
 {
-       unsigned long flags;
        struct page *page;
 
-       spin_lock_irqsave(&pgd_lock, flags);
+       spin_lock(&pgd_lock);
 
        list_for_each_entry(page, &pgd_list, lru) {
                if (!PagePinned(page)) {
@@ -998,7 +1047,7 @@ void xen_mm_pin_all(void)
                }
        }
 
-       spin_unlock_irqrestore(&pgd_lock, flags);
+       spin_unlock(&pgd_lock);
 }
 
 /*
@@ -1099,10 +1148,9 @@ static void xen_pgd_unpin(struct mm_struct *mm)
  */
 void xen_mm_unpin_all(void)
 {
-       unsigned long flags;
        struct page *page;
 
-       spin_lock_irqsave(&pgd_lock, flags);
+       spin_lock(&pgd_lock);
 
        list_for_each_entry(page, &pgd_list, lru) {
                if (PageSavePinned(page)) {
@@ -1112,7 +1160,7 @@ void xen_mm_unpin_all(void)
                }
        }
 
-       spin_unlock_irqrestore(&pgd_lock, flags);
+       spin_unlock(&pgd_lock);
 }
 
 void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
@@ -1443,7 +1491,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
         * early_ioremap fixmap slot, make sure it is RO.
         */
        if (!is_early_ioremap_ptep(ptep) &&
-           pfn >= e820_table_start && pfn < e820_table_end)
+           pfn >= pgt_buf_start && pfn < pgt_buf_end)
                pte = pte_wrprotect(pte);
 
        return pte;
@@ -1942,6 +1990,9 @@ __init void xen_ident_map_ISA(void)
 
 static __init void xen_post_allocator_init(void)
 {
+#ifdef CONFIG_XEN_DEBUG
+       pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
+#endif
        pv_mmu_ops.set_pte = xen_set_pte;
        pv_mmu_ops.set_pmd = xen_set_pmd;
        pv_mmu_ops.set_pud = xen_set_pud;
@@ -2074,7 +2125,7 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
                        in_frames[i] = virt_to_mfn(vaddr);
 
                MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
-               set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
+               __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
 
                if (out_frames)
                        out_frames[i] = virt_to_pfn(vaddr);
@@ -2353,6 +2404,18 @@ EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
 
 #ifdef CONFIG_XEN_DEBUG_FS
 
+static int p2m_dump_open(struct inode *inode, struct file *filp)
+{
+       return single_open(filp, p2m_dump_show, NULL);
+}
+
+static const struct file_operations p2m_dump_fops = {
+       .open           = p2m_dump_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 static struct dentry *d_mmu_debug;
 
 static int __init xen_mmu_debugfs(void)
@@ -2408,6 +2471,7 @@ static int __init xen_mmu_debugfs(void)
        debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
                           &mmu_stats.prot_commit_batched);
 
+       debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
        return 0;
 }
 fs_initcall(xen_mmu_debugfs);
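
The new "p2m" debugfs file above uses the stock seq_file single_open plumbing.
A self-contained sketch of that pattern, with hypothetical example_* names:

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	static int example_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "state dump goes here\n");
		return 0;
	}

	static int example_open(struct inode *inode, struct file *filp)
	{
		return single_open(filp, example_show, inode->i_private);
	}

	static const struct file_operations example_fops = {
		.open		= example_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	/* Registered with:
	 * debugfs_create_file("example", 0600, parent, NULL, &example_fops); */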
index fd12d7ce7ff93265ed620ac9ef96db2c676a6c94..215a3ce6106820d029c38d0216c7ea4384f20a0b 100644 (file)
  * P2M_PER_PAGE depends on the architecture, as a mfn is always
  * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
  * 512 and 1024 entries respectively. 
+ *
+ * In short, these structures contain the Machine Frame Number (MFN) of the PFN.
+ *
+ * However, not all entries are filled with MFNs. Specifically, for any leaf
+ * entry, or for any top or middle node, where there is a void entry, we
+ * assume it is "missing". So (for example)
+ *  pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY.
+ *
+ * We also have the possibility of setting 1-1 mappings on certain regions, so
+ * that:
+ *  pfn_to_mfn(0xc0000)=0xc0000
+ *
+ * The benefit of this is that for non-RAM regions (think PCI BARs, or
+ * ACPI spaces) we can create mappings easily, because the PFN value
+ * matches the MFN.
+ *
+ * For this to work efficiently we have one new page p2m_identity and
+ * allocate (via reserve_brk) any other pages we need to cover the sides
+ * (1GB or 4MB boundary violations). All entries in p2m_identity are set to
+ * INVALID_P2M_ENTRY (the Xen toolstack only recognizes that and MFNs,
+ * no other fancy values).
+ *
+ * On lookup we spot that the entry points to p2m_identity and return the
+ * identity value instead of dereferencing and returning INVALID_P2M_ENTRY.
+ * If the entry points to an allocated page, we just proceed as before and
+ * return the PFN.  If the PFN has IDENTITY_FRAME_BIT set we unmask that in
+ * appropriate functions (pfn_to_mfn).
+ *
+ * The reason for having the IDENTITY_FRAME_BIT instead of just returning the
+ * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a
+ * non-identity pfn. To protect ourselves against that, we elect to set (and
+ * get) the IDENTITY_FRAME_BIT on all identity-mapped PFNs.
+ *
+ * This simplified diagram is used to explain the more subtle piece of code.
+ * There is also a diagram of the P2M at the end that can help.
+ * Imagine your E820 looking like so:
+ *
+ *                    1GB                                           2GB
+ * /-------------------+---------\/----\         /----------\    /---+-----\
+ * | System RAM        | Sys RAM ||ACPI|         | reserved |    | Sys RAM |
+ * \-------------------+---------/\----/         \----------/    \---+-----/
+ *                               ^- 1029MB                       ^- 2001MB
+ *
+ * [1029MB = 263424 (0x40500), 2001MB = 512256 (0x7D100),
+ *  2048MB = 524288 (0x80000)]
+ *
+ * And dom0_mem=max:3GB,1GB is passed in to the guest, meaning memory past 1GB
+ * is actually not present (would have to kick the balloon driver to put it in).
+ *
+ * When we are told to set the PFNs for identity mapping (see patch: "xen/setup:
+ * Set identity mapping for non-RAM E820 and E820 gaps.") we pass in the start
+ * PFN and the end PFN (263424 and 512256 respectively). The first step
+ * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page
+ * covers 512^2 of page estate (1GB) and in case the start or end PFN is not
+ * aligned on 512^2*PAGE_SIZE (1GB) we loop on aligned 1GB PFNs from start pfn
+ * to end pfn.  We reserve_brk top leaf pages if they are missing (meaning they
+ * point to p2m_mid_missing).
+ *
+ * With the E820 example above, 263424 is not 1GB aligned so we allocate a
+ * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000.
+ * Each entry in the allocated page is "missing" (points to p2m_missing).
+ *
+ * Next stage is to determine if we need to do a more granular boundary check
+ * on the 4MB (or 2MB, depending on architecture) boundary off the start and end PFNs.
+ * We check if the start pfn and end pfn violate that boundary check, and if
+ * so reserve_brk a middle (p2m[x][y]) leaf page. This way we have a much finer
+ * granularity of setting which PFNs are missing and which ones are identity.
+ * In our example 263424 and 512256 both fail the check so we reserve_brk two
+ * pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing"
+ * values) and assign them to p2m[1][2] and p2m[1][488] respectively.
+ *
+ * At this point we would at minimum have reserve_brk'd one page, but it could
+ * be up to three. Each call to set_phys_range_identity has at most a three-page
+ * cost. If we were to query the P2M at this stage, all those entries from
+ * start PFN through end PFN (so 1029MB -> 2001MB) would return
+ * INVALID_P2M_ENTRY ("missing").
+ *
+ * The next step is to walk from the start pfn to the end pfn setting
+ * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity.
+ * If we find that the middle leaf is pointing to p2m_missing we can swap it
+ * over to p2m_identity - this way covering 4MB (or 2MB) PFN space.  At this
+ * point we do not need to worry about boundary alignment (so no need to
+ * reserve_brk a middle page, figure out which PFNs are "missing" and which
+ * ones are identity), as that has been done earlier.  If we find that the
+ * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference
+ * that page (which covers 512 PFNs) and set the appropriate PFN with
+ * IDENTITY_FRAME_BIT. In our example 263424 and 512256 end up there, and we
+ * set p2m[1][2][256->511] and p2m[1][488][0->255] with
+ * IDENTITY_FRAME_BIT set.
+ *
+ * All other regions that are void (or not filled) either point to p2m_missing
+ * (considered missing) or have the default value of INVALID_P2M_ENTRY (also
+ * considered missing). In our case, p2m[1][2][0->255] and p2m[1][488][256->511]
+ * contain the INVALID_P2M_ENTRY value and are considered "missing."
+ *
+ * This is what the p2m ends up looking (for the E820 above) with this
+ * fabulous drawing:
+ *
+ *    p2m         /--------------\
+ *  /-----\       | &mfn_list[0],|                           /-----------------\
+ *  |  0  |------>| &mfn_list[1],|    /---------------\      | ~0, ~0, ..      |
+ *  |-----|       |  ..., ~0, ~0 |    | ~0, ~0, [x]---+----->| IDENTITY [@256] |
+ *  |  1  |---\   \--------------/    | [p2m_identity]+\     | IDENTITY [@257] |
+ *  |-----|    \                      | [p2m_identity]+\\    | ....            |
+ *  |  2  |--\  \-------------------->|  ...          | \\   \----------------/
+ *  |-----|   \                       \---------------/  \\
+ *  |  3  |\   \                                          \\  p2m_identity
+ *  |-----| \   \-------------------->/---------------\   /-----------------\
+ *  | ..  +->+                        | [p2m_identity]+-->| ~0, ~0, ~0, ... |
+ *  \-----/ /                         | [p2m_identity]+-->| ..., ~0         |
+ *         / /---------------\        | ....          |   \-----------------/
+ *        /  | IDENTITY[@0]  |      /-+-[x], ~0, ~0.. |
+ *       /   | IDENTITY[@256]|<----/  \---------------/
+ *      /    | ~0, ~0, ....  |
+ *     |     \---------------/
+ *     |
+ *     p2m_missing             p2m_missing
+ * /------------------\     /------------\
+ * | [p2m_mid_missing]+---->| ~0, ~0, ~0 |
+ * | [p2m_mid_missing]+---->| ..., ~0    |
+ * \------------------/     \------------/
+ *
+ * where ~0 is INVALID_P2M_ENTRY. IDENTITY is (PFN | IDENTITY_BIT)
  */
 
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/hash.h>
 #include <linux/sched.h>
+#include <linux/seq_file.h>
 
 #include <asm/cache.h>
 #include <asm/setup.h>
@@ -59,9 +183,15 @@ static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
 static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
 static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
 
+static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE);
+
 RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
 RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
 
+/* We might hit two boundary violations, at the start and at the end; at
+ * most each boundary violation will require three middle nodes. */
+RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);
+
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
        BUG_ON(pfn >= MAX_P2M_PFN);
@@ -136,7 +266,7 @@ static void p2m_init(unsigned long *p2m)
  * - After resume we're called from within stop_machine, but the mfn
 *   tree should already be completely allocated.
  */
-void xen_build_mfn_list_list(void)
+void __ref xen_build_mfn_list_list(void)
 {
        unsigned long pfn;
 
@@ -221,6 +351,9 @@ void __init xen_build_dynamic_phys_to_machine(void)
        p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
        p2m_top_init(p2m_top);
 
+       p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
+       p2m_init(p2m_identity);
+
        /*
         * The domain builder gives us a pre-constructed p2m array in
         * mfn_list for all the pages initially given to us, so we just
@@ -266,6 +399,14 @@ unsigned long get_phys_to_machine(unsigned long pfn)
        mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);
 
+       /*
+        * INVALID_P2M_ENTRY is filled into both p2m_*identity
+        * and p2m_*missing, so returning INVALID_P2M_ENTRY here
+        * would be wrong.
+        */
+       if (p2m_top[topidx][mididx] == p2m_identity)
+               return IDENTITY_FRAME(pfn);
+
        return p2m_top[topidx][mididx][idx];
 }
 EXPORT_SYMBOL_GPL(get_phys_to_machine);
@@ -335,9 +476,11 @@ static bool alloc_p2m(unsigned long pfn)
                        p2m_top_mfn_p[topidx] = mid_mfn;
        }
 
-       if (p2m_top[topidx][mididx] == p2m_missing) {
+       if (p2m_top[topidx][mididx] == p2m_identity ||
+           p2m_top[topidx][mididx] == p2m_missing) {
                /* p2m leaf page is missing */
                unsigned long *p2m;
+               unsigned long *p2m_orig = p2m_top[topidx][mididx];
 
                p2m = alloc_p2m_page();
                if (!p2m)
@@ -345,7 +488,7 @@ static bool alloc_p2m(unsigned long pfn)
 
                p2m_init(p2m);
 
-               if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
+               if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig)
                        free_p2m_page(p2m);
                else
                        mid_mfn[mididx] = virt_to_mfn(p2m);
@@ -354,11 +497,91 @@ static bool alloc_p2m(unsigned long pfn)
        return true;
 }
 
+bool __early_alloc_p2m(unsigned long pfn)
+{
+       unsigned topidx, mididx, idx;
+
+       topidx = p2m_top_index(pfn);
+       mididx = p2m_mid_index(pfn);
+       idx = p2m_index(pfn);
+
+       /* Pfff.. No boundary cross-over, let's get out. */
+       if (!idx)
+               return false;
+
+       WARN(p2m_top[topidx][mididx] == p2m_identity,
+               "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n",
+               topidx, mididx);
+
+       /*
+        * Could be done by xen_build_dynamic_phys_to_machine..
+        */
+       if (p2m_top[topidx][mididx] != p2m_missing)
+               return false;
+
+       /* Boundary cross-over for the edges: */
+       if (idx) {
+               unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
+
+               p2m_init(p2m);
+
+               p2m_top[topidx][mididx] = p2m;
+
+       }
+       return idx != 0;
+}
+unsigned long set_phys_range_identity(unsigned long pfn_s,
+                                     unsigned long pfn_e)
+{
+       unsigned long pfn;
+
+       if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN))
+               return 0;
+
+       if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
+               return pfn_e - pfn_s;
+
+       if (pfn_s > pfn_e)
+               return 0;
+
+       for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1));
+               pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
+               pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
+       {
+               unsigned topidx = p2m_top_index(pfn);
+               if (p2m_top[topidx] == p2m_mid_missing) {
+                       unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+
+                       p2m_mid_init(mid);
+
+                       p2m_top[topidx] = mid;
+               }
+       }
+
+       __early_alloc_p2m(pfn_s);
+       __early_alloc_p2m(pfn_e);
+
+       for (pfn = pfn_s; pfn < pfn_e; pfn++)
+               if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
+                       break;
+
+       if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s),
+               "Identity mapping failed. We are %ld short of 1-1 mappings!\n",
+               (pfn_e - pfn_s) - (pfn - pfn_s)))
+               printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn);
+
+       return pfn - pfn_s;
+}
+
 /* Try to install p2m mapping; fail if intermediate bits missing */
 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
        unsigned topidx, mididx, idx;
 
+       if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
+               BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+               return true;
+       }
        if (unlikely(pfn >= MAX_P2M_PFN)) {
                BUG_ON(mfn != INVALID_P2M_ENTRY);
                return true;
@@ -368,6 +591,21 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
        mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);
 
+       /* For sparse holes, where the p2m leaf has a real PFN along with
+        * PCI holes, stick in the PFN as the MFN value.
+        */
+       if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) {
+               if (p2m_top[topidx][mididx] == p2m_identity)
+                       return true;
+
+               /* Swap over from MISSING to IDENTITY if needed. */
+               if (p2m_top[topidx][mididx] == p2m_missing) {
+                       WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing,
+                               p2m_identity) != p2m_missing);
+                       return true;
+               }
+       }
+
        if (p2m_top[topidx][mididx] == p2m_missing)
                return mfn == INVALID_P2M_ENTRY;
 
@@ -378,11 +616,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 
 bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
-       if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
-               BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-               return true;
-       }
-
        if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
                if (!alloc_p2m(pfn))
                        return false;
@@ -421,7 +654,7 @@ int m2p_add_override(unsigned long mfn, struct page *page)
 {
        unsigned long flags;
        unsigned long pfn;
-       unsigned long address;
+       unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
 
@@ -455,7 +688,7 @@ int m2p_remove_override(struct page *page)
        unsigned long flags;
        unsigned long mfn;
        unsigned long pfn;
-       unsigned long address;
+       unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
 
@@ -520,3 +753,80 @@ unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
        return ret;
 }
 EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+int p2m_dump_show(struct seq_file *m, void *v)
+{
+       static const char * const level_name[] = { "top", "middle",
+                                               "entry", "abnormal" };
+       static const char * const type_name[] = { "identity", "missing",
+                                               "pfn", "abnormal"};
+#define TYPE_IDENTITY 0
+#define TYPE_MISSING 1
+#define TYPE_PFN 2
+#define TYPE_UNKNOWN 3
+       unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0;
+       unsigned int uninitialized_var(prev_level);
+       unsigned int uninitialized_var(prev_type);
+
+       if (!p2m_top)
+               return 0;
+
+       for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) {
+               unsigned topidx = p2m_top_index(pfn);
+               unsigned mididx = p2m_mid_index(pfn);
+               unsigned idx = p2m_index(pfn);
+               unsigned lvl, type;
+
+               lvl = 4;
+               type = TYPE_UNKNOWN;
+               if (p2m_top[topidx] == p2m_mid_missing) {
+                       lvl = 0; type = TYPE_MISSING;
+               } else if (p2m_top[topidx] == NULL) {
+                       lvl = 0; type = TYPE_UNKNOWN;
+               } else if (p2m_top[topidx][mididx] == NULL) {
+                       lvl = 1; type = TYPE_UNKNOWN;
+               } else if (p2m_top[topidx][mididx] == p2m_identity) {
+                       lvl = 1; type = TYPE_IDENTITY;
+               } else if (p2m_top[topidx][mididx] == p2m_missing) {
+                       lvl = 1; type = TYPE_MISSING;
+               } else if (p2m_top[topidx][mididx][idx] == 0) {
+                       lvl = 2; type = TYPE_UNKNOWN;
+               } else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) {
+                       lvl = 2; type = TYPE_IDENTITY;
+               } else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) {
+                       lvl = 2; type = TYPE_MISSING;
+               } else if (p2m_top[topidx][mididx][idx] == pfn) {
+                       lvl = 2; type = TYPE_PFN;
+               } else if (p2m_top[topidx][mididx][idx] != pfn) {
+                       lvl = 2; type = TYPE_PFN;
+               }
+               if (pfn == 0) {
+                       prev_level = lvl;
+                       prev_type = type;
+               }
+               if (pfn == MAX_DOMAIN_PAGES-1) {
+                       lvl = 3;
+                       type = TYPE_UNKNOWN;
+               }
+               if (prev_type != type) {
+                       seq_printf(m, " [0x%lx->0x%lx] %s\n",
+                               prev_pfn_type, pfn, type_name[prev_type]);
+                       prev_pfn_type = pfn;
+                       prev_type = type;
+               }
+               if (prev_level != lvl) {
+                       seq_printf(m, " [0x%lx->0x%lx] level %s\n",
+                               prev_pfn_level, pfn, level_name[prev_level]);
+                       prev_pfn_level = pfn;
+                       prev_level = lvl;
+               }
+       }
+       return 0;
+#undef TYPE_IDENTITY
+#undef TYPE_MISSING
+#undef TYPE_PFN
+#undef TYPE_UNKNOWN
+}
+#endif
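
The long comment above describes tagging identity-mapped PFNs with a reserved
high bit. A standalone sketch of how such a tag round-trips; the EX_* constants
are illustrative only (the kernel's real IDENTITY_FRAME definitions live in the
Xen page headers):

	#define EX_IDENTITY_BIT		(1UL << (BITS_PER_LONG - 2))
	#define EX_IDENTITY_FRAME(pfn)	((pfn) | EX_IDENTITY_BIT)

	static unsigned long ex_p2m_decode(unsigned long entry)
	{
		/* Identity entries decode back to pfn == mfn. */
		if (entry & EX_IDENTITY_BIT)
			return entry & ~EX_IDENTITY_BIT;
		return entry;	/* a plain MFN, or INVALID_P2M_ENTRY */
	}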
index a8a66a50d446342128776bfae67b27a5b97dd6f4..fa0269a993773f8488f765c3a1e255ba59fd4e3f 100644 (file)
@@ -52,6 +52,8 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
 
 static __init void xen_add_extra_mem(unsigned long pages)
 {
+       unsigned long pfn;
+
        u64 size = (u64)pages * PAGE_SIZE;
        u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
 
@@ -66,6 +68,9 @@ static __init void xen_add_extra_mem(unsigned long pages)
        xen_extra_mem_size += size;
 
        xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
+
+       for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
+               __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 }
 
 static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
@@ -104,7 +109,7 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
                WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n",
                     start, end, ret);
                if (ret == 1) {
-                       set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+                       __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
                        len++;
                }
        }
@@ -138,12 +143,55 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
        return released;
 }
 
+static unsigned long __init xen_set_identity(const struct e820entry *list,
+                                            ssize_t map_size)
+{
+       phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
+       phys_addr_t start_pci = last;
+       const struct e820entry *entry;
+       unsigned long identity = 0;
+       int i;
+
+       for (i = 0, entry = list; i < map_size; i++, entry++) {
+               phys_addr_t start = entry->addr;
+               phys_addr_t end = start + entry->size;
+
+               if (start < last)
+                       start = last;
+
+               if (end <= start)
+                       continue;
+
+               /* Skip over the 1MB region. */
+               if (last > end)
+                       continue;
+
+               if (entry->type == E820_RAM) {
+                       if (start > start_pci)
+                               identity += set_phys_range_identity(
+                                               PFN_UP(start_pci), PFN_DOWN(start));
+
+                       /* Without saving 'last' we would gobble RAM too
+                        * at the end of the loop. */
+                       last = end;
+                       start_pci = end;
+                       continue;
+               }
+               start_pci = min(start, start_pci);
+               last = end;
+       }
+       if (last > start_pci)
+               identity += set_phys_range_identity(
+                                       PFN_UP(start_pci), PFN_DOWN(last));
+       return identity;
+}
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
 char * __init xen_memory_setup(void)
 {
        static struct e820entry map[E820MAX] __initdata;
+       static struct e820entry map_raw[E820MAX] __initdata;
 
        unsigned long max_pfn = xen_start_info->nr_pages;
        unsigned long long mem_end;
@@ -151,6 +199,7 @@ char * __init xen_memory_setup(void)
        struct xen_memory_map memmap;
        unsigned long extra_pages = 0;
        unsigned long extra_limit;
+       unsigned long identity_pages = 0;
        int i;
        int op;
 
@@ -176,6 +225,7 @@ char * __init xen_memory_setup(void)
        }
        BUG_ON(rc);
 
+       memcpy(map_raw, map, sizeof(map));
        e820.nr_map = 0;
        xen_extra_mem_start = mem_end;
        for (i = 0; i < memmap.nr_entries; i++) {
@@ -194,6 +244,15 @@ char * __init xen_memory_setup(void)
                        end -= delta;
 
                        extra_pages += PFN_DOWN(delta);
+                       /*
+                        * Mark RAM below 4GB that is not ours as unusable.
+                        * This prevents "System RAM" address space from being
+                        * used as a potential resource for I/O addresses
+                        * (which happens when 'allocate_resource' is called).
+                        */
+                       if (delta &&
+                               (xen_initial_domain() && end < 0x100000000ULL))
+                               e820_add_region(end, delta, E820_UNUSABLE);
                }
 
                if (map[i].size > 0 && end > xen_extra_mem_start)
@@ -251,6 +310,13 @@ char * __init xen_memory_setup(void)
 
        xen_add_extra_mem(extra_pages);
 
+       /*
+        * Set P2M for all non-RAM pages and E820 gaps to be identity
+        * type PFNs. We supply it with the non-sanitized version
+        * of the E820.
+        */
+       identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
+       printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
        return "Xen";
 }
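
xen_set_identity() above turns byte ranges from the raw E820 into whole page
frames with PFN_UP()/PFN_DOWN(). A minimal sketch of that rounding, assuming
only <linux/pfn.h>; the ex_* name is hypothetical:

	#include <linux/pfn.h>

	/* Round the start up and the end down so that only pages fully
	 * inside [start, end) are counted. */
	static unsigned long ex_whole_pages(phys_addr_t start, phys_addr_t end)
	{
		unsigned long pfn_s = PFN_UP(start);
		unsigned long pfn_e = PFN_DOWN(end);

		if (pfn_s >= pfn_e)
			return 0;	/* range covers no whole page */
		return pfn_e - pfn_s;
	}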
 
index 72a4c79590459f58c6532b89dd74112654cb7608..30612441ed9991b7e34c64a97c513d65e8061826 100644 (file)
@@ -509,3 +509,41 @@ void __init xen_smp_init(void)
        xen_fill_possible_map();
        xen_init_spinlocks();
 }
+
+static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
+{
+       native_smp_prepare_cpus(max_cpus);
+       WARN_ON(xen_smp_intr_init(0));
+
+       if (!xen_have_vector_callback)
+               return;
+       xen_init_lock_cpu(0);
+       xen_init_spinlocks();
+}
+
+static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
+{
+       int rc;
+       rc = native_cpu_up(cpu);
+       WARN_ON(xen_smp_intr_init(cpu));
+       return rc;
+}
+
+static void xen_hvm_cpu_die(unsigned int cpu)
+{
+       unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
+       native_cpu_die(cpu);
+}
+
+void __init xen_hvm_smp_init(void)
+{
+       smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
+       smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
+       smp_ops.cpu_up = xen_hvm_cpu_up;
+       smp_ops.cpu_die = xen_hvm_cpu_die;
+       smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
+       smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
+}
index 9bbd63a129b5869a4842238423217d620b26d79b..45329c8c226e4c4070f16a791b300265a0bf472b 100644 (file)
@@ -12,7 +12,7 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
-void xen_pre_suspend(void)
+void xen_arch_pre_suspend(void)
 {
        xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
        xen_start_info->console.domU.mfn =
@@ -26,8 +26,9 @@ void xen_pre_suspend(void)
                BUG();
 }
 
-void xen_hvm_post_suspend(int suspend_cancelled)
+void xen_arch_hvm_post_suspend(int suspend_cancelled)
 {
+#ifdef CONFIG_XEN_PVHVM
        int cpu;
        xen_hvm_init_shared_info();
        xen_callback_vector();
@@ -37,9 +38,10 @@ void xen_hvm_post_suspend(int suspend_cancelled)
                        xen_setup_runstate_info(cpu);
                }
        }
+#endif
 }
 
-void xen_post_suspend(int suspend_cancelled)
+void xen_arch_post_suspend(int suspend_cancelled)
 {
        xen_build_mfn_list_list();
 
index 067759e3d6a525b53198673d6029ec4d6328cae7..2e2d370a47b1517bf1caa2503c04a96e1fa3b79a 100644 (file)
@@ -397,7 +397,9 @@ void xen_setup_timer(int cpu)
                name = "<timer kasprintf failed>";
 
        irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
-                                     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
+                                     IRQF_DISABLED|IRQF_PERCPU|
+                                     IRQF_NOBALANCING|IRQF_TIMER|
+                                     IRQF_FORCE_RESUME,
                                      name, NULL);
 
        evt = &per_cpu(xen_clock_events, cpu);
index 1a5ff24e29c0a645156abad8111cb99e8de2e68f..aaa7291c9259f55c0fc5ac0aab5f09ff788511a0 100644 (file)
@@ -28,9 +28,9 @@ ENTRY(startup_xen)
        __FINIT
 
 .pushsection .text
-       .align PAGE_SIZE_asm
+       .align PAGE_SIZE
 ENTRY(hypercall_page)
-       .skip PAGE_SIZE_asm
+       .skip PAGE_SIZE
 .popsection
 
        ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz "linux")
index 9d41bf985757973ece1288bd27b84d6c7af6622c..3112f55638c4ae67106c8964bfc9eb90e332145a 100644 (file)
@@ -64,10 +64,12 @@ void xen_setup_vcpu_info_placement(void);
 
 #ifdef CONFIG_SMP
 void xen_smp_init(void);
+void __init xen_hvm_smp_init(void);
 
 extern cpumask_var_t xen_cpu_initialized_map;
 #else
 static inline void xen_smp_init(void) {}
+static inline void xen_hvm_smp_init(void) {}
 #endif
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
index e39edf5c86f2da432c181f1e503c787137e4c623..249619e7e7f2aa161060e7f9eb64686b9116e8b8 100644 (file)
 #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
 #endif
 
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-       signed long             count;
 #define RWSEM_UNLOCKED_VALUE           0x00000000
 #define RWSEM_ACTIVE_BIAS              0x00000001
 #define RWSEM_ACTIVE_MASK              0x0000ffff
 #define RWSEM_WAITING_BIAS             (-0x00010000)
 #define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-       spinlock_t              wait_lock;
-       struct list_head        wait_list;
-};
-
-#define __RWSEM_INITIALIZER(name) \
-       { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
-         LIST_HEAD_INIT((name).wait_list) }
-
-#define DECLARE_RWSEM(name)            \
-       struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-       sem->count = RWSEM_UNLOCKED_VALUE;
-       spin_lock_init(&sem->wait_lock);
-       INIT_LIST_HEAD(&sem->wait_list);
-}
 
 /*
  * lock for reading
@@ -160,9 +128,4 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-       return (sem->count != 0);
-}
-
 #endif /* _XTENSA_RWSEM_H */
index 19df764f6399ae4fd4201379301cef1c78364bee..f3e5eb43f71cc3ab2669c35d22f27a12a95c0f84 100644 (file)
@@ -96,16 +96,12 @@ again:
                update_process_times(user_mode(get_irq_regs()));
 #endif
 
-               write_seqlock(&xtime_lock);
-
-               do_timer(1); /* Linux handler in kernel/timer.c */
+               xtime_update(1); /* Linux handler in kernel/time/timekeeping */
 
                /* Note that writing CCOMPARE clears the interrupt. */
 
                next += CCOUNT_PER_JIFFY;
                set_linux_timer(next);
-
-               write_sequnlock(&xtime_lock);
        }
 
        /* Allow platform to do something useful (Wdog). */
index 2f4002f79a24b3cf242c870282d96859dc475dc9..518dd423a5fef4c4dcf1d2cdc7af78e6f8c3a778 100644 (file)
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
        WARN_ON(!irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-       __blk_run_queue(q);
+       __blk_run_queue(q, false);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly.  Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
  *
  */
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
        blk_remove_plug(q);
 
@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
         * Only recurse once to avoid overrunning the stack, let the unplug
         * handling reinvoke the handler shortly if we already got there.
         */
-       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+       if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                q->request_fn(q);
                queue_flag_clear(QUEUE_FLAG_REENTER, q);
        } else {
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
        unsigned long flags;
 
        spin_lock_irqsave(q->queue_lock, flags);
-       __blk_run_queue(q);
+       __blk_run_queue(q, false);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
        drive_stat_acct(rq, 1);
        __elv_add_request(q, rq, where, 0);
-       __blk_run_queue(q);
+       __blk_run_queue(q, false);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-                       struct delayed_work *dwork, unsigned long delay)
-{
-       return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
        BUILD_BUG_ON(__REQ_NR_BITS > 8 *
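
The new bool makes the calling convention explicit: ordinary contexts pass
false and may dispatch request_fn directly, while completion-path callers pass
true so the work always bounces through kblockd. A hedged caller-side sketch
using the two-argument signature introduced above (the ex_* name is
hypothetical):

	static void ex_kick_queue(struct request_queue *q, bool from_completion)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		/* On the completion path request_fn must not be re-entered,
		 * so force the kblockd deferral. */
		__blk_run_queue(q, from_completion);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}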
index 54b123d6563e6e57aecff9e92af2614fb8c3682e..b27d0208611b4d904e7fda9e4e66bd9a4c7411b4 100644 (file)
@@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
 
        /*
         * Moving a request silently to empty queue_head may stall the
-        * queue.  Kick the queue in those cases.
+        * queue.  Kick the queue in those cases.  This function is called
+        * from request completion path and calling directly into
+        * from the request completion path, and calling directly into
         */
        if (was_empty && next_rq)
-               __blk_run_queue(q);
+               __blk_run_queue(q, true);
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q)
                BUG();
        }
 
-       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+       elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
        return rq;
 }
 
index 1a320d2406b01c073c56f48e8d7867dd067d1510..bd3e8df4d5e2b45a0ae89d90de025cfbaa12eed9 100644 (file)
@@ -109,7 +109,6 @@ struct bio_batch
        atomic_t                done;
        unsigned long           flags;
        struct completion       *wait;
-       bio_end_io_t            *end_io;
 };
 
 static void bio_batch_end_io(struct bio *bio, int err)
@@ -122,17 +121,14 @@ static void bio_batch_end_io(struct bio *bio, int err)
                else
                        clear_bit(BIO_UPTODATE, &bb->flags);
        }
-       if (bb) {
-               if (bb->end_io)
-                       bb->end_io(bio, err);
-               atomic_inc(&bb->done);
-               complete(bb->wait);
-       }
+       if (bb)
+               if (atomic_dec_and_test(&bb->done))
+                       complete(bb->wait);
        bio_put(bio);
 }
 
 /**
- * blkdev_issue_zeroout generate number of zero filed write bios
+ * blkdev_issue_zeroout - generate a number of zero-filled write bios
  * @bdev:      blockdev to issue
  * @sector:    start sector
  * @nr_sects:  number of sectors to write
@@ -150,13 +146,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
        int ret;
        struct bio *bio;
        struct bio_batch bb;
-       unsigned int sz, issued = 0;
+       unsigned int sz;
        DECLARE_COMPLETION_ONSTACK(wait);
 
-       atomic_set(&bb.done, 0);
+       atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;
-       bb.end_io = NULL;
 
 submit:
        ret = 0;
@@ -185,12 +180,12 @@ submit:
                                break;
                }
                ret = 0;
-               issued++;
+               atomic_inc(&bb.done);
                submit_bio(WRITE, bio);
        }
 
        /* Wait for bios in-flight */
-       while (issued != atomic_read(&bb.done))
+       if (!atomic_dec_and_test(&bb.done))
                wait_for_completion(&wait);
 
        if (!test_bit(BIO_UPTODATE, &bb.flags))
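
The counting above is the classic bias-by-one scheme: done starts at 1 for the
submitter, each bio adds one, each completion drops one, so the completion can
only fire after submission has fully finished. A sketch of the shape, reusing
the bio_batch layout above; the ex_* name is hypothetical:

	static void ex_submit_batch(struct bio_batch *bb, struct bio **bios, int n)
	{
		int i;

		atomic_set(&bb->done, 1);	/* submitter's own reference */
		for (i = 0; i < n; i++) {
			atomic_inc(&bb->done);	/* one per bio in flight */
			submit_bio(WRITE, bios[i]);
		}
		/* Drop our reference; if bios are still in flight, the last
		 * completion signals us via bio_batch_end_io(). */
		if (!atomic_dec_and_test(&bb->done))
			wait_for_completion(bb->wait);
	}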
index a89043a3caa416bd59f9a24486698e8d5ce30e1c..e36cc10a346c83bfd233a0b71421486180518e2f 100644 (file)
@@ -20,6 +20,11 @@ static int throtl_quantum = 32;
 /* Throttling is performed over 100ms slice and after that slice is renewed */
 static unsigned long throtl_slice = HZ/10;     /* 100 ms */
 
+/* A workqueue to queue throttle related work */
+static struct workqueue_struct *kthrotld_workqueue;
+static void throtl_schedule_delayed_work(struct throtl_data *td,
+                               unsigned long delay);
+
 struct throtl_rb_root {
        struct rb_root rb;
        struct rb_node *left;
@@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
        update_min_dispatch_time(st);
 
        if (time_before_eq(st->min_disptime, jiffies))
-               throtl_schedule_delayed_work(td->queue, 0);
+               throtl_schedule_delayed_work(td, 0);
        else
-               throtl_schedule_delayed_work(td->queue,
-                               (st->min_disptime - jiffies));
+               throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
 }
 
 static inline void
@@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
 }
 
 /* Call with queue lock held */
-void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
+static void
+throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 {
 
-       struct throtl_data *td = q->td;
        struct delayed_work *dwork = &td->throtl_work;
 
        if (total_nr_queued(td) > 0) {
@@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
                 * Cancel that and schedule a new one.
                 */
                __cancel_delayed_work(dwork);
-               kblockd_schedule_delayed_work(q, dwork, delay);
+               queue_delayed_work(kthrotld_workqueue, dwork, delay);
                throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
                                delay, jiffies);
        }
 }
-EXPORT_SYMBOL(throtl_schedule_delayed_work);
 
 static void
 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
        smp_mb__after_atomic_inc();
 
        /* Schedule a work now to process the limit change */
-       throtl_schedule_delayed_work(td->queue, 0);
+       throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_write_bps(void *key,
@@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
        smp_mb__before_atomic_inc();
        atomic_inc(&td->limits_changed);
        smp_mb__after_atomic_inc();
-       throtl_schedule_delayed_work(td->queue, 0);
+       throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_read_iops(void *key,
@@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
        smp_mb__before_atomic_inc();
        atomic_inc(&td->limits_changed);
        smp_mb__after_atomic_inc();
-       throtl_schedule_delayed_work(td->queue, 0);
+       throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_write_iops(void *key,
@@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
        smp_mb__before_atomic_inc();
        atomic_inc(&td->limits_changed);
        smp_mb__after_atomic_inc();
-       throtl_schedule_delayed_work(td->queue, 0);
+       throtl_schedule_delayed_work(td, 0);
 }
 
 void throtl_shutdown_timer_wq(struct request_queue *q)
@@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)
 
 static int __init throtl_init(void)
 {
+       kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
+       if (!kthrotld_workqueue)
+               panic("Failed to create kthrotld\n");
+
        blkio_policy_register(&blkio_policy_throtl);
        return 0;
 }
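
Throttle work now runs on a dedicated queue instead of kblockd. A minimal
sketch of the alloc_workqueue() pattern used above; the ex_* names are
hypothetical:

	static struct workqueue_struct *ex_wq;

	static int __init ex_init(void)
	{
		/* WQ_MEM_RECLAIM guarantees a rescuer thread, so queued work
		 * can still make progress while the system reclaims memory. */
		ex_wq = alloc_workqueue("ex_wq", WQ_MEM_RECLAIM, 0);
		if (!ex_wq)
			return -ENOMEM;
		return 0;
	}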
index 7be4c79596250d28cb82afa9e1aef085abc6a66b..ea83a4f0c27dfda658ee41d87979a2d57c58450d 100644 (file)
@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                            cfqd->busy_queues > 1) {
                                cfq_del_timer(cfqd, cfqq);
                                cfq_clear_cfqq_wait_request(cfqq);
-                               __blk_run_queue(cfqd->queue);
+                               __blk_run_queue(cfqd->queue, false);
                        } else {
                                cfq_blkiocg_update_idle_time_stats(
                                                &cfqq->cfqg->blkg);
@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               __blk_run_queue(cfqd->queue);
+               __blk_run_queue(cfqd->queue, false);
        }
 }
 
@@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
        struct request_queue *q = cfqd->queue;
 
        spin_lock_irq(q->queue_lock);
-       __blk_run_queue(cfqd->queue);
+       __blk_run_queue(cfqd->queue, false);
        spin_unlock_irq(q->queue_lock);
 }
 
index 2569512830d3e65a8a73213879b591917bdd91c4..236e93c1f46ce54d0f2ac40c48a85289f1d568fb 100644 (file)
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
         */
        elv_drain_elevator(q);
        while (q->rq.elvpriv) {
-               __blk_run_queue(q);
+               __blk_run_queue(q, false);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
-               __blk_run_queue(q);
+               __blk_run_queue(q, false);
                break;
 
        case ELEVATOR_INSERT_SORT:
index 6a5b772aa2016f52f986a2cca396daf94ad0c7c4..cbf1112a885c0c715e4d38a3dfad91f74cd1f1fa 100644 (file)
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
        struct block_device *bdev = bdget_disk(disk, partno);
        if (bdev) {
                fsync_bdev(bdev);
-               res = __invalidate_device(bdev);
+               res = __invalidate_device(bdev, true);
                bdput(bdev);
        }
        return res;
index 9049d460fa890fd45e3e31c1aa8ca9ed080e9f81..1124cd297263571d63371ca8ed33f8c361b74e06 100644 (file)
@@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                        return -EINVAL;
                if (get_user(n, (int __user *) arg))
                        return -EFAULT;
-               if (!(mode & FMODE_EXCL) &&
-                   blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
-                       return -EBUSY;
+               if (!(mode & FMODE_EXCL)) {
+                       bdgrab(bdev);
+                       if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+                               return -EBUSY;
+               }
                ret = set_blocksize(bdev, n);
                if (!(mode & FMODE_EXCL))
                        blkdev_put(bdev, mode | FMODE_EXCL);
index 54784bb42ceca0386be233e0f2653867d5c7bf90..edc25867ad9d92ca4a23ef6fc7aa9c5f66c5862a 100644 (file)
@@ -416,10 +416,15 @@ struct acpi_gpe_handler_info {
        u8 originally_enabled;  /* True if GPE was originally enabled */
 };
 
+struct acpi_gpe_notify_object {
+       struct acpi_namespace_node *node;
+       struct acpi_gpe_notify_object *next;
+};
+
 union acpi_gpe_dispatch_info {
        struct acpi_namespace_node *method_node;        /* Method node for this GPE level */
        struct acpi_gpe_handler_info *handler;  /* Installed GPE handler */
-       struct acpi_namespace_node *device_node;        /* Parent _PRW device for implicit notify */
+       struct acpi_gpe_notify_object device;   /* List of _PRW devices for implicit notify */
 };
 
 /*
index 14988a86066fb70eaf9985cc49fbe1ff5b99ab4c..f4725212eb488fd1b37a3c2cc21735dbeb2282ad 100644 (file)
@@ -457,6 +457,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
        acpi_status status;
        struct acpi_gpe_event_info *local_gpe_event_info;
        struct acpi_evaluate_info *info;
+       struct acpi_gpe_notify_object *notify_object;
 
        ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
 
@@ -508,10 +509,18 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
                 * from this thread -- because handlers may in turn run other
                 * control methods.
                 */
-               status =
-                   acpi_ev_queue_notify_request(local_gpe_event_info->dispatch.
-                                                device_node,
-                                                ACPI_NOTIFY_DEVICE_WAKE);
+               status = acpi_ev_queue_notify_request(
+                               local_gpe_event_info->dispatch.device.node,
+                               ACPI_NOTIFY_DEVICE_WAKE);
+
+               notify_object = local_gpe_event_info->dispatch.device.next;
+               while (ACPI_SUCCESS(status) && notify_object) {
+                       status = acpi_ev_queue_notify_request(
+                                       notify_object->node,
+                                       ACPI_NOTIFY_DEVICE_WAKE);
+                       notify_object = notify_object->next;
+               }
+
                break;
 
        case ACPI_GPE_DISPATCH_METHOD:
index 3b20a3401b641d0a8039f5ecff7cd55f9d1cfc01..52aaff3df562b94ffbd73206ae0cf363001e42bd 100644 (file)
@@ -198,7 +198,9 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
        acpi_status status = AE_BAD_PARAMETER;
        struct acpi_gpe_event_info *gpe_event_info;
        struct acpi_namespace_node *device_node;
+       struct acpi_gpe_notify_object *notify_object;
        acpi_cpu_flags flags;
+       u8 gpe_dispatch_mask;
 
        ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
 
@@ -221,27 +223,49 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
                goto unlock_and_exit;
        }
 
+       if (wake_device == ACPI_ROOT_OBJECT) {
+               goto out;
+       }
+
        /*
         * If there is no method or handler for this GPE, then the
         * wake_device will be notified whenever this GPE fires (aka
         * "implicit notify") Note: The GPE is assumed to be
         * level-triggered (for windows compatibility).
         */
-       if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-             ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) {
+       gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
+       if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
+           && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
+               goto out;
+       }
 
-               /* Validate wake_device is of type Device */
+       /* Validate wake_device is of type Device */
 
-               device_node = ACPI_CAST_PTR(struct acpi_namespace_node,
-                                           wake_device);
-               if (device_node->type != ACPI_TYPE_DEVICE) {
-                       goto unlock_and_exit;
-               }
+       device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+       if (device_node->type != ACPI_TYPE_DEVICE) {
+               goto unlock_and_exit;
+       }
+
+       if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
                gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
                                         ACPI_GPE_LEVEL_TRIGGERED);
-               gpe_event_info->dispatch.device_node = device_node;
+               gpe_event_info->dispatch.device.node = device_node;
+               gpe_event_info->dispatch.device.next = NULL;
+       } else {
+               /* There are multiple devices to notify implicitly. */
+
+               notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
+               if (!notify_object) {
+                       status = AE_NO_MEMORY;
+                       goto unlock_and_exit;
+               }
+
+               notify_object->node = device_node;
+               notify_object->next = gpe_event_info->dispatch.device.next;
+               gpe_event_info->dispatch.device.next = notify_object;
        }
 
+ out:
        gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
        status = AE_OK;
 
index 5df67f1d6c612537f10ac2fd84b6720911d16723..384f7abcff77984fb67c21c34b462761bc8f611e 100644 (file)
@@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
                        size_t count, loff_t *ppos)
 {
        static char *buf;
-       static int uncopied_bytes;
+       static u32 max_size;
+       static u32 uncopied_bytes;
+
        struct acpi_table_header table;
        acpi_status status;
 
@@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
                if (copy_from_user(&table, user_buf,
                                   sizeof(struct acpi_table_header)))
                        return -EFAULT;
-               uncopied_bytes = table.length;
-               buf = kzalloc(uncopied_bytes, GFP_KERNEL);
+               uncopied_bytes = max_size = table.length;
+               buf = kzalloc(max_size, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
        }
 
-       if (uncopied_bytes < count) {
-               kfree(buf);
+       if (buf == NULL)
+               return -EINVAL;
+
+       if ((*ppos > max_size) ||
+           (*ppos + count > max_size) ||
+           (*ppos + count < count) ||
+           (count > uncopied_bytes))
                return -EINVAL;
-       }
 
        if (copy_from_user(buf + (*ppos), user_buf, count)) {
                kfree(buf);
+               buf = NULL;
                return -EFAULT;
        }
 
@@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
        if (!uncopied_bytes) {
                status = acpi_install_method(buf);
                kfree(buf);
+               buf = NULL;
                if (ACPI_FAILURE(status))
                        return -EINVAL;
                add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
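
The added checks harden cm_write() against a hostile writer: an offset beyond the table, a write running past the end, unsigned wraparound in *ppos + count, and a chunk larger than what the declared table length still allows. A self-contained sketch of the same validation as a pure function (the helper name and sample values are invented):

#include <stdint.h>
#include <stdio.h>

static int write_ok(uint32_t pos, uint32_t count,
                    uint32_t max_size, uint32_t uncopied)
{
        if (pos > max_size ||
            pos + count > max_size ||
            pos + count < count ||      /* unsigned sum wrapped around */
            count > uncopied)
                return 0;
        return 1;
}

int main(void)
{
        /* fits entirely within a 64-byte table */
        printf("%d\n", write_ok(0, 16, 64, 64));                /* 1 */
        /* runs past the end of the table */
        printf("%d\n", write_ok(60, 16, 64, 64));               /* 0 */
        /* pos + count wraps: the sum (11) is smaller than count */
        printf("%d\n", write_ok(UINT32_MAX - 4, 16,
                                UINT32_MAX - 1, UINT32_MAX - 1)); /* 0 */
        return 0;
}
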
index 5eb25eb3ea4818a9aead010dfc0ffeac9c073fa9..3b5c3189fd995e4cc200c532b2d05ba6b274fac7 100644 (file)
@@ -274,7 +274,7 @@ acpi_table_parse_srat(enum acpi_srat_type id,
 
 int __init acpi_numa_init(void)
 {
-       int ret = 0;
+       int cnt = 0;
 
        /*
         * Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
@@ -288,7 +288,7 @@ int __init acpi_numa_init(void)
                                     acpi_parse_x2apic_affinity, 0);
                acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
                                     acpi_parse_processor_affinity, 0);
-               ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
+               cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
                                            acpi_parse_memory_affinity,
                                            NR_NODE_MEMBLKS);
        }
@@ -297,7 +297,10 @@ int __init acpi_numa_init(void)
        acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
 
        acpi_numa_arch_fixup();
-       return ret;
+
+       if (cnt <= 0)
+               return cnt ?: -ENOENT;
+       return 0;
 }
 
 int acpi_get_pxm(acpi_handle h)
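
The cnt ?: -ENOENT above is the GNU C conditional with an omitted middle operand: a ?: b yields a when a is nonzero, otherwise b, evaluating a only once. A negative parser error thus propagates unchanged, while "zero SRAT memory entries" becomes -ENOENT. A minimal demonstration (GNU C, gcc or clang):

#include <errno.h>
#include <stdio.h>

static int ret_for(int cnt)
{
        if (cnt <= 0)
                return cnt ?: -ENOENT;  /* cnt < 0: pass it on; 0: -ENOENT */
        return 0;
}

int main(void)
{
        printf("%d %d %d\n", ret_for(3), ret_for(0), ret_for(-22));
        /* prints: 0 -2 -22 */
        return 0;
}
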
index b9ba04fc2b34eb0845cf0c724b64151e53b05c65..77fc76f8aea91b79be3f8cfc6c640e3e23748de0 100644 (file)
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
                        struct block_device *bdev = opened_bdev[cnt];
                        if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
                                continue;
-                       __invalidate_device(bdev);
+                       __invalidate_device(bdev, true);
                }
                mutex_unlock(&open_lock);
        } else {
index 49e6a545eb63fde25bc1984e3b2c165432f984b9..dbf31ec9114db6a23c270be8d49e3836cbc2684a 100644 (file)
@@ -78,7 +78,6 @@
 
 #include <asm/uaccess.h>
 
-static DEFINE_MUTEX(loop_mutex);
 static LIST_HEAD(loop_devices);
 static DEFINE_MUTEX(loop_devices_mutex);
 
@@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
 {
        struct loop_device *lo = bdev->bd_disk->private_data;
 
-       mutex_lock(&loop_mutex);
        mutex_lock(&lo->lo_ctl_mutex);
        lo->lo_refcnt++;
        mutex_unlock(&lo->lo_ctl_mutex);
-       mutex_unlock(&loop_mutex);
 
        return 0;
 }
@@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
        struct loop_device *lo = disk->private_data;
        int err;
 
-       mutex_lock(&loop_mutex);
        mutex_lock(&lo->lo_ctl_mutex);
 
        if (--lo->lo_refcnt)
@@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
 out:
        mutex_unlock(&lo->lo_ctl_mutex);
 out_unlocked:
-       mutex_unlock(&loop_mutex);
        return 0;
 }
 
index d7aa39e349a617ac26bb253f5b7e329bead28656..9cb8668ff5f412908e99dadcafd6d025904a2cdc 100644 (file)
@@ -120,6 +120,10 @@ static DEFINE_SPINLOCK(minor_lock);
 #define EXTENDED (1<<EXT_SHIFT)
 #define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
 #define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
+#define EMULATED_HD_DISK_MINOR_OFFSET (0)
+#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
+#define EMULATED_SD_DISK_MINOR_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET + (4 * 16))
+#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_HD_DISK_NAME_OFFSET + 4)
 
 #define DEV_NAME       "xvd"   /* name in /dev */
 
@@ -281,7 +285,7 @@ static int blkif_queue_request(struct request *req)
        info->shadow[id].request = req;
 
        ring_req->id = id;
-       ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
+       ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
        ring_req->handle = info->handle;
 
        ring_req->operation = rq_data_dir(req) ?
@@ -317,7 +321,7 @@ static int blkif_queue_request(struct request *req)
                                rq_data_dir(req) );
 
                info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
-               ring_req->seg[i] =
+               ring_req->u.rw.seg[i] =
                                (struct blkif_request_segment) {
                                        .gref       = ref,
                                        .first_sect = fsect,
@@ -434,6 +438,65 @@ static void xlvbd_flush(struct blkfront_info *info)
               info->feature_flush ? "enabled" : "disabled");
 }
 
+static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
+{
+       int major;
+       major = BLKIF_MAJOR(vdevice);
+       *minor = BLKIF_MINOR(vdevice);
+       switch (major) {
+               case XEN_IDE0_MAJOR:
+                       *offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
+                       *minor = ((*minor / 64) * PARTS_PER_DISK) +
+                               EMULATED_HD_DISK_MINOR_OFFSET;
+                       break;
+               case XEN_IDE1_MAJOR:
+                       *offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
+                       *minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
+                               EMULATED_HD_DISK_MINOR_OFFSET;
+                       break;
+               case XEN_SCSI_DISK0_MAJOR:
+                       *offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
+                       *minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
+                       break;
+               case XEN_SCSI_DISK1_MAJOR:
+               case XEN_SCSI_DISK2_MAJOR:
+               case XEN_SCSI_DISK3_MAJOR:
+               case XEN_SCSI_DISK4_MAJOR:
+               case XEN_SCSI_DISK5_MAJOR:
+               case XEN_SCSI_DISK6_MAJOR:
+               case XEN_SCSI_DISK7_MAJOR:
+                       *offset = (*minor / PARTS_PER_DISK) + 
+                               ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
+                               EMULATED_SD_DISK_NAME_OFFSET;
+                       *minor = *minor +
+                               ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
+                               EMULATED_SD_DISK_MINOR_OFFSET;
+                       break;
+               case XEN_SCSI_DISK8_MAJOR:
+               case XEN_SCSI_DISK9_MAJOR:
+               case XEN_SCSI_DISK10_MAJOR:
+               case XEN_SCSI_DISK11_MAJOR:
+               case XEN_SCSI_DISK12_MAJOR:
+               case XEN_SCSI_DISK13_MAJOR:
+               case XEN_SCSI_DISK14_MAJOR:
+               case XEN_SCSI_DISK15_MAJOR:
+                       *offset = (*minor / PARTS_PER_DISK) + 
+                               ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
+                               EMULATED_SD_DISK_NAME_OFFSET;
+                       *minor = *minor +
+                               ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
+                               EMULATED_SD_DISK_MINOR_OFFSET;
+                       break;
+               case XENVBD_MAJOR:
+                       *offset = *minor / PARTS_PER_DISK;
+                       break;
+               default:
+                       printk(KERN_WARNING "blkfront: your disk configuration is "
+                                       "incorrect, please use an xvd device instead\n");
+                       return -ENODEV;
+       }
+       return 0;
+}
 
 static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
                               struct blkfront_info *info,
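
xen_translate_vdev() above folds the emulated IDE and SCSI device numbers into the xvd minor space: the four IDE units take the first 4 * 16 minors, and emulated SCSI disks start right after them, which is why the sd name offset begins at 4 (xvde) and why the new warning tells users to pick names from xvde on. A trimmed userspace sketch covering only the IDE0 and first SCSI major (the major numbers and PARTS_PER_DISK = 16 are assumptions matching the usual Linux values):

#include <stdio.h>

#define PARTS_PER_DISK                  16
#define EMULATED_HD_DISK_MINOR_OFFSET   0
#define EMULATED_HD_DISK_NAME_OFFSET    (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET   (EMULATED_HD_DISK_MINOR_OFFSET + (4 * 16))
#define EMULATED_SD_DISK_NAME_OFFSET    (EMULATED_HD_DISK_NAME_OFFSET + 4)

#define IDE0_MAJOR 3
#define SD0_MAJOR  8

static int translate(int major, int minor, int *out_minor, int *offset)
{
        switch (major) {
        case IDE0_MAJOR:        /* hda = minors 0..63, hdb = 64..127 */
                *offset = minor / 64 + EMULATED_HD_DISK_NAME_OFFSET;
                *out_minor = (minor / 64) * PARTS_PER_DISK +
                             EMULATED_HD_DISK_MINOR_OFFSET;
                break;
        case SD0_MAJOR:         /* sda..sdp, 16 minors per disk */
                *offset = minor / PARTS_PER_DISK + EMULATED_SD_DISK_NAME_OFFSET;
                *out_minor = minor + EMULATED_SD_DISK_MINOR_OFFSET;
                break;
        default:
                return -1;
        }
        return 0;
}

int main(void)
{
        int m, off;

        translate(IDE0_MAJOR, 64, &m, &off);    /* whole disk hdb */
        printf("hdb -> xvd%c, minor %d\n", 'a' + off, m);   /* xvdb, 16 */

        translate(SD0_MAJOR, 16, &m, &off);     /* whole disk sdb */
        printf("sdb -> xvd%c, minor %d\n", 'a' + off, m);   /* xvdf, 80 */
        return 0;
}
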
@@ -441,7 +504,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 {
        struct gendisk *gd;
        int nr_minors = 1;
-       int err = -ENODEV;
+       int err;
        unsigned int offset;
        int minor;
        int nr_parts;
@@ -456,12 +519,20 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
        }
 
        if (!VDEV_IS_EXTENDED(info->vdevice)) {
-               minor = BLKIF_MINOR(info->vdevice);
-               nr_parts = PARTS_PER_DISK;
+               err = xen_translate_vdev(info->vdevice, &minor, &offset);
+               if (err)
+                       return err;
+               nr_parts = PARTS_PER_DISK;
        } else {
                minor = BLKIF_MINOR_EXT(info->vdevice);
                nr_parts = PARTS_PER_EXT_DISK;
+               offset = minor / nr_parts;
+               if (xen_hvm_domain() && offset <= EMULATED_HD_DISK_NAME_OFFSET + 4)
+                       printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
+                                       "emulated IDE disks,\n\t choose an xvd device name "
+                                       "from xvde on\n", info->vdevice);
        }
+       err = -ENODEV;
 
        if ((minor % nr_parts) == 0)
                nr_minors = nr_parts;
@@ -475,8 +546,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
        if (gd == NULL)
                goto release;
 
-       offset = minor / nr_parts;
-
        if (nr_minors > 1) {
                if (offset < 26)
                        sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
@@ -615,7 +684,7 @@ static void blkif_completion(struct blk_shadow *s)
 {
        int i;
        for (i = 0; i < s->req.nr_segments; i++)
-               gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
+               gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
 }
 
 static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -932,7 +1001,7 @@ static int blkif_recover(struct blkfront_info *info)
                /* Rewrite any grant references invalidated by susp/resume. */
                for (j = 0; j < req->nr_segments; j++)
                        gnttab_grant_foreign_access_ref(
-                               req->seg[j].gref,
+                               req->u.rw.seg[j].gref,
                                info->xbdev->otherend_id,
                                pfn_to_mfn(info->shadow[req->id].frame[j]),
                                rq_data_dir(info->shadow[req->id].request));
index 333c21289d97f4ba6224abfa5cabbdb2d8ccde6f..6dcd55a74c0abbdc6a7d00e2b9896b1cb9acc4b9 100644 (file)
@@ -41,6 +41,9 @@ static struct usb_device_id ath3k_table[] = {
 
        /* Atheros AR9285 Malbec with sflash firmware */
        { USB_DEVICE(0x03F0, 0x311D) },
+
+       /* Atheros AR5BBU12 with sflash firmware */
+       { USB_DEVICE(0x0489, 0xE02C) },
        { }     /* Terminating entry */
 };
 
index 4cefa91e6c34782bf57ef4769c747f94a23e8696..700a3840fddc2e8dec17e63f7910b128463c9225 100644 (file)
@@ -105,6 +105,9 @@ static struct usb_device_id blacklist_table[] = {
        /* Atheros AR9285 Malbec with sflash firmware */
        { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
 
+       /* Atheros AR5BBU12 with sflash firmware */
+       { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+
        /* Broadcom BCM2035 */
        { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
        { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -829,7 +832,7 @@ static void btusb_work(struct work_struct *work)
 
        if (hdev->conn_hash.sco_num > 0) {
                if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
-                       err = usb_autopm_get_interface(data->isoc);
+                       err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
                        if (err < 0) {
                                clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
                                usb_kill_anchored_urbs(&data->isoc_anchor);
@@ -858,7 +861,7 @@ static void btusb_work(struct work_struct *work)
 
                __set_isoc_interface(hdev, 0);
                if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
-                       usb_autopm_put_interface(data->isoc);
+                       usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
        }
 }
 
@@ -1041,8 +1044,6 @@ static int btusb_probe(struct usb_interface *intf,
 
        usb_set_intfdata(intf, data);
 
-       usb_enable_autosuspend(interface_to_usbdev(intf));
-
        return 0;
 }
 
index 9252e85706ef2ce54728a0c8bc366c80c793033c..780498d765811db559d8bae1108357704490bf40 100644 (file)
@@ -773,18 +773,23 @@ int __init agp_amd64_init(void)
 #else
                        printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
 #endif
+                       pci_unregister_driver(&agp_amd64_pci_driver);
                        return -ENODEV;
                }
 
                /* First check that we have at least one AMD64 NB */
-               if (!pci_dev_present(amd_nb_misc_ids))
+               if (!pci_dev_present(amd_nb_misc_ids)) {
+                       pci_unregister_driver(&agp_amd64_pci_driver);
                        return -ENODEV;
+               }
 
                /* Look for any AGP bridge */
                agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
                err = driver_attach(&agp_amd64_pci_driver.driver);
-               if (err == 0 && agp_bridges_found == 0)
+               if (err == 0 && agp_bridges_found == 0) {
+                       pci_unregister_driver(&agp_amd64_pci_driver);
                        err = -ENODEV;
+               }
        }
        return err;
 }
index c195bfeade117125eec0e2dbf1c6e550f4ec87f9..5feebe2800e9a5147fa04e951ff40dfd62e1688d 100644 (file)
 #define INTEL_GMCH_GMS_STOLEN_352M     (0xd << 4)
 
 #define I915_IFPADDR    0x60
+#define I830_HIC        0x70
 
 /* Intel 965G registers */
 #define I965_MSAC 0x62
index fab3d3265adbec77ba71096587d924e9858d59f7..0d09b537bb9a5cbd52691874b7b8a77534ee1720 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/pagemap.h>
 #include <linux/agp_backend.h>
+#include <linux/delay.h>
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
@@ -70,12 +71,8 @@ static struct _intel_private {
        u32 __iomem *gtt;               /* I915G */
        bool clear_fake_agp; /* on first access via agp, fill with scratch */
        int num_dcache_entries;
-       union {
-               void __iomem *i9xx_flush_page;
-               void *i8xx_flush_page;
-       };
+       void __iomem *i9xx_flush_page;
        char *i81x_gtt_table;
-       struct page *i8xx_page;
        struct resource ifp_resource;
        int resource_valid;
        struct page *scratch_page;
@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
 
 static void i830_cleanup(void)
 {
-       if (intel_private.i8xx_flush_page) {
-               kunmap(intel_private.i8xx_flush_page);
-               intel_private.i8xx_flush_page = NULL;
-       }
-
-       __free_page(intel_private.i8xx_page);
-       intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
-       /* return if we've already set the flush mechanism up */
-       if (intel_private.i8xx_page)
-               return;
-
-       intel_private.i8xx_page = alloc_page(GFP_KERNEL);
-       if (!intel_private.i8xx_page)
-               return;
-
-       intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
-       if (!intel_private.i8xx_flush_page)
-               i830_cleanup();
 }
 
 /* The chipset_flush interface needs to get data that has already been
@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void)
  */
 static void i830_chipset_flush(void)
 {
-       unsigned int *pg = intel_private.i8xx_flush_page;
+       unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+       /* Forcibly evict everything from the CPU write buffers.
+        * clflush appears to be insufficient.
+        */
+       wbinvd_on_all_cpus();
+
+       /* So far we've only seen documentation for this magic bit on
+        * the 855GM; we hope it exists on the other gen2 chipsets...
+        *
+        * Also works as advertised on my 845G.
+        */
+       writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+              intel_private.registers+I830_HIC);
 
-       memset(pg, 0, 1024);
+       while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+               if (time_after(jiffies, timeout))
+                       break;
 
-       if (cpu_has_clflush)
-               clflush_cache_range(pg, 1024);
-       else if (wbinvd_on_all_cpus() != 0)
-               printk(KERN_ERR "Timed out waiting for cache flush.\n");
+               udelay(50);
+       }
 }
 
 static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -849,8 +837,6 @@ static int i830_setup(void)
 
        intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
 
-       intel_i830_setup_flush();
-
        return 0;
 }
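
i830_chipset_flush() above replaces the flush page with a write to bit 31 of I830_HIC followed by a bounded poll: wait for the hardware to clear the bit, giving up after one second. The generic shape of such a deadline poll, as a standalone sketch (read_busy() and the delay values stand in for the register read and udelay()):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int fake_busy = 5;               /* pretend the HW needs 5 polls */

static int read_busy(void)
{
        return fake_busy ? fake_busy-- : 0;
}

int main(void)
{
        struct timespec now, deadline;

        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec += 1;           /* 1000 ms, like the jiffies timeout */

        while (read_busy()) {
                clock_gettime(CLOCK_MONOTONIC, &now);
                if (now.tv_sec > deadline.tv_sec ||
                    (now.tv_sec == deadline.tv_sec &&
                     now.tv_nsec > deadline.tv_nsec)) {
                        puts("timed out");
                        return 1;
                }
                usleep(50);             /* mirrors udelay(50) */
        }
        puts("flush complete");
        return 0;
}
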
 
index 7855f9f45b8ec9418bfdda34d304aaae5c3bb670..62787e30d508c2e63fe8f7923fbc45a63493a545 100644 (file)
@@ -900,6 +900,14 @@ static void sender(void                *send_info,
        printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 
+       /*
+        * last_timeout_jiffies is updated here to prevent the
+        * smi_timeout() handler from passing a very large time_diff
+        * value to smi_event_handler(), which would cause the send
+        * command to abort.
+        */
+       smi_info->last_timeout_jiffies = jiffies;
+
        mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
 
        if (smi_info->thread)
index e6d75627c6c815a794b54924e5f03c96feb7625e..33dc2298af73a9118649208df8e75a79bef1bd15 100644 (file)
@@ -53,6 +53,8 @@ MODULE_LICENSE("GPL");
 
 #define RTC_BITS 55 /* 55 bits for this implementation */
 
+static struct k_clock sgi_clock;
+
 extern unsigned long sn_rtc_cycles_per_second;
 
 #define RTC_COUNTER_ADDR        ((long *)LOCAL_MMR_ADDR(SH_RTC))
@@ -487,7 +489,7 @@ static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
        return 0;
 };
 
-static int sgi_clock_set(clockid_t clockid, struct timespec *tp)
+static int sgi_clock_set(const clockid_t clockid, const struct timespec *tp)
 {
 
        u64 nsec;
@@ -763,15 +765,21 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
        return err;
 }
 
+static int sgi_clock_getres(const clockid_t which_clock, struct timespec *tp)
+{
+       tp->tv_sec = 0;
+       tp->tv_nsec = sgi_clock_period;
+       return 0;
+}
+
 static struct k_clock sgi_clock = {
-       .res = 0,
-       .clock_set = sgi_clock_set,
-       .clock_get = sgi_clock_get,
-       .timer_create = sgi_timer_create,
-       .nsleep = do_posix_clock_nonanosleep,
-       .timer_set = sgi_timer_set,
-       .timer_del = sgi_timer_del,
-       .timer_get = sgi_timer_get
+       .clock_set      = sgi_clock_set,
+       .clock_get      = sgi_clock_get,
+       .clock_getres   = sgi_clock_getres,
+       .timer_create   = sgi_timer_create,
+       .timer_set      = sgi_timer_set,
+       .timer_del      = sgi_timer_del,
+       .timer_get      = sgi_timer_get
 };
 
 /**
@@ -831,8 +839,8 @@ static int __init mmtimer_init(void)
                        (unsigned long) node);
        }
 
-       sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second;
-       register_posix_clock(CLOCK_SGI_CYCLE, &sgi_clock);
+       sgi_clock_period = NSEC_PER_SEC / sn_rtc_cycles_per_second;
+       posix_timers_register_clock(CLOCK_SGI_CYCLE, &sgi_clock);
 
        printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION,
               sn_rtc_cycles_per_second/(unsigned long)1E6);
index 777181a2e603592361b32bfea492429b506b64e6..bcbbc71febb78f2b9801c4a89f6e5f9989167a3a 100644 (file)
@@ -830,8 +830,7 @@ static void monitor_card(unsigned long p)
                            test_bit(IS_ANY_T1, &dev->flags))) {
                                DEBUGP(4, dev, "Perform AUTOPPS\n");
                                set_bit(IS_AUTOPPS_ACT, &dev->flags);
-                               ptsreq.protocol = ptsreq.protocol =
-                                   (0x01 << dev->proto);
+                               ptsreq.protocol = (0x01 << dev->proto);
                                ptsreq.flags = 0x01;
                                ptsreq.pts1 = 0x00;
                                ptsreq.pts2 = 0x00;
index 94b8eb4d691d6b95fd0f32ffeea780dc28a6121c..444155a305ae1a3ad26d35a19b79ff83643167d6 100644 (file)
@@ -78,7 +78,6 @@ static void signalled_reboot_callback(void *callback_data)
 static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 {
        struct ipw_dev *ipw = priv_data;
-       struct resource *io_resource;
        int ret;
 
        p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
@@ -92,9 +91,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
        if (ret)
                return ret;
 
-       io_resource = request_region(p_dev->resource[0]->start,
-                               resource_size(p_dev->resource[0]),
-                               IPWIRELESS_PCCARD_NAME);
+       if (!request_region(p_dev->resource[0]->start,
+                           resource_size(p_dev->resource[0]),
+                           IPWIRELESS_PCCARD_NAME)) {
+               ret = -EBUSY;
+               goto exit;
+       }
 
        p_dev->resource[2]->flags |=
                WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
@@ -105,22 +107,25 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 
        ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
        if (ret != 0)
-               goto exit2;
+               goto exit1;
 
        ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;
 
-       ipw->attr_memory = ioremap(p_dev->resource[2]->start,
+       ipw->common_memory = ioremap(p_dev->resource[2]->start,
                                resource_size(p_dev->resource[2]));
-       request_mem_region(p_dev->resource[2]->start,
-                       resource_size(p_dev->resource[2]),
-                       IPWIRELESS_PCCARD_NAME);
+       if (!request_mem_region(p_dev->resource[2]->start,
+                               resource_size(p_dev->resource[2]),
+                               IPWIRELESS_PCCARD_NAME)) {
+               ret = -EBUSY;
+               goto exit2;
+       }
 
        p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
                                        WIN_ENABLE;
        p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
        ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
        if (ret != 0)
-               goto exit2;
+               goto exit3;
 
        ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
        if (ret != 0)
@@ -128,23 +133,28 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 
        ipw->attr_memory = ioremap(p_dev->resource[3]->start,
                                resource_size(p_dev->resource[3]));
-       request_mem_region(p_dev->resource[3]->start,
-                       resource_size(p_dev->resource[3]),
-                       IPWIRELESS_PCCARD_NAME);
+       if (!request_mem_region(p_dev->resource[3]->start,
+                               resource_size(p_dev->resource[3]),
+                               IPWIRELESS_PCCARD_NAME)) {
+               ret = -EBUSY;
+               goto exit4;
+       }
 
        return 0;
 
+exit4:
+       iounmap(ipw->attr_memory);
 exit3:
+       release_mem_region(p_dev->resource[2]->start,
+                       resource_size(p_dev->resource[2]));
 exit2:
-       if (ipw->common_memory) {
-               release_mem_region(p_dev->resource[2]->start,
-                               resource_size(p_dev->resource[2]));
-               iounmap(ipw->common_memory);
-       }
+       iounmap(ipw->common_memory);
 exit1:
-       release_resource(io_resource);
+       release_region(p_dev->resource[0]->start,
+                      resource_size(p_dev->resource[0]));
+exit:
        pcmcia_disable_device(p_dev);
-       return -1;
+       return ret;
 }
 
 static int config_ipwireless(struct ipw_dev *ipw)
@@ -219,6 +229,8 @@ exit:
 
 static void release_ipwireless(struct ipw_dev *ipw)
 {
+       release_region(ipw->link->resource[0]->start,
+                      resource_size(ipw->link->resource[0]));
        if (ipw->common_memory) {
                release_mem_region(ipw->link->resource[2]->start,
                                resource_size(ipw->link->resource[2]));
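
The probe path above now checks every request_region()/request_mem_region() and unwinds through a goto ladder, releasing in the reverse order of acquisition and returning the real errno instead of -1. The ladder in miniature (acquire() and release() are invented stand-ins):

#include <stdio.h>

static int acquire(const char *what, int ok)
{
        printf("%s %s\n", ok ? "acquired" : "failed", what);
        return ok;
}

static void release(const char *what)
{
        printf("released %s\n", what);
}

static int probe(int fail_at)
{
        int ret = -1;

        if (!acquire("io region", fail_at != 1))
                goto exit;
        if (!acquire("common memory", fail_at != 2))
                goto exit1;
        if (!acquire("attr memory", fail_at != 3))
                goto exit2;
        return 0;

exit2:                          /* each label undoes one earlier step */
        release("common memory");
exit1:
        release("io region");
exit:
        return ret;
}

int main(void)
{
        return probe(3) ? 1 : 0;  /* fail at the last step; watch the unwind */
}
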
index 36e0fa161c2bf0b46206a6e0af00698935ad158b..1f46f1cd9225c3c0d2a894bc80d3d1e74fe012e7 100644 (file)
@@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
                    tpm_protected_ordinal_duration[ordinal &
                                                   TPM_PROTECTED_ORDINAL_MASK];
 
-       if (duration_idx != TPM_UNDEFINED) {
+       if (duration_idx != TPM_UNDEFINED)
                duration = chip->vendor.duration[duration_idx];
-               /* if duration is 0, it's because chip->vendor.duration wasn't */
-               /* filled yet, so we set the lowest timeout just to give enough */
-               /* time for tpm_get_timeouts() to succeed */
-               return (duration <= 0 ? HZ : duration);
-       } else
+       if (duration <= 0)
                return 2 * 60 * HZ;
+       else
+               return duration;
 }
 EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
 
index 490393186338dae5b8a7a7804923b801a381263a..84b164d1eb2b16d29db4d87ed6a7d745fa1938bc 100644 (file)
@@ -388,6 +388,10 @@ static void discard_port_data(struct port *port)
        unsigned int len;
        int ret;
 
+       if (!port->portdev) {
+               /* Device has been unplugged.  vqs are already gone. */
+               return;
+       }
        vq = port->in_vq;
        if (port->inbuf)
                buf = port->inbuf;
@@ -470,6 +474,10 @@ static void reclaim_consumed_buffers(struct port *port)
        void *buf;
        unsigned int len;
 
+       if (!port->portdev) {
+               /* Device has been unplugged.  vqs are already gone. */
+               return;
+       }
        while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
                kfree(buf);
                port->outvq_full = false;
index 1109f6848a43940b8e8ed738f891f6560f17f7a8..5cb4d09919d67f83d99f8bfb837e64c8a660bb46 100644 (file)
@@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
        ret = sysdev_driver_register(&cpu_sysdev_class,
                                        &cpufreq_sysdev_driver);
+       if (ret)
+               goto err_null_driver;
 
-       if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
+       if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
                int i;
                ret = -ENODEV;
 
@@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
                if (ret) {
                        dprintk("no CPU initialized for driver %s\n",
                                                        driver_data->name);
-                       sysdev_driver_unregister(&cpu_sysdev_class,
-                                               &cpufreq_sysdev_driver);
-
-                       spin_lock_irqsave(&cpufreq_driver_lock, flags);
-                       cpufreq_driver = NULL;
-                       spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+                       goto err_sysdev_unreg;
                }
        }
 
-       if (!ret) {
-               register_hotcpu_notifier(&cpufreq_cpu_notifier);
-               dprintk("driver %s up and running\n", driver_data->name);
-               cpufreq_debug_enable_ratelimit();
-       }
+       register_hotcpu_notifier(&cpufreq_cpu_notifier);
+       dprintk("driver %s up and running\n", driver_data->name);
+       cpufreq_debug_enable_ratelimit();
 
+       return 0;
+err_sysdev_unreg:
+       sysdev_driver_unregister(&cpu_sysdev_class,
+                       &cpufreq_sysdev_driver);
+err_null_driver:
+       spin_lock_irqsave(&cpufreq_driver_lock, flags);
+       cpufreq_driver = NULL;
+       spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
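
The reworked cpufreq_register_driver() follows the same convention: on any failure after a step has succeeded, jump to a label that rolls back exactly the steps already taken, so the success path reads straight through. A compact sketch of that control flow (all functions are stand-ins, not the cpufreq API):

#include <stdio.h>

static int register_sysdev(int ok)  { return ok ? 0 : -1; }
static void unregister_sysdev(void) { puts("  sysdev unregistered"); }
static void null_driver(void)       { puts("  driver pointer cleared"); }

static int register_driver(int sysdev_ok, int cpu_ok)
{
        int ret;

        /* step 1: publish the driver pointer (under a lock in the real code) */

        ret = register_sysdev(sysdev_ok);       /* step 2 */
        if (ret)
                goto err_null_driver;

        if (!cpu_ok) {                          /* step 3: no CPU initialized */
                ret = -1;
                goto err_sysdev_unreg;
        }
        return 0;

err_sysdev_unreg:
        unregister_sysdev();
err_null_driver:
        null_driver();
        return ret;
}

int main(void)
{
        printf("ok path: %d\n", register_driver(1, 1));
        printf("no-cpu path: %d\n", register_driver(1, 0));
        return 0;
}
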
index cead8e6ff345f0a0563bc9d8ecf5d97a9ee99980..7f6f01a4b145ad00bc9c2b1fac7c704d67e145a4 100644 (file)
@@ -326,6 +326,7 @@ static DEFINE_PCI_DEVICE_TABLE(ioh_gpio_pcidev_id) = {
        { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) },
        { 0, }
 };
+MODULE_DEVICE_TABLE(pci, ioh_gpio_pcidev_id);
 
 static struct pci_driver ioh_gpio_driver = {
        .name = "ml_ioh_gpio",
index 0eba0a75c804d8b81c66e1a8920c71cb67b6365c..2c6af87051030b1017cc5670fcbfd2bfddad61fa 100644 (file)
@@ -286,6 +286,7 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
        { 0, }
 };
+MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
 
 static struct pci_driver pch_gpio_driver = {
        .name = "pch_gpio",
index 6977a1ce9d98171b9f5e43cc0127e13d87ea3b5b..f73ef4390db615fc499742af6b29f3aed6dae833 100644 (file)
@@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
        struct drm_crtc_helper_funcs *crtc_funcs;
        u16 *red, *green, *blue, *transp;
        struct drm_crtc *crtc;
-       int i, rc = 0;
+       int i, j, rc = 0;
        int start;
 
        for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
                transp = cmap->transp;
                start = cmap->start;
 
-               for (i = 0; i < cmap->len; i++) {
+               for (j = 0; j < cmap->len; j++) {
                        u16 hred, hgreen, hblue, htransp = 0xffff;
 
                        hred = *red++;
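
The fix above is the classic shadowed-loop-counter bug: the inner cmap loop reused the outer counter i, corrupting the outer iteration over CRTCs. In miniature (with these counts the buggy version would process only one CRTC):

#include <stdio.h>

int main(void)
{
        int crtcs = 3, cmap_len = 5;
        int processed = 0;

        for (int i = 0; i < crtcs; i++) {
                /* was: for (i = 0; ...) — the inner loop left i at 5,
                 * so the outer loop exited after a single pass */
                for (int j = 0; j < cmap_len; j++)
                        processed++;
        }
        printf("%d entries (expect %d)\n", processed, crtcs * cmap_len);
        return 0;
}
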
index 3dadfa2a85289105abf34acd11399f2c7bb94869..28d1d3c24d65eba69a7573d8818ee9e0225ed665 100644 (file)
@@ -164,8 +164,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
         * available. In that case we can't account for this and just
         * hope for the best.
         */
-       if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+       if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
                atomic_inc(&dev->_vblank_count[crtc]);
+               smp_mb__after_atomic_inc();
+       }
 
        /* Invalidate all timestamps while vblank irq's are off. */
        clear_vblank_timestamps(dev, crtc);
@@ -491,6 +493,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
        /* Dot clock in Hz: */
        dotclock = (u64) crtc->hwmode.clock * 1000;
 
+       /* Fields of interlaced scanout modes are only half a frame duration.
+        * Double the dotclock to get half the frame-/line-/pixel duration.
+        */
+       if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+               dotclock *= 2;
+
        /* Valid dotclock? */
        if (dotclock > 0) {
                /* Convert scanline length in pixels and video dot clock to
@@ -603,14 +611,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                return -EAGAIN;
        }
 
-       /* Don't know yet how to handle interlaced or
-        * double scan modes. Just no-op for now.
-        */
-       if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
-               DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
-               return -ENOTSUPP;
-       }
-
        /* Get current scanout position with system timestamp.
         * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
         * if single query takes longer than max_error nanoseconds.
@@ -858,10 +858,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
        if (rc) {
                tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
                vblanktimestamp(dev, crtc, tslot) = t_vblank;
-               smp_wmb();
        }
 
+       smp_mb__before_atomic_inc();
        atomic_add(diff, &dev->_vblank_count[crtc]);
+       smp_mb__after_atomic_inc();
 }
 
 /**
@@ -1011,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
 {
        struct drm_modeset_ctl *modeset = data;
-       int crtc, ret = 0;
+       int ret = 0;
+       unsigned int crtc;
 
        /* If drm_vblank_init() hasn't been called yet, just no-op */
        if (!dev->num_crtcs)
@@ -1293,15 +1295,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
         * e.g., due to spurious vblank interrupts. We need to
         * ignore those for accounting.
         */
-       if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+       if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
                /* Store new timestamp in ringbuffer. */
                vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
-               smp_wmb();
 
                /* Increment cooked vblank count. This also atomically commits
                 * the timestamp computed above.
                 */
+               smp_mb__before_atomic_inc();
                atomic_inc(&dev->_vblank_count[crtc]);
+               smp_mb__after_atomic_inc();
        } else {
                DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
                          crtc, (int) diff_ns);
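
Both hunks replace a bare smp_wmb() after the timestamp store with full barriers bracketing the counter increment, so a reader that observes the new vblank count is guaranteed to also observe the timestamp stored just before it. In C11 terms this is release/acquire pairing on the counter, sketched below (the kernel spells it with smp_mb__before/after_atomic_inc() around atomic_inc()):

#include <stdatomic.h>
#include <stdio.h>

static long timestamp;                  /* plain data, ordered via the counter */
static atomic_uint vblank_count;

static void vblank_irq(long now)
{
        timestamp = now;                                 /* 1: store timestamp */
        atomic_fetch_add_explicit(&vblank_count, 1,
                                  memory_order_release); /* 2: then the count */
}

static long read_last_timestamp(void)
{
        unsigned c = atomic_load_explicit(&vblank_count,
                                          memory_order_acquire);
        return c ? timestamp : -1;      /* new count => timestamp is visible */
}

int main(void)
{
        vblank_irq(123456);
        printf("%ld\n", read_last_timestamp());
        return 0;
}
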
index 3601466c55027391c355fe1c11186913394fbf2d..4ff9b6cc973f031f363b8a25c669765793920951 100644 (file)
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                int max_freq;
 
                /* RPSTAT1 is in the GT power well */
-               __gen6_force_wake_get(dev_priv);
+               __gen6_gt_force_wake_get(dev_priv);
 
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           max_freq * 100);
 
-               __gen6_force_wake_put(dev_priv);
+               __gen6_gt_force_wake_put(dev_priv);
        } else {
                seq_printf(m, "no P-state info available\n");
        }
index 17bd766f20811ae5622a4276adf78de9a77c5208..e33d9be7df3b7455c4f4f72085079c0450d50903 100644 (file)
@@ -1895,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (IS_GEN2(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
 
+       /* 965GM sometimes incorrectly writes to hardware status page (HWS)
+        * using 32bit addressing, overwriting memory if HWS is located
+        * above 4GB.
+        *
+        * The documentation also mentions an issue with undefined
+        * behaviour if any general state is accessed within a page above 4GB,
+        * which also needs to be handled carefully.
+        */
+       if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+               dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
        mmio_bar = IS_GEN2(dev) ? 1 : 0;
        dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
        if (!dev_priv->regs) {
index 0ad533f06af97361a51666c7262eb96c4f218e71..22ec066adae68ffd83432b37e149e8344c65d744 100644 (file)
@@ -46,6 +46,9 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
 
+unsigned int i915_semaphores = 0;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+
 unsigned int i915_enable_rc6 = 0;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 
@@ -254,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev)
        }
 }
 
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
        int count;
 
@@ -270,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
                udelay(10);
 }
 
-void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE, 0);
        POSTING_READ(FORCEWAKE);
 }
 
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+       int loop = 500;
+       u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+       while (fifo < 20 && loop--) {
+               udelay(10);
+               fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+       }
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
index 65dfe81d0035bc9ced9636c1397373c6664e5e20..456f4048483827d04c84375e0268351057688d15 100644 (file)
@@ -956,6 +956,7 @@ extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
+extern unsigned int i915_semaphores;
 extern unsigned int i915_lvds_downclock;
 extern unsigned int i915_panel_use_ssc;
 extern unsigned int i915_enable_rc6;
@@ -1177,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1353,22 +1357,32 @@ __i915_write(64, q)
  * must be set to prevent GT core from power down and stale values being
  * returned.
  */
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
-void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
-static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
+static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
 {
        u32 val;
 
        if (dev_priv->info->gen >= 6) {
-               __gen6_force_wake_get(dev_priv);
+               __gen6_gt_force_wake_get(dev_priv);
                val = I915_READ(reg);
-               __gen6_force_wake_put(dev_priv);
+               __gen6_gt_force_wake_put(dev_priv);
        } else
                val = I915_READ(reg);
 
        return val;
 }
 
+static inline void i915_gt_write(struct drm_i915_private *dev_priv,
+                               u32 reg, u32 val)
+{
+       if (dev_priv->info->gen >= 6)
+               __gen6_gt_wait_for_fifo(dev_priv);
+       I915_WRITE(reg, val);
+}
+
 static inline void
 i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
 {
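
The renamed helpers encode two gen6 invariants: GT registers only return live data while a force-wake reference is held, and writes must first wait for free entries in the GT FIFO. Wrapping every access keeps callers from touching the register file in a powered-down state. The guard pattern in isolation (all functions are stand-ins for the MMIO and force-wake primitives):

#include <stdio.h>

static int awake;
static unsigned regs[16];

static void force_wake_get(void) { awake = 1; }
static void force_wake_put(void) { awake = 0; }

static unsigned gt_read(unsigned reg)
{
        unsigned val;

        force_wake_get();       /* keep the GT core powered... */
        val = regs[reg];        /* ...so this read returns live data */
        force_wake_put();
        return val;
}

int main(void)
{
        regs[3] = 0xbeef;
        printf("reg3 = 0x%x, awake after read = %d\n", gt_read(3), awake);
        return 0;
}
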
index cf4f74c7c6fb6fb7fa8dcedc4839ce2910039846..36e66cc5225ebf23c090289562d3e1dc2ee14092 100644 (file)
@@ -1398,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
  * Return the required GTT alignment for an object, only taking into account
  * unfenced tiled surface requirements.
  */
-static uint32_t
+uint32_t
 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
 {
        struct drm_device *dev = obj->base.dev;
index d2f445e825f200b10aad8aef884ddb2ec2671fc9..50ab1614571c746447781758c8355cb251276019 100644 (file)
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
        if (from == NULL || to == from)
                return 0;
 
-       /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
-       if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
+       /* XXX gpu semaphores are implicated in various hard hangs on SNB */
+       if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
                return i915_gem_object_wait_rendering(obj, true);
 
        idx = intel_ring_sync_index(from, to);
index 22a32b9932c59c45761521415fb3d092d562e855..d64843e18df2c6d0565801c44ea96353b780c2d3 100644 (file)
@@ -349,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                        (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
                         i915_gem_object_fence_ok(obj, args->tiling_mode));
 
-               obj->tiling_changed = true;
-               obj->tiling_mode = args->tiling_mode;
-               obj->stride = args->stride;
+               /* Rebind if we need a change of alignment */
+               if (!obj->map_and_fenceable) {
+                       u32 unfenced_alignment =
+                               i915_gem_get_unfenced_gtt_alignment(obj);
+                       if (obj->gtt_offset & (unfenced_alignment - 1))
+                               ret = i915_gem_object_unbind(obj);
+               }
+
+               if (ret == 0) {
+                       obj->tiling_changed = true;
+                       obj->tiling_mode = args->tiling_mode;
+                       obj->stride = args->stride;
+               }
        }
+       /* we have to maintain this existing ABI... */
+       args->stride = obj->stride;
+       args->tiling_mode = obj->tiling_mode;
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
 
-       return 0;
+       return ret;
 }
 
 /**
index 97f946dcc1aaa496a218cafb77d813c2dcd83a07..8a9e08bf1cf74d3a9ba9060a8523e394593a09bd 100644 (file)
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;
 
+       DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
        list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
                if (encoder->hot_plug)
                        encoder->hot_plug(encoder);
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        } else {
                hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
                               SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
-               hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
-               I915_WRITE(FDI_RXA_IMR, 0);
-               I915_WRITE(FDI_RXB_IMR, 0);
+               hotplug_mask |= SDE_AUX_MASK;
        }
 
        dev_priv->pch_irq_mask = ~hotplug_mask;
index 729d4233b763d2576c4ded20a1b5a330efe8a56c..2abe240dae5837ff2c993a804012f9439b17390d 100644 (file)
 
 /* Backlight control */
 #define BLC_PWM_CTL            0x61254
+#define   BACKLIGHT_MODULATION_FREQ_SHIFT              (17)
 #define BLC_PWM_CTL2           0x61250 /* 965+ only */
+#define   BLM_COMBINATION_MODE (1 << 30)
+/*
+ * These are the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define   BACKLIGHT_MODULATION_FREQ_MASK               (0x7fff << 17)
+#define   BLM_LEGACY_MODE                              (1 << 16)
 /*
  * This is the number of cycles out of the backlight modulation cycle for which
  * the backlight is on.
 #define  FORCEWAKE                             0xA18C
 #define  FORCEWAKE_ACK                         0x130090
 
+#define  GT_FIFO_FREE_ENTRIES                  0x120008
+
 #define GEN6_RPNSWREQ                          0xA008
 #define   GEN6_TURBO_DISABLE                   (1<<31)
 #define   GEN6_FREQUENCY(x)                    ((x)<<25)
index 3b006536b3d23cda6c771de53f423345f35b9c89..49fb54fd9a1879d2185b32bd1fb3597d82ee31a9 100644 (file)
@@ -1219,7 +1219,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
        u32 blt_ecoskpd;
 
        /* Make sure blitter notifies FBC of writes */
-       __gen6_force_wake_get(dev_priv);
+       __gen6_gt_force_wake_get(dev_priv);
        blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
                GEN6_BLITTER_LOCK_SHIFT;
@@ -1230,7 +1230,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
                         GEN6_BLITTER_LOCK_SHIFT);
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        POSTING_READ(GEN6_BLITTER_ECOSKPD);
-       __gen6_force_wake_put(dev_priv);
+       __gen6_gt_force_wake_put(dev_priv);
 }
 
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
 
                wait_event(dev_priv->pending_flip_queue,
+                          atomic_read(&dev_priv->mm.wedged) ||
                           atomic_read(&obj->pending_flip) == 0);
 
                /* Big Hammer, we also need to ensure that any pending
                 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
                 * current scanout is retired before unpinning the old
                 * framebuffer.
+                *
+                * This should only fail upon a hung GPU, in which case we
+                * can safely continue.
                 */
                ret = i915_gem_object_flush_gpu(obj, false);
-               if (ret) {
-                       i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
+               (void) ret;
        }
 
        ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
                   atomic_read(&obj->pending_flip) == 0);
 }
 
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct intel_encoder *encoder;
+
+       /*
+        * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+        * must be driven by its own crtc; no sharing is possible.
+        */
+       list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+               if (encoder->base.crtc != crtc)
+                       continue;
+
+               switch (encoder->type) {
+               case INTEL_OUTPUT_EDP:
+                       if (!intel_encoder_is_pch_edp(&encoder->base))
+                               return false;
+                       continue;
+               }
+       }
+
+       return true;
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        u32 reg, temp;
+       bool is_pch_port = false;
 
        if (intel_crtc->active)
                return;
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
                        I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
        }
 
-       ironlake_fdi_enable(crtc);
+       is_pch_port = intel_crtc_driving_pch(crtc);
+
+       if (is_pch_port)
+               ironlake_fdi_enable(crtc);
+       else {
+               /* disable CPU FDI tx and PCH FDI rx */
+               reg = FDI_TX_CTL(pipe);
+               temp = I915_READ(reg);
+               I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+               POSTING_READ(reg);
+
+               reg = FDI_RX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~(0x7 << 16);
+               temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+               I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+               POSTING_READ(reg);
+               udelay(100);
+
+               /* Ironlake workaround, disable clock pointer after downing FDI */
+               if (HAS_PCH_IBX(dev))
+                       I915_WRITE(FDI_RX_CHICKEN(pipe),
+                                  I915_READ(FDI_RX_CHICKEN(pipe)) &
+                                  ~FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+
+               /* still set train pattern 1 */
+               reg = FDI_TX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_PATTERN_1;
+               I915_WRITE(reg, temp);
+
+               reg = FDI_RX_CTL(pipe);
+               temp = I915_READ(reg);
+               if (HAS_PCH_CPT(dev)) {
+                       temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+                       temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+               } else {
+                       temp &= ~FDI_LINK_TRAIN_NONE;
+                       temp |= FDI_LINK_TRAIN_PATTERN_1;
+               }
+               /* BPC in FDI rx is consistent with that in PIPECONF */
+               temp &= ~(0x07 << 16);
+               temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+               I915_WRITE(reg, temp);
+
+               POSTING_READ(reg);
+               udelay(100);
+       }
 
        /* Enable panel fitting for LVDS */
        if (dev_priv->pch_pf_size &&
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
                intel_flush_display_plane(dev, plane);
        }
 
+       /* Skip the PCH stuff if possible */
+       if (!is_pch_port)
+               goto done;
+
        /* For PCH output, training FDI link */
        if (IS_GEN6(dev))
                gen6_fdi_link_train(crtc);
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        I915_WRITE(reg, temp | TRANS_ENABLE);
        if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
                DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
        intel_crtc_load_lut(crtc);
        intel_update_fbc(dev);
        intel_crtc_update_cursor(crtc, true);
@@ -6203,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
         * userspace...
         */
        I915_WRITE(GEN6_RC_STATE, 0);
-       __gen6_force_wake_get(dev_priv);
+       __gen6_gt_force_wake_get(dev_priv);
 
        /* disable the counters and set deterministic thresholds */
        I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6301,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        /* enable all PM interrupts */
        I915_WRITE(GEN6_PMINTRMSK, 0);
 
-       __gen6_force_wake_put(dev_priv);
+       __gen6_gt_force_wake_put(dev_priv);
 }
 
 void intel_enable_clock_gating(struct drm_device *dev)
@@ -6496,7 +6575,7 @@ static void ironlake_disable_rc6(struct drm_device *dev)
                POSTING_READ(RSTDBYCTL);
        }
 
-       ironlake_disable_rc6(dev);
+       ironlake_teardown_rc6(dev);
 }
 
 static int ironlake_setup_rc6(struct drm_device *dev)
index d860abeda70f54905355ceae824174ae0e19943b..f8f86e57df2264c4578f10935b48f78370f49d4c 100644 (file)
@@ -30,6 +30,8 @@
 
 #include "intel_drv.h"
 
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
+
 void
 intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
                       struct drm_display_mode *adjusted_mode)
@@ -110,6 +112,19 @@ done:
        dev_priv->pch_pf_size = (width << 16) | height;
 }
 
+static int is_backlight_combination_mode(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (INTEL_INFO(dev)->gen >= 4)
+               return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+
+       if (IS_GEN2(dev))
+               return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+
+       return 0;
+}
+
 static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
 {
        u32 val;
@@ -166,6 +181,9 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
                        if (INTEL_INFO(dev)->gen < 4)
                                max &= ~1;
                }
+
+               if (is_backlight_combination_mode(dev))
+                       max *= 0xff;
        }
 
        DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
@@ -183,6 +201,14 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
                val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
                if (IS_PINEVIEW(dev))
                        val >>= 1;
+
+               if (is_backlight_combination_mode(dev)){
+                       u8 lbpc;
+
+                       val &= ~1;
+                       pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+                       val *= lbpc;
+               }
        }
 
        DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
@@ -205,6 +231,16 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
 
        if (HAS_PCH_SPLIT(dev))
                return intel_pch_panel_set_backlight(dev, level);
+
+       if (is_backlight_combination_mode(dev)){
+               u32 max = intel_panel_get_max_backlight(dev);
+               u8 lbpc;
+
+               lbpc = level * 0xfe / max + 1;
+               level /= lbpc;
+               pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
+       }
+
        tmp = I915_READ(BLC_PWM_CTL);
        if (IS_PINEVIEW(dev)) {
                tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
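
In combination mode the requested level is factored into two controls: an 8-bit coarse value lbpc written to PCI config space and the PWM duty cycle, computed above as lbpc = level * 0xfe / max + 1 and level /= lbpc, so their product approximates the request (intel_panel_get_backlight() reverses this with val *= lbpc). A standalone check of that arithmetic (the PWM maximum is an assumed sample value):

#include <stdio.h>

int main(void)
{
        unsigned max = 0x1000 * 0xff;   /* PWM max scaled by 0xff, as
                                           intel_panel_get_max_backlight()
                                           does in combination mode */
        unsigned levels[] = { 0, 1000, max / 2, max };

        for (int i = 0; i < 4; i++) {
                unsigned level = levels[i];
                unsigned lbpc = level * 0xfe / max + 1; /* ranges 1..0xff */
                unsigned pwm = level / lbpc;
                printf("level %8u -> lbpc %3u, pwm %7u, product %8u\n",
                       level, lbpc, pwm, pwm * lbpc);
        }
        return 0;
}
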
index 6d6fde85a636248e373d9ef288450597f1ba6f91..34306865a5df936bad131c41eae47118146b1f52 100644 (file)
@@ -14,22 +14,23 @@ struct  intel_hw_status_page {
        struct          drm_i915_gem_object *obj;
 };
 
-#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
+#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
 
 #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
 
 #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
 
 #define I915_READ_HEAD(ring)  I915_RING_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
 
 #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
 
-#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
 
 #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
 #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
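
These ring macros now route through i915_gt_read()/i915_gt_write(), which bracket the raw MMIO access with a forcewake reference so GT registers are not touched while the GT is powered down. A toy user-space model of that bracket, with plain counters standing in for the real forcewake hooks:

#include <stdio.h>

static int forcewake_count;
static unsigned int fake_reg = 0xdeadbeef;

static unsigned int gt_read_sketch(unsigned int *reg)
{
        unsigned int val;

        forcewake_count++;   /* models __gen6_gt_force_wake_get() */
        val = *reg;          /* the raw MMIO read                 */
        forcewake_count--;   /* models __gen6_gt_force_wake_put() */
        return val;
}

int main(void)
{
        printf("read 0x%x (wake refs now %d)\n",
               gt_read_sketch(&fake_reg), forcewake_count);
        return 0;
}
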
index d38a4d9f9b0b0d6a49c66c3fa01e142e92372f1d..a52184007f5f0375f61d37335f323c98b46949ef 100644 (file)
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
                DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
        nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
-       nouveau_vm_put(&nvbo->vma);
+       if (nvbo->vma.node) {
+               nouveau_vm_unmap(&nvbo->vma);
+               nouveau_vm_put(&nvbo->vma);
+       }
        kfree(nvbo);
 }
 
index 65699bfaaaeaf616aa07066122e636f9bba3ff8a..b368ed74aad75e13fce3995a25a4effa75a1a7fe 100644 (file)
@@ -83,7 +83,8 @@ nouveau_dma_init(struct nouveau_channel *chan)
                return ret;
 
        /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
-       ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
+       ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+                                    &chan->m2mf_ntfy);
        if (ret)
                return ret;
 
index 9821fcacc3d2f561ed10da893509ac3ace7d5b21..982d70b12722fb3f5c93de26fc93e0b319bf3c70 100644 (file)
@@ -852,7 +852,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
 extern int  nouveau_notifier_init_channel(struct nouveau_channel *);
 extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
 extern int  nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
-                                  int cout, uint32_t *offset);
+                                  int cout, uint32_t start, uint32_t end,
+                                  uint32_t *offset);
 extern int  nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
 extern int  nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
                                         struct drm_file *);
index 26347b7cd8722e95c70d425416b8ed40c236e15e..b0fb9bdcddb76a3ff5711142090a16a8cbfe66a3 100644 (file)
@@ -725,8 +725,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
        ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
                        mem->page_alignment << PAGE_SHIFT, size_nc,
                        (nvbo->tile_flags >> 8) & 0xff, &node);
-       if (ret)
-               return ret;
+       if (ret) {
+               mem->mm_node = NULL;
+               return (ret == -ENOSPC) ? 0 : ret;
+       }
 
        node->page_shift = 12;
        if (nvbo->vma.node)
index 8844b50c3e540f697b973748da96203f9a5b9993..7609756b6faf9352fa5e833cf86f54f4bc031d31 100644 (file)
@@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
                return 0;
        }
 
-       return -ENOMEM;
+       return -ENOSPC;
 }
 
 int
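
Taken together, the two nouveau hunks above change the out-of-space convention: the low-level pool reports -ENOSPC, and the TTM manager converts that into a NULL node plus a 0 return so the caller falls back to eviction rather than failing outright. A compilable sketch of the convention, with hypothetical names:

#include <errno.h>
#include <stdio.h>
#include <stddef.h>

struct node { int placeholder; };

static int pool_get(struct node **out)
{
        *out = NULL;
        return -ENOSPC;            /* pool exhausted, not a hard error */
}

static int manager_new(struct node **mm_node)
{
        struct node *n;
        int ret = pool_get(&n);

        if (ret) {
                *mm_node = NULL;   /* tell the caller nothing was placed */
                return ret == -ENOSPC ? 0 : ret;  /* 0 => caller may evict */
        }
        *mm_node = n;
        return 0;
}

int main(void)
{
        struct node *node;

        printf("ret=%d node=%p\n", manager_new(&node), (void *)node);
        return 0;
}
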
index fe29d604b820cf2f5c82d27738bdb1c77f840a94..5ea167623a82cc74c2ae979899f2cd89ca1220ce 100644 (file)
@@ -96,7 +96,8 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
 
 int
 nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
-                      int size, uint32_t *b_offset)
+                      int size, uint32_t start, uint32_t end,
+                      uint32_t *b_offset)
 {
        struct drm_device *dev = chan->dev;
        struct nouveau_gpuobj *nobj = NULL;
@@ -104,9 +105,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
        uint32_t offset;
        int target, ret;
 
-       mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
+       mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
+                                         start, end, 0);
        if (mem)
-               mem = drm_mm_get_block(mem, size, 0);
+               mem = drm_mm_get_block_range(mem, size, 0, start, end);
        if (!mem) {
                NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
                return -ENOMEM;
@@ -177,7 +179,8 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
        if (IS_ERR(chan))
                return PTR_ERR(chan);
 
-       ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
+       ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
+                                    &na->offset);
        nouveau_channel_put(&chan);
        return ret;
 }
index ea0041810ae3a648959f533e3fbefcc17ca508d7..e57caa2a00e3640647ce3a56b35a1e7c60aa6b8d 100644 (file)
@@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 void
 nv50_instmem_flush(struct drm_device *dev)
 {
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       spin_lock(&dev_priv->ramin_lock);
        nv_wr32(dev, 0x00330c, 0x00000001);
        if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
                NV_ERROR(dev, "PRAMIN flush timeout\n");
+       spin_unlock(&dev_priv->ramin_lock);
 }
 
 void
 nv84_instmem_flush(struct drm_device *dev)
 {
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       spin_lock(&dev_priv->ramin_lock);
        nv_wr32(dev, 0x070000, 0x00000001);
        if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
                NV_ERROR(dev, "PRAMIN flush timeout\n");
+       spin_unlock(&dev_priv->ramin_lock);
 }
 
index 459ff08241e571400eb63e9e84c2cdae9a150bb7..6144156f255af02389775d78444cb56fd7f22fe6 100644 (file)
@@ -169,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm)
 void
 nv50_vm_flush_engine(struct drm_device *dev, int engine)
 {
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       spin_lock(&dev_priv->ramin_lock);
        nv_wr32(dev, 0x100c80, (engine << 16) | 1);
        if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
                NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+       spin_unlock(&dev_priv->ramin_lock);
 }
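
Both flush paths above gain the same bracket: the register write that kicks the flush and the poll for its completion must not interleave between threads, or one flusher can time out waiting on the other's cycle. A user-space model of the pattern, with a pthread mutex standing in for ramin_lock and a stub standing in for the hardware completion poll:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ramin_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int flush_reg;

/* stand-in for the hardware clearing the busy bit */
static int flush_complete(void) { flush_reg &= ~2u; return 1; }

static void flush_sketch(void)
{
        pthread_mutex_lock(&ramin_lock);   /* spin_lock(&ramin_lock)  */
        flush_reg = 1;                     /* nv_wr32(...): kick it   */
        if (!flush_complete())             /* nv_wait(...): poll      */
                fprintf(stderr, "flush timeout\n");
        pthread_mutex_unlock(&ramin_lock);
}

int main(void)
{
        flush_sketch();
        puts("flushed");
        return 0;
}
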
index d270b3ff896b9a32d65533159ae5a8dac79171a3..6140ea1de45a6d1979c7dcbd048606adf8081cfc 100644 (file)
@@ -2194,7 +2194,6 @@ int evergreen_mc_init(struct radeon_device *rdev)
                rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
        }
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        r700_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
 
@@ -2934,7 +2933,7 @@ static int evergreen_startup(struct radeon_device *rdev)
        /* XXX: ontario has problems blitting to gart at the moment */
        if (rdev->family == CHIP_PALM) {
                rdev->asic->copy = NULL;
-               rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+               radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
        }
 
        /* allocate wb buffer */
index 2adfb03f479bf4c36fcf556801f15f01e075cd13..2be698e78ff2256be592391674b78b2d7a51d7a4 100644 (file)
@@ -623,7 +623,7 @@ done:
                dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
                return r;
        }
-       rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
        return 0;
 }
 
@@ -631,7 +631,7 @@ void evergreen_blit_fini(struct radeon_device *rdev)
 {
        int r;
 
-       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
        if (rdev->r600_blit.shader_obj == NULL)
                return;
        /* If we can't reserve the bo, unref should be enough to destroy
index 56deae5bf02e9dbff8066136d961a9f4ba52bf8f..e372f9e1e5ce10a56b0fc17e9ee9930f8719daf4 100644 (file)
@@ -70,23 +70,6 @@ MODULE_FIRMWARE(FIRMWARE_R520);
 
 void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
 {
-       struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
-       u32 tmp;
-
-       /* make sure flip is at vb rather than hb */
-       tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
-       tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
-       /* make sure pending bit is asserted */
-       tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
-       WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
-
-       /* set pageflip to happen as late as possible in the vblank interval.
-        * same field for crtc1/2
-        */
-       tmp = RREG32(RADEON_CRTC_GEN_CNTL);
-       tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
-       WREG32(RADEON_CRTC_GEN_CNTL, tmp);
-
        /* enable the pflip int */
        radeon_irq_kms_pflip_irq_get(rdev, crtc);
 }
@@ -1041,7 +1024,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
                return r;
        }
        rdev->cp.ready = true;
-       rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
        return 0;
 }
 
@@ -1059,7 +1042,7 @@ void r100_cp_fini(struct radeon_device *rdev)
 void r100_cp_disable(struct radeon_device *rdev)
 {
        /* Disable ring */
-       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
        rdev->cp.ready = false;
        WREG32(RADEON_CP_CSQ_MODE, 0);
        WREG32(RADEON_CP_CSQ_CNTL, 0);
@@ -2329,7 +2312,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
        /* FIXME we don't use the second aperture yet when we could use it */
        if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
                rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
        if (rdev->flags & RADEON_IS_IGP) {
                uint32_t tom;
@@ -3490,7 +3472,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
                track->num_texture = 16;
                track->maxy = 4096;
                track->separate_cube = 0;
-               track->aaresolve = true;
+               track->aaresolve = false;
                track->aa.robj = NULL;
        }
 
@@ -3801,8 +3783,6 @@ static int r100_startup(struct radeon_device *rdev)
        r100_mc_program(rdev);
        /* Resume clock */
        r100_clock_startup(rdev);
-       /* Initialize GPU configuration (# pipes, ...) */
-//     r100_gpu_init(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        r100_enable_bm(rdev);
index de88624d5f8736037c93ebf627318b463930c953..9b3fad23b76ca2036bef2a1328ba343bf617bc80 100644 (file)
@@ -1255,7 +1255,6 @@ int r600_mc_init(struct radeon_device *rdev)
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        r600_vram_gtt_location(rdev, &rdev->mc);
 
        if (rdev->flags & RADEON_IS_IGP) {
@@ -1937,7 +1936,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
  */
 void r600_cp_stop(struct radeon_device *rdev)
 {
-       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
        WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
        WREG32(SCRATCH_UMSK, 0);
 }
index 41f7aafc97c4c66b7b6d6943251f78e78be68929..df68d91e8190eb790f1ceed252450a4e2ebc5837 100644 (file)
@@ -558,7 +558,7 @@ done:
                dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
                return r;
        }
-       rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
        return 0;
 }
 
@@ -566,7 +566,7 @@ void r600_blit_fini(struct radeon_device *rdev)
 {
        int r;
 
-       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
        if (rdev->r600_blit.shader_obj == NULL)
                return;
        /* If we can't reserve the bo, unref should be enough to destroy
index 56c48b67ef3d04c2ac0d01a150751985f9e749d3..6b3429495118aeebfaa0762588063ba23c9690a6 100644 (file)
@@ -345,7 +345,6 @@ struct radeon_mc {
         * about vram size near mc fb location */
        u64                     mc_vram_size;
        u64                     visible_vram_size;
-       u64                     active_vram_size;
        u64                     gtt_size;
        u64                     gtt_start;
        u64                     gtt_end;
@@ -1448,6 +1447,7 @@ extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *m
 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 extern int radeon_resume_kms(struct drm_device *dev);
 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 
 /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
 extern bool r600_card_posted(struct radeon_device *rdev);
index e75d63b8e21d8dcc8902b08f8a479baeea9644c9..793c5e6026ad79902806e9efd4a7ac51ec2340e5 100644 (file)
@@ -834,6 +834,9 @@ static struct radeon_asic sumo_asic = {
        .pm_finish = &evergreen_pm_finish,
        .pm_init_profile = &rs780_pm_init_profile,
        .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+       .pre_page_flip = &evergreen_pre_page_flip,
+       .page_flip = &evergreen_page_flip,
+       .post_page_flip = &evergreen_post_page_flip,
 };
 
 static struct radeon_asic btc_asic = {
index 0e657095de7cbca4cdd7d79af876fcd8fb2dd74a..3e7e7f9eb781af59ffc8bb7e606b543b177f76b8 100644 (file)
@@ -971,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
                max_fractional_feed_div = pll->max_frac_feedback_div;
        }
 
-       for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
+       for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
                uint32_t ref_div;
 
                if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
index 66324b5bb5ba0f038f393f1a4464514c41159ca9..cc44bdfec80f2239a2d07bdf88ab2bc7ac1b53e1 100644 (file)
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
        u32 tiling_flags = 0;
        int ret;
        int aligned_size, size;
+       int height = mode_cmd->height;
 
        /* need to align pitch with crtc limits */
        mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
 
-       size = mode_cmd->pitch * mode_cmd->height;
+       if (rdev->family >= CHIP_R600)
+               height = ALIGN(mode_cmd->height, 8);
+       size = mode_cmd->pitch * height;
        aligned_size = ALIGN(size, PAGE_SIZE);
        ret = radeon_gem_object_create(rdev, aligned_size, 0,
                                       RADEON_GEM_DOMAIN_VRAM,
index df95eb83dac6d52e4cad7f6570d6cca9e2807b1f..1fe95dfe48c9a130fae104da04a3af4b964614a2 100644 (file)
@@ -156,9 +156,12 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 {
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
+       struct ttm_mem_type_manager *man;
+
+       man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
        args->vram_size = rdev->mc.real_vram_size;
-       args->vram_visible = rdev->mc.real_vram_size;
+       args->vram_visible = (u64)man->size << PAGE_SHIFT;
        if (rdev->stollen_vga_memory)
                args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
        args->vram_visible -= radeon_fbdev_total_size(rdev);
index cf0638c3b7c70df070ff10fa11b7a914115689fc..78968b738e88ea5d0df47210bf780d64a5ead1fb 100644 (file)
@@ -443,7 +443,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
                       (target_fb->bits_per_pixel * 8));
        crtc_pitch |= crtc_pitch << 16;
 
-
+       crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
        if (tiling_flags & RADEON_TILING_MACRO) {
                if (ASIC_IS_R300(rdev))
                        crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
@@ -502,6 +502,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
        gen_cntl_val = RREG32(gen_cntl_reg);
        gen_cntl_val &= ~(0xf << 8);
        gen_cntl_val |= (format << 8);
+       gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
        WREG32(gen_cntl_reg, gen_cntl_val);
 
        crtc_offset = (u32)base;
index e5b2cf10cbf4feba4cb7373dd68c5dfb06046114..8389b4c63d128da20e41d5a838e4d3eb176d6955 100644 (file)
@@ -589,6 +589,20 @@ void radeon_ttm_fini(struct radeon_device *rdev)
        DRM_INFO("radeon: ttm finalized\n");
 }
 
+/* this should only be called at bootup or when userspace
+ * isn't running */
+void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
+{
+       struct ttm_mem_type_manager *man;
+
+       if (!rdev->mman.initialized)
+               return;
+
+       man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+       /* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
+       man->size = size >> PAGE_SHIFT;
+}
+
 static struct vm_operations_struct radeon_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops = NULL;
 
index 5afe294ed51f0e4ea3d4055a55598f4ba865dbb9..8af4679db23e3451f223eab9e5e005026ed6336d 100644 (file)
@@ -751,7 +751,6 @@ void rs600_mc_init(struct radeon_device *rdev)
        rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
        rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
        base = RREG32_MC(R_000004_MC_FB_LOCATION);
        base = G_000004_MC_FB_START(base) << 16;
index 6638c8e4c81bca044bdc4d9147f79af47f59bd61..66c949b7c18cc29fb4f047543039eab4be4f1d19 100644 (file)
@@ -157,7 +157,6 @@ void rs690_mc_init(struct radeon_device *rdev)
        rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
        rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
        base = G_000100_MC_FB_START(base) << 16;
        rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
index d8ba676906566a42063113985881377403dda560..714ad45757d060ed41daf45933f51db4b899a8ec 100644 (file)
@@ -307,7 +307,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
  */
 void r700_cp_stop(struct radeon_device *rdev)
 {
-       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
        WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
        WREG32(SCRATCH_UMSK, 0);
 }
@@ -1123,7 +1123,6 @@ int rv770_mc_init(struct radeon_device *rdev)
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        r700_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
 
index 86d822aa9bbf78a75ff20d1e4bff1bb0623d3a79..d46c0c758ddf163200f52d8e52cebb0dcf05b4b5 100644 (file)
@@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = {
        { "ad7414", 0 },
        {}
 };
+MODULE_DEVICE_TABLE(i2c, ad7414_id);
 
 static struct i2c_driver ad7414_driver = {
        .driver = {
index f13c843a2964bbb07728c06363cda065937293bb..5cc3e3784b42f5101c4238a0582113adecaae737 100644 (file)
@@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = {
        { "adt7411", 0 },
        { }
 };
+MODULE_DEVICE_TABLE(i2c, adt7411_id);
 
 static struct i2c_driver adt7411_driver = {
        .driver         = {
index 3f49dd376f023e1e6d4a281bc84ae9f05c59288a..6e06019015a5cf5644458fc62b061899eb1488b5 100644 (file)
@@ -37,7 +37,7 @@
 #define SIO_F71858FG_LD_HWM    0x02    /* Hardware monitor logical device */
 #define SIO_F71882FG_LD_HWM    0x04    /* Hardware monitor logical device */
 #define SIO_UNLOCK_KEY         0x87    /* Key to enable Super-I/O */
-#define SIO_LOCK_KEY           0xAA    /* Key to diasble Super-I/O */
+#define SIO_LOCK_KEY           0xAA    /* Key to disable Super-I/O */
 
 #define SIO_REG_LDSEL          0x07    /* Logical device select */
 #define SIO_REG_DEVID          0x20    /* Device ID (2 bytes) */
@@ -2111,7 +2111,6 @@ static int f71882fg_remove(struct platform_device *pdev)
        int nr_fans = (data->type == f71882fg) ? 4 : 3;
        u8 start_reg = f71882fg_read8(data, F71882FG_REG_START);
 
-       platform_set_drvdata(pdev, NULL);
        if (data->hwmon_dev)
                hwmon_device_unregister(data->hwmon_dev);
 
@@ -2178,6 +2177,7 @@ static int f71882fg_remove(struct platform_device *pdev)
                }
        }
 
+       platform_set_drvdata(pdev, NULL);
        kfree(data);
 
        return 0;
index 2e067dd2ee5154a185b6c2ad1f315a41459bf604..50ea1f43bdc1ea6f11688c64e044fe573736dba4 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/pci.h>
 #include <linux/mutex.h>
 #include <linux/ktime.h>
+#include <linux/slab.h>
 
 #define PCH_EVENT_SET  0       /* I2C Interrupt Event Set Status */
 #define PCH_EVENT_NONE 1       /* I2C Interrupt Event Clear Status */
index c2ef5ce1f1ffc66a1b367e8a63582bbdee43fbac..1b46a9d9f907336fb70526ca93881ce3b3934950 100644 (file)
@@ -249,7 +249,7 @@ static struct i2c_adapter ocores_adapter = {
 static int ocores_i2c_of_probe(struct platform_device* pdev,
                                struct ocores_i2c* i2c)
 {
-       __be32* val;
+       const __be32* val;
 
        val = of_get_property(pdev->dev.of_node, "regstep", NULL);
        if (!val) {
index b605ff3a1fa05703d8fbbaf12df90aaa1c125813..58a58c7eaa17d6eb7fac8b2ca6014110e0e3f225 100644 (file)
@@ -378,9 +378,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
                         * REVISIT: Some wkup sources might not be needed.
                         */
                        dev->westate = OMAP_I2C_WE_ALL;
-                       if (dev->rev < OMAP_I2C_REV_ON_4430)
-                               omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
-                                                               dev->westate);
+                       omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
                }
        }
        omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
@@ -847,11 +845,15 @@ complete:
                        dev_err(dev->dev, "Arbitration lost\n");
                        err |= OMAP_I2C_STAT_AL;
                }
+               /*
+                * ProDB0017052: Clear ARDY bit twice
+                */
                if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
                                        OMAP_I2C_STAT_AL)) {
                        omap_i2c_ack_stat(dev, stat &
                                (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
-                               OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
+                               OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR |
+                               OMAP_I2C_STAT_ARDY));
                        omap_i2c_complete_cmd(dev, err);
                        return IRQ_HANDLED;
                }
@@ -1137,12 +1139,41 @@ omap_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_SUSPEND
+static int omap_i2c_suspend(struct device *dev)
+{
+       if (!pm_runtime_suspended(dev))
+               if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
+                       dev->bus->pm->runtime_suspend(dev);
+
+       return 0;
+}
+
+static int omap_i2c_resume(struct device *dev)
+{
+       if (!pm_runtime_suspended(dev))
+               if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
+                       dev->bus->pm->runtime_resume(dev);
+
+       return 0;
+}
+
+static struct dev_pm_ops omap_i2c_pm_ops = {
+       .suspend = omap_i2c_suspend,
+       .resume = omap_i2c_resume,
+};
+#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
+#else
+#define OMAP_I2C_PM_OPS NULL
+#endif
+
 static struct platform_driver omap_i2c_driver = {
        .probe          = omap_i2c_probe,
        .remove         = omap_i2c_remove,
        .driver         = {
                .name   = "omap_i2c",
                .owner  = THIS_MODULE,
+               .pm     = OMAP_I2C_PM_OPS,
        },
 };
 
index 495be451d326c5b860648e94ccad189c9438c0b2..266135ddf7fa3eeb84cc4e40516688a149cb16d0 100644 (file)
@@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev)
        adap->owner = THIS_MODULE;
        /* DDC class but actually often used for more generic I2C */
        adap->class = I2C_CLASS_DDC;
-       strncpy(adap->name, "ST Microelectronics DDC I2C adapter",
+       strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
                sizeof(adap->name));
        adap->nr = bus_nr;
        adap->algo = &stu300_algo;
index 1fa091e05690fe345817c9858b7c020e7c005eec..4a5c4a44ffb17a3ae0f44a489074ca99c33fa7ed 100644 (file)
@@ -62,6 +62,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <asm/mwait.h>
+#include <asm/msr.h>
 
 #define INTEL_IDLE_VERSION "0.4"
 #define PREFIX "intel_idle: "
@@ -84,6 +85,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
 
 static struct cpuidle_state *cpuidle_state_table;
 
+/*
+ * Hardware C-state auto-demotion may not always be optimal.
+ * Indicate which enable bits to clear here.
+ */
+static unsigned long long auto_demotion_disable_flags;
+
 /*
  * Set this flag for states where the HW flushes the TLB for us
  * and so we don't need cross-calls to keep it consistent.
@@ -281,6 +288,15 @@ static struct notifier_block setup_broadcast_notifier = {
        .notifier_call = setup_broadcast_cpuhp_notify,
 };
 
+static void auto_demotion_disable(void *dummy)
+{
+       unsigned long long msr_bits;
+
+       rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+       msr_bits &= ~auto_demotion_disable_flags;
+       wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+}
+
 /*
  * intel_idle_probe()
  */
@@ -324,11 +340,17 @@ static int intel_idle_probe(void)
        case 0x25:      /* Westmere */
        case 0x2C:      /* Westmere */
                cpuidle_state_table = nehalem_cstates;
+               auto_demotion_disable_flags =
+                       (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
                break;
 
        case 0x1C:      /* 28 - Atom Processor */
+               cpuidle_state_table = atom_cstates;
+               break;
+
        case 0x26:      /* 38 - Lincroft Atom Processor */
                cpuidle_state_table = atom_cstates;
+               auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
                break;
 
        case 0x2A:      /* SNB */
@@ -436,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void)
                        return -EIO;
                }
        }
+       if (auto_demotion_disable_flags)
+               smp_call_function(auto_demotion_disable, NULL, 1);
 
        return 0;
 }
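
auto_demotion_disable() above is a plain read-modify-write of the package C-state configuration MSR, run once per CPU via smp_call_function(). Reduced to the bit manipulation, with stand-in bit values and plain variables modeling rdmsrl()/wrmsrl():

#include <stdio.h>

/* illustrative stand-ins for the msr-index.h definitions */
#define NHM_C1_AUTO_DEMOTE (1ull << 26)
#define NHM_C3_AUTO_DEMOTE (1ull << 25)

int main(void)
{
        unsigned long long msr = 0x1e008402ull;  /* pretend rdmsrl() result */
        unsigned long long flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE;

        msr &= ~flags;                           /* clear demotion enables  */
        printf("would wrmsrl() 0x%llx\n", msr);
        return 0;
}
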
index 23cf8fc933ec037c40251d0c855e736e00141dc8..5b8f59d6c3e839cb5e0c76db7d9f080d473995ee 100644 (file)
@@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner,
        event->owner = owner;
 
        list_add_tail(&event->node, &gameport_event_list);
-       schedule_work(&gameport_event_work);
+       queue_work(system_long_wq, &gameport_event_work);
 
 out:
        spin_unlock_irqrestore(&gameport_event_lock, flags);
index ac471b77c18ee606020d671096c7edc829545a49..99ce9032d08cd61f437fb5e3256db537cfa751e8 100644 (file)
@@ -71,8 +71,9 @@ struct tegra_kbc {
        spinlock_t lock;
        unsigned int repoll_dly;
        unsigned long cp_dly_jiffies;
+       bool use_fn_map;
        const struct tegra_kbc_platform_data *pdata;
-       unsigned short keycode[KBC_MAX_KEY];
+       unsigned short keycode[KBC_MAX_KEY * 2];
        unsigned short current_keys[KBC_MAX_KPENT];
        unsigned int num_pressed_keys;
        struct timer_list timer;
@@ -178,6 +179,40 @@ static const u32 tegra_kbc_default_keymap[] = {
        KEY(15, 5, KEY_F2),
        KEY(15, 6, KEY_CAPSLOCK),
        KEY(15, 7, KEY_F6),
+
+       /* Software Handled Function Keys */
+       KEY(20, 0, KEY_KP7),
+
+       KEY(21, 0, KEY_KP9),
+       KEY(21, 1, KEY_KP8),
+       KEY(21, 2, KEY_KP4),
+       KEY(21, 4, KEY_KP1),
+
+       KEY(22, 1, KEY_KPSLASH),
+       KEY(22, 2, KEY_KP6),
+       KEY(22, 3, KEY_KP5),
+       KEY(22, 4, KEY_KP3),
+       KEY(22, 5, KEY_KP2),
+       KEY(22, 7, KEY_KP0),
+
+       KEY(27, 1, KEY_KPASTERISK),
+       KEY(27, 3, KEY_KPMINUS),
+       KEY(27, 4, KEY_KPPLUS),
+       KEY(27, 5, KEY_KPDOT),
+
+       KEY(28, 5, KEY_VOLUMEUP),
+
+       KEY(29, 3, KEY_HOME),
+       KEY(29, 4, KEY_END),
+       KEY(29, 5, KEY_BRIGHTNESSDOWN),
+       KEY(29, 6, KEY_VOLUMEDOWN),
+       KEY(29, 7, KEY_BRIGHTNESSUP),
+
+       KEY(30, 0, KEY_NUMLOCK),
+       KEY(30, 1, KEY_SCROLLLOCK),
+       KEY(30, 2, KEY_MUTE),
+
+       KEY(31, 4, KEY_HELP),
 };
 
 static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
@@ -224,6 +259,7 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
        unsigned int i;
        unsigned int num_down = 0;
        unsigned long flags;
+       bool fn_keypress = false;
 
        spin_lock_irqsave(&kbc->lock, flags);
        for (i = 0; i < KBC_MAX_KPENT; i++) {
@@ -237,11 +273,28 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
                                MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT);
 
                        scancodes[num_down] = scancode;
-                       keycodes[num_down++] = kbc->keycode[scancode];
+                       keycodes[num_down] = kbc->keycode[scancode];
+                       /* If driver uses Fn map, do not report the Fn key. */
+                       if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map)
+                               fn_keypress = true;
+                       else
+                               num_down++;
                }
 
                val >>= 8;
        }
+
+       /*
+        * If the platform uses Fn keymaps, translate keys on a Fn keypress.
+        * Function keycodes are KBC_MAX_KEY apart from the plain keycodes.
+        */
+       if (fn_keypress) {
+               for (i = 0; i < num_down; i++) {
+                       scancodes[i] += KBC_MAX_KEY;
+                       keycodes[i] = kbc->keycode[scancodes[i]];
+               }
+       }
+
        spin_unlock_irqrestore(&kbc->lock, flags);
 
        tegra_kbc_report_released_keys(kbc->idev,
@@ -594,8 +647,11 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
 
        input_dev->keycode = kbc->keycode;
        input_dev->keycodesize = sizeof(kbc->keycode[0]);
-       input_dev->keycodemax = ARRAY_SIZE(kbc->keycode);
+       input_dev->keycodemax = KBC_MAX_KEY;
+       if (pdata->use_fn_map)
+               input_dev->keycodemax *= 2;
 
+       kbc->use_fn_map = pdata->use_fn_map;
        keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
        matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
                                   input_dev->keycode, input_dev->keybit);
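
The Fn handling above relies on a keycode table twice the normal size: entries [0, KBC_MAX_KEY) hold the plain layer and entries [KBC_MAX_KEY, 2*KBC_MAX_KEY) the Fn layer, so a detected Fn press only has to offset each scancode before the lookup. A standalone sketch, using 128 as a stand-in for KBC_MAX_KEY (the real value comes from the Tegra headers):

#include <stdio.h>

#define KBC_MAX_KEY 128   /* stand-in value for illustration */

static unsigned short keycode[KBC_MAX_KEY * 2];

static unsigned short lookup(unsigned int scancode, int fn_pressed)
{
        if (fn_pressed)
                scancode += KBC_MAX_KEY;   /* switch to the Fn layer */
        return keycode[scancode];
}

int main(void)
{
        keycode[42] = 1;                   /* plain mapping */
        keycode[KBC_MAX_KEY + 42] = 2;     /* Fn mapping    */
        printf("plain=%u fn=%u\n", lookup(42, 0), lookup(42, 1));
        return 0;
}
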
index 25e5d042a72c128c624357915b322b6bff9f7486..7453938bf5efb4f9c6903316349d9f29f0761ae6 100644 (file)
 #define SYN_EXT_CAP_REQUESTS(c)                (((c) & 0x700000) >> 20)
 #define SYN_CAP_MULTI_BUTTON_NO(ec)    (((ec) & 0x00f000) >> 12)
 #define SYN_CAP_PRODUCT_ID(ec)         (((ec) & 0xff0000) >> 16)
+
+/*
+ * The following describes response for the 0x0c query.
+ *
+ * byte        mask    name                    meaning
+ * ----        ----    -------                 ------------
+ * 1   0x01    adjustable threshold    capacitive button sensitivity
+ *                                     can be adjusted
+ * 1   0x02    report max              query 0x0d gives max coord reported
+ * 1   0x04    clearpad                sensor is ClearPad product
+ * 1   0x08    advanced gesture        not particularly meaningful
+ * 1   0x10    clickpad bit 0          1-button ClickPad
+ * 1   0x60    multifinger mode        identifies firmware finger counting
+ *                                     (not reporting!) algorithm.
+ *                                     Not particularly meaningful
+ * 1   0x80    covered pad             W clipped to 14, 15 == pad mostly covered
+ * 2   0x01    clickpad bit 1          2-button ClickPad
+ * 2   0x02    deluxe LED controls     touchpad supports LED commands
+ *                                     a la multimedia control bar
+ * 2   0x04    reduced filtering       firmware does less filtering on
+ *                                     position data, driver should watch
+ *                                     for noise.
+ */
 #define SYN_CAP_CLICKPAD(ex0c)         ((ex0c) & 0x100000) /* 1-button ClickPad */
 #define SYN_CAP_CLICKPAD2BTN(ex0c)     ((ex0c) & 0x000100) /* 2-button ClickPad */
 #define SYN_CAP_MAX_DIMENSIONS(ex0c)   ((ex0c) & 0x020000)
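
The byte/mask columns in the table above line up with the macros below it because the driver packs the three response bytes of the 0x0c query as (byte1 << 16) | (byte2 << 8) | byte3; the byte-1 0x10 "clickpad bit 0" therefore tests 0x100000 and the byte-2 0x01 "clickpad bit 1" tests 0x000100. A minimal check assuming that packing:

#include <stdio.h>

int main(void)
{
        unsigned int b1 = 0x10, b2 = 0x00, b3 = 0x00;
        unsigned int ex0c = (b1 << 16) | (b2 << 8) | b3;

        printf("clickpad(1-btn)=%d clickpad(2-btn)=%d\n",
               !!(ex0c & 0x100000), !!(ex0c & 0x000100));
        return 0;
}
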
index 7c38d1fbabf2339afeedd1ab2d21ec909de61621..ba70058e2be3ae2bc6a3289c08d541f336a4aad8 100644 (file)
@@ -299,7 +299,7 @@ static int serio_queue_event(void *object, struct module *owner,
        event->owner = owner;
 
        list_add_tail(&event->node, &serio_event_list);
-       schedule_work(&serio_event_work);
+       queue_work(system_long_wq, &serio_event_work);
 
 out:
        spin_unlock_irqrestore(&serio_event_lock, flags);
index 18f8798442fa53ac8a7d91d2684a81e678970642..7bd5baa547beb2cce68ce2412460f8bbd381806f 100644 (file)
@@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a,
   stream interface.
   If synchronous service was requested, then function
   does return amount of data written to stream.
-  'final' does indicate that pice of data to be written is
+  'final' does indicate that piece of data to be written is
   final part of frame (necessary only by structured datatransfer)
  return  0 if zero length packet was written
   return -1 if stream is full
index 8a2f767f26d80c2dc3c949914049cf14ef591ff0..0ed7f6bc2a7fb4ea9cd30d7879d4d4d1ab7a134e 100644 (file)
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
 
        if (md_check_no_bitmap(mddev))
                return -EINVAL;
-       mddev->queue->queue_lock = &mddev->queue->__queue_lock;
        conf = linear_conf(mddev, mddev->raid_disks);
 
        if (!conf)
index 0cc30ecda4c128196cd147fc06f8cf9ddbb1085b..818313e277e7c663c9273419bd3929890fec4611 100644 (file)
@@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
 {
        mddev_t *mddev, *new = NULL;
 
+       if (unit && MAJOR(unit) != MD_MAJOR)
+               unit &= ~((1<<MdpMinorShift)-1);
+
  retry:
        spin_lock(&all_mddevs_lock);
 
@@ -4138,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
        }
 
        mddev->array_sectors = sectors;
-       set_capacity(mddev->gendisk, mddev->array_sectors);
-       if (mddev->pers)
+       if (mddev->pers) {
+               set_capacity(mddev->gendisk, mddev->array_sectors);
                revalidate_disk(mddev->gendisk);
-
+       }
        return len;
 }
 
@@ -4624,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
        }
        set_capacity(mddev->gendisk, mddev->array_sectors);
        revalidate_disk(mddev->gendisk);
+       mddev->changed = 1;
        kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
 out:
        return err;
@@ -4712,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
        mddev->sync_speed_min = mddev->sync_speed_max = 0;
        mddev->recovery = 0;
        mddev->in_sync = 0;
+       mddev->changed = 0;
        mddev->degraded = 0;
        mddev->safemode = 0;
        mddev->bitmap_info.offset = 0;
@@ -4827,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 
                set_capacity(disk, 0);
                mutex_unlock(&mddev->open_mutex);
+               mddev->changed = 1;
                revalidate_disk(disk);
 
                if (mddev->ro)
@@ -6011,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
        atomic_inc(&mddev->openers);
        mutex_unlock(&mddev->open_mutex);
 
-       check_disk_size_change(mddev->gendisk, bdev);
+       check_disk_change(bdev);
  out:
        return err;
 }
@@ -6026,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
 
        return 0;
 }
+
+static int md_media_changed(struct gendisk *disk)
+{
+       mddev_t *mddev = disk->private_data;
+
+       return mddev->changed;
+}
+
+static int md_revalidate(struct gendisk *disk)
+{
+       mddev_t *mddev = disk->private_data;
+
+       mddev->changed = 0;
+       return 0;
+}
 static const struct block_device_operations md_fops =
 {
        .owner          = THIS_MODULE,
@@ -6036,6 +6057,8 @@ static const struct block_device_operations md_fops =
        .compat_ioctl   = md_compat_ioctl,
 #endif
        .getgeo         = md_getgeo,
+       .media_changed  = md_media_changed,
+       .revalidate_disk= md_revalidate,
 };
 
 static int md_thread(void * arg)
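
The two new block_device_operations above implement the standard removable-media handshake: the resize and stop paths set mddev->changed, md_open() calls check_disk_change(), and the block layer then asks media_changed() and revalidates (clearing the flag) when it returns true. The protocol, reduced to its flag:

#include <stdio.h>

static int changed;

static int media_changed(void) { return changed; }
static int revalidate(void)    { changed = 0; return 0; }

static void check_disk_change_sketch(void)
{
        if (media_changed())
                revalidate();   /* reread the partition table here */
}

int main(void)
{
        changed = 1;            /* e.g. array_size_store() resized us */
        check_disk_change_sketch();
        printf("changed=%d\n", changed);
        return 0;
}
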
index 7e90b8593b2a4f0f292759f1152348134ac0035e..12215d437fcc28b4d195c1544747ff51d2142bc3 100644 (file)
@@ -274,6 +274,8 @@ struct mddev_s
        atomic_t                        active;         /* general refcount */
        atomic_t                        openers;        /* number of active opens */
 
+       int                             changed;        /* True if we might need to
+                                                        * reread partition info */
        int                             degraded;       /* whether md should consider
                                                         * adding a spare
                                                         */
index 6d7ddf32ef2ec932040d0611cbf109b82a7a656c..3a62d440e27b8105bb778d36950575f77fdd28a3 100644 (file)
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
         * bookkeeping area. [whatever we allocate in multipath_run(),
         * should be freed in multipath_stop()]
         */
-       mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
        conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
        mddev->private = conf;
index 637a96855edb2b6c9c99e988fcdb78b696e703b2..c0ac457f1218ca52a2d6d237be051a70198f0936 100644 (file)
@@ -361,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
        if (md_check_no_bitmap(mddev))
                return -EINVAL;
        blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
-       mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
        /* if private is not null, we are here after takeover */
        if (mddev->private == NULL) {
@@ -670,6 +669,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev)
        mddev->new_layout = 0;
        mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
        mddev->delta_disks = 1 - mddev->raid_disks;
+       mddev->raid_disks = 1;
        /* make sure it will be not marked as dirty */
        mddev->recovery_cp = MaxSector;
 
index a23ffa397ba91b0cd4dc2bb148f50122d72d6beb..06cd712807d0c2d81c053e352a5f0cc2e73ce1b3 100644 (file)
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
        if (conf->pending_bio_list.head) {
                struct bio *bio;
                bio = bio_list_get(&conf->pending_bio_list);
+               /* Only take the spinlock to quiet a warning */
+               spin_lock(conf->mddev->queue->queue_lock);
                blk_remove_plug(conf->mddev->queue);
+               spin_unlock(conf->mddev->queue->queue_lock);
                spin_unlock_irq(&conf->device_lock);
                /* flush any pending bitmap writes to
                 * disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                atomic_inc(&r1_bio->remaining);
                spin_lock_irqsave(&conf->device_lock, flags);
                bio_list_add(&conf->pending_bio_list, mbio);
-               blk_plug_device(mddev->queue);
+               blk_plug_device_unlocked(mddev->queue);
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
        r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
        if (IS_ERR(conf))
                return PTR_ERR(conf);
 
-       mddev->queue->queue_lock = &conf->device_lock;
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
index 3b607b28741b8e666c0a19d43477203c475e7e42..747d061d8e05817878ac102a6438714d181e4356 100644 (file)
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
        if (conf->pending_bio_list.head) {
                struct bio *bio;
                bio = bio_list_get(&conf->pending_bio_list);
+               /* Spinlock only taken to quiet a warning */
+               spin_lock(conf->mddev->queue->queue_lock);
                blk_remove_plug(conf->mddev->queue);
+               spin_unlock(conf->mddev->queue->queue_lock);
                spin_unlock_irq(&conf->device_lock);
                /* flush any pending bitmap writes to disk
                 * before proceeding w/ I/O */
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                atomic_inc(&r10_bio->remaining);
                spin_lock_irqsave(&conf->device_lock, flags);
                bio_list_add(&conf->pending_bio_list, mbio);
-               blk_plug_device(mddev->queue);
+               blk_plug_device_unlocked(mddev->queue);
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
 
@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
        if (!conf)
                goto out;
 
-       mddev->queue->queue_lock = &conf->device_lock;
-
        mddev->thread = conf->thread;
        conf->thread = NULL;
 
index 702812824195ae7c0a6333381659575f552a8f62..78536fdbd87fec133894a10bdabd027061cee0eb 100644 (file)
@@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev)
 
                mddev->queue->backing_dev_info.congested_data = mddev;
                mddev->queue->backing_dev_info.congested_fn = raid5_congested;
-               mddev->queue->queue_lock = &conf->device_lock;
                mddev->queue->unplug_fn = raid5_unplug_queue;
 
                chunk_size = mddev->chunk_sectors << 9;
index bc6a67768af1ed7a7ecb35b6af176895bf69e533..8c4852114eeb41a06f6ff8406991957c6c01274a 100644 (file)
@@ -658,13 +658,13 @@ static int tda8290_probe(struct tuner_i2c_props *i2c_props)
 #define TDA8290_ID 0x89
        u8 reg = 0x1f, id;
        struct i2c_msg msg_read[] = {
-               { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
-               { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
+               { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
+               { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
        };
 
        /* detect tda8290 */
        if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
-               printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
+               printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
                               __func__, reg);
                return -ENODEV;
        }
@@ -685,13 +685,13 @@ static int tda8295_probe(struct tuner_i2c_props *i2c_props)
 #define TDA8295C2_ID 0x8b
        u8 reg = 0x2f, id;
        struct i2c_msg msg_read[] = {
-               { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
-               { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
+               { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
+               { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
        };
 
-       /* detect tda8290 */
+       /* detect tda8295 */
        if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
-               printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
+               printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
                               __func__, reg);
                return -ENODEV;
        }
index defd83964ce25556d47d6e3b3241d8b61eb65719..193cdb77b76a278cf56f5cfc0afa63d1cd0a8bb6 100644 (file)
@@ -870,6 +870,23 @@ static int dib7070p_tuner_attach(struct dvb_usb_adapter *adap)
        return 0;
 }
 
+static int stk7700p_pid_filter(struct dvb_usb_adapter *adapter, int index,
+               u16 pid, int onoff)
+{
+       struct dib0700_state *st = adapter->dev->priv;
+       if (st->is_dib7000pc)
+               return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
+       return dib7000m_pid_filter(adapter->fe, index, pid, onoff);
+}
+
+static int stk7700p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
+{
+       struct dib0700_state *st = adapter->dev->priv;
+       if (st->is_dib7000pc)
+               return dib7000p_pid_filter_ctrl(adapter->fe, onoff);
+       return dib7000m_pid_filter_ctrl(adapter->fe, onoff);
+}
+
 static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff)
 {
     return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
@@ -1875,8 +1892,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
                        {
                                .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
                                .pid_filter_count = 32,
-                               .pid_filter       = stk70x0p_pid_filter,
-                               .pid_filter_ctrl  = stk70x0p_pid_filter_ctrl,
+                               .pid_filter       = stk7700p_pid_filter,
+                               .pid_filter_ctrl  = stk7700p_pid_filter_ctrl,
                                .frontend_attach  = stk7700p_frontend_attach,
                                .tuner_attach     = stk7700p_tuner_attach,
 
index 9eea4188303b37dbb8686fcade696655d6e7f7ef..46ccd01a76967c9c2b192d8cec9dfa6e0dc1f666 100644 (file)
@@ -659,7 +659,7 @@ static int lme2510_download_firmware(struct usb_device *dev,
 }
 
 /* Default firmware for LME2510C */
-const char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw";
+char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw";
 
 static void lme_coldreset(struct usb_device *dev)
 {
@@ -1006,7 +1006,7 @@ static struct dvb_usb_device_properties lme2510c_properties = {
        .caps = DVB_USB_IS_AN_I2C_ADAPTER,
        .usb_ctrl = DEVICE_SPECIFIC,
        .download_firmware = lme2510_download_firmware,
-       .firmware = lme_firmware,
+       .firmware = (const char *)&lme_firmware,
        .size_of_priv = sizeof(struct lme2510_state),
        .num_adapters = 1,
        .adapter = {
@@ -1109,5 +1109,5 @@ module_exit(lme2510_module_exit);
 
 MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
 MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
-MODULE_VERSION("1.74");
+MODULE_VERSION("1.75");
 MODULE_LICENSE("GPL");
index c7f5ccf54aa5f1ea93b182e59a070ee234e4730b..289a79837f247faa09591b4b459638d5213596a4 100644 (file)
@@ -1285,6 +1285,25 @@ struct i2c_adapter * dib7000m_get_i2c_master(struct dvb_frontend *demod, enum di
 }
 EXPORT_SYMBOL(dib7000m_get_i2c_master);
 
+int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
+{
+       struct dib7000m_state *state = fe->demodulator_priv;
+       u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef;
+       val |= (onoff & 0x1) << 4;
+       dprintk("PID filter enabled %d", onoff);
+       return dib7000m_write_word(state, 294 + state->reg_offs, val);
+}
+EXPORT_SYMBOL(dib7000m_pid_filter_ctrl);
+
+int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
+{
+       struct dib7000m_state *state = fe->demodulator_priv;
+       dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
+       return dib7000m_write_word(state, 300 + state->reg_offs + id,
+                       onoff ? (1 << 13) | pid : 0);
+}
+EXPORT_SYMBOL(dib7000m_pid_filter);
+
 #if 0
 /* used with some prototype boards */
 int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
index 113819ce9f0d1f9a1d4d7d7460b487d4c15252c1..81fcf2241c64c10e78ca1e1b8adba6b69b3bfb21 100644 (file)
@@ -46,6 +46,8 @@ extern struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap,
 extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *,
                                                   enum dibx000_i2c_interface,
                                                   int);
+extern int dib7000m_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff);
+extern int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff);
 #else
 static inline
 struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap,
@@ -63,6 +65,19 @@ struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *demod,
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
        return NULL;
 }
+static inline int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id,
+                                               u16 pid, u8 onoff)
+{
+       printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+       return -ENODEV;
+}
+
+static inline int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe,
+                                               uint8_t onoff)
+{
+       printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+       return -ENODEV;
+}
 #endif
 
 /* TODO
index 59feeb84aec7f1933e9ab7821c938929ebb5da2e..10a432a79d00b8cc464eb4058fce7f125738a62b 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include <asm/io.h>
-#include <asm/pgtable.h>
 #include <asm/page.h>
 #include <linux/kmod.h>
 #include <linux/vmalloc.h>
index 73230ff93b8ac0d117801a9b98ab8fbb69e6e11a..01f258a2a57adfd17a287153c9991183ab064342 100644 (file)
@@ -112,7 +112,7 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
 {
        ktime_t                 now;
        s64                     delta; /* ns */
-       struct ir_raw_event     ev;
+       DEFINE_IR_RAW_EVENT(ev);
        int                     rc = 0;
 
        if (!dev->raw)
@@ -125,7 +125,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
         * being called for the first time, note that delta can't
         * possibly be negative.
         */
-       ev.duration = 0;
        if (delta > IR_MAX_DURATION || !dev->raw->last_type)
                type |= IR_START_EVENT;
        else
index 6df0a49806452f640667f2c783cfdbeabcd99010..e4f8eac7f7173b1e72bb2baee585ce70d4b1f01f 100644 (file)
@@ -148,6 +148,7 @@ enum mceusb_model_type {
        MCE_GEN2_TX_INV,
        POLARIS_EVK,
        CX_HYBRID_TV,
+       MULTIFUNCTION,
 };
 
 struct mceusb_model {
@@ -155,9 +156,10 @@ struct mceusb_model {
        u32 mce_gen2:1;
        u32 mce_gen3:1;
        u32 tx_mask_normal:1;
-       u32 is_polaris:1;
        u32 no_tx:1;
 
+       int ir_intfnum;
+
        const char *rc_map;     /* Allow specify a per-board map */
        const char *name;       /* per-board name */
 };
@@ -179,7 +181,6 @@ static const struct mceusb_model mceusb_model[] = {
                .tx_mask_normal = 1,
        },
        [POLARIS_EVK] = {
-               .is_polaris = 1,
                /*
                 * In fact, the EVK is shipped without
                 * remotes, but we should have something handy,
@@ -189,10 +190,13 @@ static const struct mceusb_model mceusb_model[] = {
                .name = "Conexant Hybrid TV (cx231xx) MCE IR",
        },
        [CX_HYBRID_TV] = {
-               .is_polaris = 1,
                .no_tx = 1, /* tx isn't wired up at all */
                .name = "Conexant Hybrid TV (cx231xx) MCE IR",
        },
+       [MULTIFUNCTION] = {
+               .mce_gen2 = 1,
+               .ir_intfnum = 2,
+       },
 };
 
 static struct usb_device_id mceusb_dev_table[] = {
@@ -216,8 +220,9 @@ static struct usb_device_id mceusb_dev_table[] = {
        { USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
        /* Philips/Spinel plus IR transceiver for ASUS */
        { USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
-       /* Realtek MCE IR Receiver */
-       { USB_DEVICE(VENDOR_REALTEK, 0x0161) },
+       /* Realtek MCE IR Receiver and card reader */
+       { USB_DEVICE(VENDOR_REALTEK, 0x0161),
+         .driver_info = MULTIFUNCTION },
        /* SMK/Toshiba G83C0004D410 */
        { USB_DEVICE(VENDOR_SMK, 0x031d),
          .driver_info = MCE_GEN2_TX_INV },
@@ -1101,7 +1106,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
        bool is_gen3;
        bool is_microsoft_gen1;
        bool tx_mask_normal;
-       bool is_polaris;
+       int ir_intfnum;
 
        dev_dbg(&intf->dev, "%s called\n", __func__);
 
@@ -1110,13 +1115,11 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
        is_gen3 = mceusb_model[model].mce_gen3;
        is_microsoft_gen1 = mceusb_model[model].mce_gen1;
        tx_mask_normal = mceusb_model[model].tx_mask_normal;
-       is_polaris = mceusb_model[model].is_polaris;
+       ir_intfnum = mceusb_model[model].ir_intfnum;
 
-       if (is_polaris) {
-               /* Interface 0 is IR */
-               if (idesc->desc.bInterfaceNumber)
-                       return -ENODEV;
-       }
+       /* There are multi-function devices with non-IR interfaces */
+       if (idesc->desc.bInterfaceNumber != ir_intfnum)
+               return -ENODEV;
 
        /* step through the endpoints to find first bulk in and out endpoint */
        for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
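
The probe gate above replaces the Polaris-specific special case with a per-model IR interface number, defaulting to 0 and set to 2 for the Realtek combo device. The check in isolation:

#include <errno.h>
#include <stdio.h>

struct model { int ir_intfnum; };

static int probe_gate(const struct model *m, int bInterfaceNumber)
{
        if (bInterfaceNumber != m->ir_intfnum)
                return -ENODEV;   /* not the IR interface, skip it */
        return 0;                 /* proceed to endpoint discovery */
}

int main(void)
{
        struct model multifunction = { .ir_intfnum = 2 };

        printf("intf0 -> %d, intf2 -> %d\n",
               probe_gate(&multifunction, 0), probe_gate(&multifunction, 2));
        return 0;
}
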
index 273d9d674792db39170486ec3fa75e0c3626651e..d4d64492a05713d3c19998029983ef76c092ae06 100644 (file)
@@ -385,8 +385,9 @@ static void nvt_cir_regs_init(struct nvt_dev *nvt)
 
 static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
 {
-       /* set number of bytes needed for wake key comparison (default 67) */
-       nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_LEN, CIR_WAKE_FIFO_CMP_DEEP);
+       /* set number of bytes needed for wake from s3 (default 65) */
+       nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES,
+                              CIR_WAKE_FIFO_CMP_DEEP);
 
        /* set tolerance/variance allowed per byte during wake compare */
        nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
index 1df82351cb0390d420813eddee8a8e5e59424d3f..048135eea70201e12cc63a103334e9c8606d45fa 100644 (file)
@@ -305,8 +305,11 @@ struct nvt_dev {
 #define CIR_WAKE_IRFIFOSTS_RX_EMPTY    0x20
 #define CIR_WAKE_IRFIFOSTS_RX_FULL     0x10
 
-/* CIR Wake FIFO buffer is 67 bytes long */
-#define CIR_WAKE_FIFO_LEN              67
+/*
+ * The CIR Wake FIFO buffer is 67 bytes long, but the stock remote wakes
+ * the system comparing only 65 bytes (fails with this set to 67)
+ */
+#define CIR_WAKE_FIFO_CMP_BYTES                65
 /* CIR Wake byte comparison tolerance */
 #define CIR_WAKE_CMP_TOLERANCE         5
 
index 512a2f4ada0e113ccc213ff5a5c50d0d93eaaf90..5b4422ef4e6d4705566bc8f45a23cf6a9e9719fc 100644 (file)
@@ -850,7 +850,7 @@ static ssize_t store_protocols(struct device *device,
                        count++;
                } else {
                        for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
-                               if (!strncasecmp(tmp, proto_names[i].name, strlen(proto_names[i].name))) {
+                               if (!strcasecmp(tmp, proto_names[i].name)) {
                                        tmp += strlen(proto_names[i].name);
                                        mask = proto_names[i].type;
                                        break;
index e41e4ad5cc4009213f4ead95ae991cbd9a8dce87..9c475c600fc9c9a0d8a5f7a568dae7a88579ee4d 100644 (file)
@@ -1758,7 +1758,12 @@ static int vidioc_reqbufs(struct file *file, void *priv,
        if (rc < 0)
                return rc;
 
-       return videobuf_reqbufs(&fh->vb_vidq, rb);
+       if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+               rc = videobuf_reqbufs(&fh->vb_vidq, rb);
+       else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+               rc = videobuf_reqbufs(&fh->vb_vbiq, rb);
+
+       return rc;
 }
 
 static int vidioc_querybuf(struct file *file, void *priv,
@@ -1772,7 +1777,12 @@ static int vidioc_querybuf(struct file *file, void *priv,
        if (rc < 0)
                return rc;
 
-       return videobuf_querybuf(&fh->vb_vidq, b);
+       if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+               rc = videobuf_querybuf(&fh->vb_vidq, b);
+       else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+               rc = videobuf_querybuf(&fh->vb_vbiq, b);
+
+       return rc;
 }
 
 static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
@@ -1785,7 +1795,12 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
        if (rc < 0)
                return rc;
 
-       return videobuf_qbuf(&fh->vb_vidq, b);
+       if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+               rc = videobuf_qbuf(&fh->vb_vidq, b);
+       else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+               rc = videobuf_qbuf(&fh->vb_vbiq, b);
+
+       return rc;
 }
 
 static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
@@ -1806,7 +1821,12 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
                dev->greenscreen_detected = 0;
        }
 
-       return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
+       if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+               rc = videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
+       else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+               rc = videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags & O_NONBLOCK);
+
+       return rc;
 }
 
 static struct v4l2_file_operations au0828_v4l_fops = {
index 87177733cf925594532adc82bd48e623bbd46d76..68ad1963f421c59be2b5f34e518d76e94c8fc120 100644 (file)
@@ -95,6 +95,53 @@ static const struct cx18_card cx18_card_hvr1600_esmt = {
        .i2c = &cx18_i2c_std,
 };
 
+static const struct cx18_card cx18_card_hvr1600_s5h1411 = {
+       .type = CX18_CARD_HVR_1600_S5H1411,
+       .name = "Hauppauge HVR-1600",
+       .comment = "Simultaneous Digital and Analog TV capture supported\n",
+       .v4l2_capabilities = CX18_CAP_ENCODER,
+       .hw_audio_ctrl = CX18_HW_418_AV,
+       .hw_muxer = CX18_HW_CS5345,
+       .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER |
+                 CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL |
+                 CX18_HW_Z8F0811_IR_HAUP,
+       .video_inputs = {
+               { CX18_CARD_INPUT_VID_TUNER,  0, CX18_AV_COMPOSITE7 },
+               { CX18_CARD_INPUT_SVIDEO1,    1, CX18_AV_SVIDEO1    },
+               { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 },
+               { CX18_CARD_INPUT_SVIDEO2,    2, CX18_AV_SVIDEO2    },
+               { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 },
+       },
+       .audio_inputs = {
+               { CX18_CARD_INPUT_AUD_TUNER,
+                 CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 },
+               { CX18_CARD_INPUT_LINE_IN1,
+                 CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 },
+               { CX18_CARD_INPUT_LINE_IN2,
+                 CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 },
+       },
+       .radio_input = { CX18_CARD_INPUT_AUD_TUNER,
+                        CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 },
+       .ddr = {
+               /* ESMT M13S128324A-5B memory */
+               .chip_config = 0x003,
+               .refresh = 0x30c,
+               .timing1 = 0x44220e82,
+               .timing2 = 0x08,
+               .tune_lane = 0,
+               .initial_emrs = 0,
+       },
+       .gpio_init.initial_value = 0x3001,
+       .gpio_init.direction = 0x3001,
+       .gpio_i2c_slave_reset = {
+               .active_lo_mask = 0x3001,
+               .msecs_asserted = 10,
+               .msecs_recovery = 40,
+               .ir_reset_mask  = 0x0001,
+       },
+       .i2c = &cx18_i2c_std,
+};
+
 static const struct cx18_card cx18_card_hvr1600_samsung = {
        .type = CX18_CARD_HVR_1600_SAMSUNG,
        .name = "Hauppauge HVR-1600 (Preproduction)",
@@ -523,7 +570,8 @@ static const struct cx18_card *cx18_card_list[] = {
        &cx18_card_toshiba_qosmio_dvbt,
        &cx18_card_leadtek_pvr2100,
        &cx18_card_leadtek_dvr3100h,
-       &cx18_card_gotview_dvd3
+       &cx18_card_gotview_dvd3,
+       &cx18_card_hvr1600_s5h1411
 };
 
 const struct cx18_card *cx18_get_card(u16 index)
index 944af8adbe0c8095fb59ee11d50b567d25cc024c..b1c3cbd9274387d253e6c3506b6a01c547d8d156 100644 (file)
@@ -157,6 +157,7 @@ MODULE_PARM_DESC(cardtype,
                 "\t\t\t 7 = Leadtek WinFast PVR2100\n"
                 "\t\t\t 8 = Leadtek WinFast DVR3100 H\n"
                 "\t\t\t 9 = GoTView PCI DVD3 Hybrid\n"
+                "\t\t\t 10 = Hauppauge HVR 1600 (S5H1411)\n"
                 "\t\t\t 0 = Autodetect (default)\n"
                 "\t\t\t-1 = Ignore this card\n\t\t");
 MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60");
@@ -337,6 +338,7 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
        switch (cx->card->type) {
        case CX18_CARD_HVR_1600_ESMT:
        case CX18_CARD_HVR_1600_SAMSUNG:
+       case CX18_CARD_HVR_1600_S5H1411:
                tveeprom_hauppauge_analog(&c, tv, eedata);
                break;
        case CX18_CARD_YUAN_MPC718:
@@ -365,7 +367,25 @@ static void cx18_process_eeprom(struct cx18 *cx)
           from the model number. Use the cardtype module option if you
           have one of these preproduction models. */
        switch (tv.model) {
-       case 74000 ... 74999:
+       case 74301: /* Retail models */
+       case 74321:
+       case 74351: /* OEM models */
+       case 74361:
+               /* Digital side is s5h1411/tda18271 */
+               cx->card = cx18_get_card(CX18_CARD_HVR_1600_S5H1411);
+               break;
+       case 74021: /* Retail models */
+       case 74031:
+       case 74041:
+       case 74141:
+       case 74541: /* OEM models */
+       case 74551:
+       case 74591:
+       case 74651:
+       case 74691:
+       case 74751:
+       case 74891:
+               /* Digital side is s5h1409/mxl5005s */
                cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
                break;
        case 0x718:
@@ -377,7 +397,8 @@ static void cx18_process_eeprom(struct cx18 *cx)
                CX18_ERR("Invalid EEPROM\n");
                return;
        default:
-               CX18_ERR("Unknown model %d, defaulting to HVR-1600\n", tv.model);
+               CX18_ERR("Unknown model %d, defaulting to original HVR-1600 "
+                        "(cardtype=1)\n", tv.model);
                cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
                break;
        }
index 306caac6d3fc05a15dfcfc7998e51e1bf10c168f..f736679d25178a5e78ae815f24ca006779f66eaa 100644 (file)
@@ -85,7 +85,8 @@
 #define CX18_CARD_LEADTEK_PVR2100     6 /* Leadtek WinFast PVR2100 */
 #define CX18_CARD_LEADTEK_DVR3100H    7 /* Leadtek WinFast DVR3100 H */
 #define CX18_CARD_GOTVIEW_PCI_DVD3    8 /* GoTView PCI DVD3 Hybrid */
-#define CX18_CARD_LAST               8
+#define CX18_CARD_HVR_1600_S5H1411    9 /* Hauppauge HVR 1600 s5h1411/tda18271 */
+#define CX18_CARD_LAST               9
 
 #define CX18_ENC_STREAM_TYPE_MPG  0
 #define CX18_ENC_STREAM_TYPE_TS   1
index f0381d62518d52b1e491795a3ba53a7d7bbca6a3..f41922bd402025118a9e35c8453675ed1ffa62cf 100644 (file)
@@ -29,6 +29,8 @@
 #include "cx18-gpio.h"
 #include "s5h1409.h"
 #include "mxl5005s.h"
+#include "s5h1411.h"
+#include "tda18271.h"
 #include "zl10353.h"
 
 #include <linux/firmware.h>
@@ -76,6 +78,32 @@ static struct s5h1409_config hauppauge_hvr1600_config = {
        .hvr1600_opt   = S5H1409_HVR1600_OPTIMIZE
 };
 
+/*
+ * CX18_CARD_HVR_1600_S5H1411
+ */
+static struct s5h1411_config hcw_s5h1411_config = {
+       .output_mode   = S5H1411_SERIAL_OUTPUT,
+       .gpio          = S5H1411_GPIO_OFF,
+       .vsb_if        = S5H1411_IF_44000,
+       .qam_if        = S5H1411_IF_4000,
+       .inversion     = S5H1411_INVERSION_ON,
+       .status_mode   = S5H1411_DEMODLOCKING,
+       .mpeg_timing   = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
+};
+
+static struct tda18271_std_map hauppauge_tda18271_std_map = {
+       .atsc_6   = { .if_freq = 5380, .agc_mode = 3, .std = 3,
+                     .if_lvl = 6, .rfagc_top = 0x37 },
+       .qam_6    = { .if_freq = 4000, .agc_mode = 3, .std = 0,
+                     .if_lvl = 6, .rfagc_top = 0x37 },
+};
+
+static struct tda18271_config hauppauge_tda18271_config = {
+       .std_map = &hauppauge_tda18271_std_map,
+       .gate    = TDA18271_GATE_DIGITAL,
+       .output_opt = TDA18271_OUTPUT_LT_OFF,
+};
+
 /*
  * CX18_CARD_LEADTEK_DVR3100H
  */
@@ -244,6 +272,7 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
        switch (cx->card->type) {
        case CX18_CARD_HVR_1600_ESMT:
        case CX18_CARD_HVR_1600_SAMSUNG:
+       case CX18_CARD_HVR_1600_S5H1411:
                v = cx18_read_reg(cx, CX18_REG_DMUX_NUM_PORT_0_CONTROL);
                v |= 0x00400000; /* Serial Mode */
                v |= 0x00002000; /* Data Length - Byte */
@@ -455,6 +484,15 @@ static int dvb_register(struct cx18_stream *stream)
                        ret = 0;
                }
                break;
+       case CX18_CARD_HVR_1600_S5H1411:
+               dvb->fe = dvb_attach(s5h1411_attach,
+                                    &hcw_s5h1411_config,
+                                    &cx->i2c_adap[0]);
+               if (dvb->fe != NULL)
+                       dvb_attach(tda18271_attach, dvb->fe,
+                                  0x60, &cx->i2c_adap[0],
+                                  &hauppauge_tda18271_config);
+               break;
        case CX18_CARD_LEADTEK_DVR3100H:
                dvb->fe = dvb_attach(zl10353_attach,
                                     &leadtek_dvr3100h_demod,
index ed3d8f55029b936968d5bf384f70beaaaaa3f4c9..307ff543c2543ba75251121411ca05f59e868d69 100644 (file)
@@ -122,10 +122,6 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
 
        if (!i2c_wait_done(i2c_adap))
                goto eio;
-       if (!i2c_slave_did_ack(i2c_adap)) {
-               retval = -ENXIO;
-               goto err;
-       }
        if (i2c_debug) {
                printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]);
                if (!(ctrl & I2C_NOSTOP))
@@ -158,7 +154,6 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
 
  eio:
        retval = -EIO;
- err:
        if (i2c_debug)
                printk(KERN_ERR " ERR: %d\n", retval);
        return retval;
@@ -209,10 +204,6 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
 
                if (!i2c_wait_done(i2c_adap))
                        goto eio;
-               if (cnt == 0 && !i2c_slave_did_ack(i2c_adap)) {
-                       retval = -ENXIO;
-                       goto err;
-               }
                msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff;
                if (i2c_debug) {
                        dprintk(1, " %02x", msg->buf[cnt]);
@@ -224,7 +215,6 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
 
  eio:
        retval = -EIO;
- err:
        if (i2c_debug)
                printk(KERN_ERR " ERR: %d\n", retval);
        return retval;
index 6fc09dd41b9dd56581ae6d5b9b597beea94ce0d6..35796e0352475b6536bfd47315107e418e6716fa 100644 (file)
@@ -2015,7 +2015,8 @@ static int cx25840_probe(struct i2c_client *client,
                kfree(state);
                return err;
        }
-       v4l2_ctrl_cluster(2, &state->volume);
+       if (!is_cx2583x(state))
+               v4l2_ctrl_cluster(2, &state->volume);
        v4l2_ctrl_handler_setup(&state->hdl);
 
        if (client->dev.platform_data) {
index 9b4faf009196afad6de8f4b3ffeebc0dbd2f05e6..9c29e964d400b955d36074487c17c22e1195bbcd 100644 (file)
@@ -628,22 +628,66 @@ static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
 static void ivtv_irq_dma_err(struct ivtv *itv)
 {
        u32 data[CX2341X_MBOX_MAX_DATA];
+       u32 status;
 
        del_timer(&itv->dma_timer);
+
        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
+       status = read_reg(IVTV_REG_DMASTATUS);
        IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
-                               read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
-       write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
+                               status, itv->cur_dma_stream);
+       /*
+        * We do *not* write back to the IVTV_REG_DMASTATUS register to
+        * clear the error status if either the encoder write (0x02) or
+        * decoder read (0x01) bus master DMA operation does not indicate
+        * completion.  We can race with the DMA engine, which may have
+        * transitioned to completed status *after* we read the register.
+        * Setting an IVTV_REG_DMASTATUS flag back to "busy" status after the
+        * DMA engine has completed will cause the DMA engine to stop working.
+        */
+       status &= 0x3;
+       if (status == 0x3)
+               write_reg(status, IVTV_REG_DMASTATUS);
+
        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
            itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
                struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
 
-               /* retry */
-               if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
+               if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
+                       /* retry */
+                       /*
+                        * FIXME - handle cases of DMA error similar to
+                        * encoder below, except conditioned on status & 0x1
+                        */
                        ivtv_dma_dec_start(s);
-               else
-                       ivtv_dma_enc_start(s);
-               return;
+                       return;
+               } else {
+                       if ((status & 0x2) == 0) {
+                               /*
+                                * CX2341x Bus Master DMA write is ongoing.
+                                * Reset the timer and let it complete.
+                                */
+                               itv->dma_timer.expires =
+                                               jiffies + msecs_to_jiffies(600);
+                               add_timer(&itv->dma_timer);
+                               return;
+                       }
+
+                       if (itv->dma_retries < 3) {
+                               /*
+                                * CX2341x Bus Master DMA write has ended.
+                                * Retry the write, starting with the first
+                                * xfer segment. Just retrying the current
+                                * segment is not sufficient.
+                                */
+                               s->sg_processed = 0;
+                               itv->dma_retries++;
+                               ivtv_dma_enc_start_xfer(s);
+                               return;
+                       }
+                       /* Too many retries, give up on this one */
+               }
+
        }
        if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
                ivtv_udma_start(itv);
index c179041d91f8a3d214c56648473e469f3c516a39..e7e717800ee26f5beca36d03cded34d27b76c478 100644 (file)
@@ -1011,7 +1011,6 @@ static int m2mtest_remove(struct platform_device *pdev)
        v4l2_m2m_release(dev->m2m_dev);
        del_timer_sync(&dev->timer);
        video_unregister_device(dev->vfd);
-       video_device_release(dev->vfd);
        v4l2_device_unregister(&dev->v4l2_dev);
        kfree(dev);
 
index b63f8cafa671749dafeaac6616fa1d2926e5fa3b..561909b65ce6b7b083dcad61e71afcbdae45a9de 100644 (file)
@@ -57,7 +57,7 @@
 #include <linux/usb.h>
 
 #define S2255_MAJOR_VERSION    1
-#define S2255_MINOR_VERSION    20
+#define S2255_MINOR_VERSION    21
 #define S2255_RELEASE          0
 #define S2255_VERSION          KERNEL_VERSION(S2255_MAJOR_VERSION, \
                                               S2255_MINOR_VERSION, \
@@ -312,9 +312,9 @@ struct s2255_fh {
 };
 
 /* current cypress EEPROM firmware version */
-#define S2255_CUR_USB_FWVER    ((3 << 8) | 6)
+#define S2255_CUR_USB_FWVER    ((3 << 8) | 11)
 /* current DSP FW version */
-#define S2255_CUR_DSP_FWVER     8
+#define S2255_CUR_DSP_FWVER     10102
 /* Need DSP version 5+ for video status feature */
 #define S2255_MIN_DSP_STATUS      5
 #define S2255_MIN_DSP_COLORFILTER 8
@@ -492,9 +492,11 @@ static void planar422p_to_yuv_packed(const unsigned char *in,
 
 static void s2255_reset_dsppower(struct s2255_dev *dev)
 {
-       s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b0b, NULL, 0, 1);
+       s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b01, NULL, 0, 1);
        msleep(10);
        s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1);
+       msleep(600);
+       s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1);
        return;
 }
 
index 6a1f9404261277991099dbf446cd949b83ce5dcc..c45e6305b26f464025430e0ce6c77df93ae09af3 100644 (file)
@@ -143,9 +143,9 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
        unsigned long flags;
        struct asic3 *asic;
 
-       desc->chip->ack(irq);
+       desc->irq_data.chip->irq_ack(&desc->irq_data);
 
-       asic = desc->handler_data;
+       asic = get_irq_data(irq);
 
        for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
                u32 status;
index 33c923d215c79452eed9a28cf97b449616d49608..fdd8a1b8bc67dc972fd8d215eab00361e46f728d 100644 (file)
@@ -118,12 +118,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
 
        /* Voice codec interface client */
        cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
-       cell->name = "davinci_vcif";
+       cell->name = "davinci-vcif";
        cell->driver_data = davinci_vc;
 
        /* Voice codec CQ93VC client */
        cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
-       cell->name = "cq93vc";
+       cell->name = "cq93vc-codec";
        cell->driver_data = davinci_vc;
 
        ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
index 627cf577b16d2ea2f696a7806de74ad950494618..e9018d1394ee7d8a6044c1c252a2f6d38f8470c3 100644 (file)
@@ -150,12 +150,12 @@ static inline int __tps6586x_write(struct i2c_client *client,
 static inline int __tps6586x_writes(struct i2c_client *client, int reg,
                                  int len, uint8_t *val)
 {
-       int ret;
+       int ret, i;
 
-       ret = i2c_smbus_write_i2c_block_data(client, reg, len, val);
-       if (ret < 0) {
-               dev_err(&client->dev, "failed writings to 0x%02x\n", reg);
-               return ret;
+       for (i = 0; i < len; i++) {
+               ret = __tps6586x_write(client, reg + i, *(val + i));
+               if (ret < 0)
+                       return ret;
        }
 
        return 0;
index 000cb414a78a3afcbfdcb212c363feb125f86ed2..92b85e28a15ee7edaca835fcc2755ba0eb181c0b 100644 (file)
@@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
        idev->close      = ucb1x00_ts_close;
 
        __set_bit(EV_ABS, idev->evbit);
-       __set_bit(ABS_X, idev->absbit);
-       __set_bit(ABS_Y, idev->absbit);
-       __set_bit(ABS_PRESSURE, idev->absbit);
 
        input_set_drvdata(idev, ts);
 
+       ucb1x00_adc_enable(ts->ucb);
+       ts->x_res = ucb1x00_ts_read_xres(ts);
+       ts->y_res = ucb1x00_ts_read_yres(ts);
+       ucb1x00_adc_disable(ts->ucb);
+
+       input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
+       input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
+       input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
+
        err = input_register_device(idev);
        if (err)
                goto fail;
index 41233c7fa581137fbfc0020e40022e222489b695..f4016a075fd611000f8f6f1059deb8631722a211 100644 (file)
@@ -246,6 +246,16 @@ static int wm8994_suspend(struct device *dev)
        struct wm8994 *wm8994 = dev_get_drvdata(dev);
        int ret;
 
+       /* Don't actually go through with the suspend if the CODEC is
+        * still active (e.g., for audio passthrough from CP). */
+       ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
+       if (ret < 0) {
+               dev_err(dev, "Failed to read power status: %d\n", ret);
+       } else if (ret & WM8994_VMID_SEL_MASK) {
+               dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+               return 0;
+       }
+
        /* GPIO configuration state is saved here since we may be configuring
         * the GPIO alternate functions even if we're not using the gpiolib
         * driver for them.
@@ -261,6 +271,8 @@ static int wm8994_suspend(struct device *dev)
        if (ret < 0)
                dev_err(dev, "Failed to save LDO registers: %d\n", ret);
 
+       wm8994->suspended = true;
+
        ret = regulator_bulk_disable(wm8994->num_supplies,
                                     wm8994->supplies);
        if (ret != 0) {
@@ -276,6 +288,10 @@ static int wm8994_resume(struct device *dev)
        struct wm8994 *wm8994 = dev_get_drvdata(dev);
        int ret;
 
+       /* We may have lied to the PM core about suspending */
+       if (!wm8994->suspended)
+               return 0;
+
        ret = regulator_bulk_enable(wm8994->num_supplies,
                                    wm8994->supplies);
        if (ret != 0) {
@@ -298,6 +314,8 @@ static int wm8994_resume(struct device *dev)
        if (ret < 0)
                dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
 
+       wm8994->suspended = false;
+
        return 0;
 }
 #endif
index 63ee4c1a5315b585b9342dc8427c4c89d6d399d4..b6e1c9a6679edd11ae94738775b813ecf34632df 100644 (file)
@@ -449,6 +449,7 @@ static const struct i2c_device_id bmp085_id[] = {
        { "bmp085", 0 },
        { }
 };
+MODULE_DEVICE_TABLE(i2c, bmp085_id);
 
 static struct i2c_driver bmp085_driver = {
        .driver = {
index 6625c057be05cf5a585d690b0d1d921cd938709b..150b5f3cd401a7c8549e9e15910a1107b713986e 100644 (file)
@@ -1529,7 +1529,7 @@ void mmc_rescan(struct work_struct *work)
         * still present
         */
        if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
-           && mmc_card_is_removable(host))
+           && !(host->caps & MMC_CAP_NONREMOVABLE))
                host->bus_ops->detect(host);
 
        /*
index 5c4a54d9b6a402ed92dad1ad5b9d1252d908d368..ebc62ad4cc567b7aee3313835050770dd1ff14f0 100644 (file)
@@ -792,7 +792,6 @@ int mmc_attach_sdio(struct mmc_host *host)
         */
        mmc_release_host(host);
        err = mmc_add_card(host->card);
-       mmc_claim_host(host);
        if (err)
                goto remove_added;
 
@@ -805,12 +804,12 @@ int mmc_attach_sdio(struct mmc_host *host)
                        goto remove_added;
        }
 
+       mmc_claim_host(host);
        return 0;
 
 
 remove_added:
        /* Remove without lock if the device has been added. */
-       mmc_release_host(host);
        mmc_sdio_remove(host);
        mmc_claim_host(host);
 remove:
index a8c3e1c9b02a78a64a79f5701abac9d1ba29a3e4..4aaa88f8ab5f0323f015dd34c46c4d0f98703ada 100644 (file)
@@ -1230,10 +1230,32 @@ static int inval_cache_and_wait_for_operation(
        sleep_time = chip_op_time / 2;
 
        for (;;) {
+               if (chip->state != chip_state) {
+                       /* Someone's suspended the operation: sleep */
+                       DECLARE_WAITQUEUE(wait, current);
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&chip->wq, &wait);
+                       mutex_unlock(&chip->mutex);
+                       schedule();
+                       remove_wait_queue(&chip->wq, &wait);
+                       mutex_lock(&chip->mutex);
+                       continue;
+               }
+
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;
 
+               if (chip->erase_suspended && chip_state == FL_ERASING)  {
+                       /* Erase suspend occurred while sleeping: reset timeout */
+                       timeo = reset_timeo;
+                       chip->erase_suspended = 0;
+               }
+               if (chip->write_suspended && chip_state == FL_WRITING)  {
+                       /* Write suspend occurred while sleeping: reset timeout */
+                       timeo = reset_timeo;
+                       chip->write_suspended = 0;
+               }
                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
@@ -1257,27 +1279,6 @@ static int inval_cache_and_wait_for_operation(
                        timeo--;
                }
                mutex_lock(&chip->mutex);
-
-               while (chip->state != chip_state) {
-                       /* Someone's suspended the operation: sleep */
-                       DECLARE_WAITQUEUE(wait, current);
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       add_wait_queue(&chip->wq, &wait);
-                       mutex_unlock(&chip->mutex);
-                       schedule();
-                       remove_wait_queue(&chip->wq, &wait);
-                       mutex_lock(&chip->mutex);
-               }
-               if (chip->erase_suspended && chip_state == FL_ERASING)  {
-                       /* Erase suspend occured while sleep: reset timeout */
-                       timeo = reset_timeo;
-                       chip->erase_suspended = 0;
-               }
-               if (chip->write_suspended && chip_state == FL_WRITING)  {
-                       /* Write suspend occured while sleep: reset timeout */
-                       timeo = reset_timeo;
-                       chip->write_suspended = 0;
-               }
        }
 
        /* Done and happy. */
index d72a5fb2d041eb7f57a4257c9de708d061f63e35..4e1be51cc122618d60b69297726b96dcb896a0ef 100644 (file)
@@ -1935,14 +1935,14 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
 }
 
 
-static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
+static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index)
 {
        int i,num_erase_regions;
        uint8_t uaddr;
 
-       if (! (jedec_table[index].devtypes & p_cfi->device_type)) {
+       if (!(jedec_table[index].devtypes & cfi->device_type)) {
                DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n",
-                     jedec_table[index].name, 4 * (1<<p_cfi->device_type));
+                     jedec_table[index].name, 4 * (1<<cfi->device_type));
                return 0;
        }
 
@@ -1950,27 +1950,28 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
 
        num_erase_regions = jedec_table[index].nr_regions;
 
-       p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
-       if (!p_cfi->cfiq) {
+       cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
+       if (!cfi->cfiq) {
                //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
                return 0;
        }
 
-       memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
+       memset(cfi->cfiq, 0, sizeof(struct cfi_ident));
 
-       p_cfi->cfiq->P_ID = jedec_table[index].cmd_set;
-       p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
-       p_cfi->cfiq->DevSize = jedec_table[index].dev_size;
-       p_cfi->cfi_mode = CFI_MODE_JEDEC;
+       cfi->cfiq->P_ID = jedec_table[index].cmd_set;
+       cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
+       cfi->cfiq->DevSize = jedec_table[index].dev_size;
+       cfi->cfi_mode = CFI_MODE_JEDEC;
+       cfi->sector_erase_cmd = CMD(0x30);
 
        for (i=0; i<num_erase_regions; i++){
-               p_cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
+               cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
        }
-       p_cfi->cmdset_priv = NULL;
+       cfi->cmdset_priv = NULL;
 
        /* This may be redundant for some cases, but it doesn't hurt */
-       p_cfi->mfr = jedec_table[index].mfr_id;
-       p_cfi->id = jedec_table[index].dev_id;
+       cfi->mfr = jedec_table[index].mfr_id;
+       cfi->id = jedec_table[index].dev_id;
 
        uaddr = jedec_table[index].uaddr;
 
@@ -1978,8 +1979,8 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
           our brains explode when we see the datasheets talking about address
           lines numbered from A-1 to A18. The CFI table has unlock addresses
           in device-words according to the mode the device is connected in */
-       p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type;
-       p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type;
+       cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / cfi->device_type;
+       cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / cfi->device_type;
 
        return 1;       /* ok */
 }
@@ -2175,7 +2176,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
                                       "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
                                       __func__, cfi->mfr, cfi->id,
                                       cfi->addr_unlock1, cfi->addr_unlock2 );
-                               if (!cfi_jedec_setup(cfi, i))
+                               if (!cfi_jedec_setup(map, cfi, i))
                                        return 0;
                                goto ok_out;
                        }
index 77d64ce19e9f51828e301d6f6eb23f038fef6951..92de7e3a49a5e3b1c6c0590c8c3687cdebd1c132 100644 (file)
@@ -151,6 +151,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
                printk(KERN_ERR MOD_NAME
                       " %s(): Unable to register resource %pR - kernel bug?\n",
                       __func__, &window->rsrc);
+               return -EBUSY;
        }
 
 
index cb20c67995d8ba52652d8765b80dbceb82eba2b8..e0a2373bf0e2fe5ad6fc9423d5db19f96e25de5a 100644 (file)
@@ -413,7 +413,6 @@ error3:
 error2:
        list_del(&new->list);
 error1:
-       kfree(new);
        return ret;
 }
 
index 15682ec8530ed008f8f3f8ddbd16e577306a6407..28af71c61834e8d08bd474a88cddbf8cf5d4bbe0 100644 (file)
@@ -968,6 +968,6 @@ static void __exit omap_nand_exit(void)
 module_init(omap_nand_init);
 module_exit(omap_nand_exit);
 
-MODULE_ALIAS(DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
index e78914938c5c6930cee9b9462bd33cd271bdda2d..ac08750748a351ec3e70dd32825b5385f2a8966e 100644 (file)
@@ -131,7 +131,7 @@ static struct platform_driver generic_onenand_driver = {
        .remove         = __devexit_p(generic_onenand_remove),
 };
 
-MODULE_ALIAS(DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
 
 static int __init generic_onenand_init(void)
 {
index ac31f461cc1c11cef486a5c2b6d7f37f47813e90..c849cacf4b2faab2679f1231982f6cb12e0aa192 100644 (file)
@@ -860,7 +860,7 @@ static void __exit omap2_onenand_exit(void)
 module_init(omap2_onenand_init);
 module_exit(omap2_onenand_exit);
 
-MODULE_ALIAS(DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
 MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
index 39214e51245225056c1cedf9c3404f6cdd51dc2f..7ca0eded2561c2261b16be4dbf1087214eb3b029 100644 (file)
@@ -425,11 +425,6 @@ static irqreturn_t ariadne_interrupt(int irq, void *data)
     int csr0, boguscnt;
     int handled = 0;
 
-    if (dev == NULL) {
-       printk(KERN_WARNING "ariadne_interrupt(): irq for unknown device.\n");
-       return IRQ_NONE;
-    }
-
     lance->RAP = CSR0;                 /* PCnet-ISA Controller Status */
 
     if (!(lance->RDP & INTR))          /* Check if any interrupt has been */
index 653c62475cb601582549a0abe63a4edf65fdfd68..8849699c66c42764511f0ec3a6852b4ee9cc3757 100644 (file)
@@ -22,7 +22,7 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.62.00-5"
+#define DRV_MODULE_VERSION      "1.62.00-6"
 #define DRV_MODULE_RELDATE      "2011/01/30"
 #define BNX2X_BC_VER            0x040200
 
@@ -1211,6 +1211,7 @@ struct bnx2x {
        /* DCBX Negotation results */
        struct dcbx_features                    dcbx_local_feat;
        u32                                     dcbx_error;
+       u32                                     pending_max;
 };
 
 /**
@@ -1613,19 +1614,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define BNX2X_BTR                      4
 #define MAX_SPQ_PENDING                        8
 
-
-/* CMNG constants
-   derived from lab experiments, and not from system spec calculations !!! */
-#define DEF_MIN_RATE                   100
-/* resolution of the rate shaping timer - 100 usec */
-#define RS_PERIODIC_TIMEOUT_USEC       100
-/* resolution of fairness algorithm in usecs -
-   coefficient for calculating the actual t fair */
-#define T_FAIR_COEF                    10000000
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE                                   100
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC                       400
 /* number of bytes in single QM arbitration cycle -
-   coefficient for calculating the fairness timer */
-#define QM_ARB_BYTES                   40000
-#define FAIR_MEM                       2
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES                                   160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES                                                100
+/* how many bytes above threshold for the minimal credit of Min algorithm */
+#define MIN_ABOVE_THRESH                               32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF    ((MIN_ABOVE_THRESH +  QM_ARB_BYTES) * 8 * MIN_RES)
+/* Memory of fairness algorithm: 2 cycles */
+#define FAIR_MEM                                       2
 
 
 #define ATTN_NIG_FOR_FUNC              (1L << 8)
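For reference, the fairness coefficient introduced above expands numerically as follows (straight arithmetic on the constants defined in this hunk):

    /* T_FAIR_COEF = (MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES
     *             = (32768 + 160000) * 8 * 100
     *             = 154214400
     */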
index 710ce5d04c530056a1682dc7992bf6ba414b0b34..a71b329405335b4a93c0b55e9fbb45808b7d39e9 100644 (file)
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 #endif
 }
 
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ *             nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN     12
+/**
+ * Calculate the approximate value of the MSS for this
+ * aggregation using the first packet of it.
+ *
+ * @param bp
+ * @param parsing_flags Parsing flags from the START CQE
+ * @param len_on_bd Total length of the first packet for the
+ *                  aggregation.
+ */
+static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+                                   u16 len_on_bd)
+{
+       /* TPA aggregation won't have IP options or TCP options
+        * other than the timestamp.
+        */
+       u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
+
+
+       /* Check if there was a TCP timestamp; if there is, it will
+        * always be 12 bytes long: nop nop kind length echo val.
+        *
+        * Otherwise FW would close the aggregation.
+        */
+       if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+               hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+       return len_on_bd - hdrs_len;
+}
+
 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
-                              u16 cqe_idx)
+                              u16 cqe_idx, u16 parsing_flags)
 {
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
        /* This is needed in order to enable forwarding support */
        if (frag_size)
-               skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
-                                              max(frag_size, (u32)len_on_bd));
+               skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
+                                                             len_on_bd);
 
 #ifdef BNX2X_STOP_ON_ERROR
        if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
+               u16 parsing_flags =
+                       le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
 
                prefetch(skb);
                prefetch(((char *)(skb)) + L1_CACHE_BYTES);
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                }
 
                if (!bnx2x_fill_frag_skb(bp, fp, skb,
-                                        &cqe->fast_path_cqe, cqe_idx)) {
-                       if ((le16_to_cpu(cqe->fast_path_cqe.
-                           pars_flags.flags) & PARSING_FLAGS_VLAN))
+                                        &cqe->fast_path_cqe, cqe_idx,
+                                        parsing_flags)) {
+                       if (parsing_flags & PARSING_FLAGS_VLAN)
                                __vlan_hwaccel_put_tag(skb,
                                                 le16_to_cpu(cqe->fast_path_cqe.
                                                             vlan_tag));
@@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
 {
        u16 line_speed = bp->link_vars.line_speed;
        if (IS_MF(bp)) {
-               u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
-                                               FUNC_MF_CFG_MAX_BW_MASK) >>
-                                               FUNC_MF_CFG_MAX_BW_SHIFT;
-               /* Calculate the current MAX line speed limit for the DCC
-                * capable devices
+               u16 maxCfg = bnx2x_extract_max_cfg(bp,
+                                                  bp->mf_config[BP_VN(bp)]);
+
+               /* Calculate the current MAX line speed limit for the MF
+                * devices
                 */
-               if (IS_MF_SD(bp)) {
+               if (IS_MF_SI(bp))
+                       line_speed = (line_speed * maxCfg) / 100;
+               else { /* SD mode */
                        u16 vn_max_rate = maxCfg * 100;
 
                        if (vn_max_rate < line_speed)
                                line_speed = vn_max_rate;
-               } else /* IS_MF_SI(bp)) */
-                       line_speed = (line_speed * maxCfg) / 100;
+               }
        }
 
        return line_speed;
@@ -959,6 +996,23 @@ void bnx2x_free_skbs(struct bnx2x *bp)
        bnx2x_free_rx_skbs(bp);
 }
 
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
+{
+       /* load old values */
+       u32 mf_cfg = bp->mf_config[BP_VN(bp)];
+
+       if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
+               /* leave all but MAX value */
+               mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
+
+               /* set new MAX value */
+               mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
+                               & FUNC_MF_CFG_MAX_BW_MASK;
+
+               bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
+       }
+}
+
 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
 {
        int i, offset = 1;
@@ -1427,6 +1481,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
        bnx2x_set_eth_mac(bp, 1);
 
+       if (bp->pending_max) {
+               bnx2x_update_max_mf_config(bp, bp->pending_max);
+               bp->pending_max = 0;
+       }
+
        if (bp->port.pmf)
                bnx2x_initial_phy_init(bp, load_mode);
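The header arithmetic behind bnx2x_set_lro_mss() above can be checked in isolation. A minimal userspace sketch, assuming the usual 14-byte Ethernet, 20-byte IPv4 and 20-byte TCP headers that the driver's sizeof() expressions resolve to (the helper name lro_mss and the sample frame length are illustrative, not part of the driver):

    #include <stdio.h>

    #define TPA_TSTAMP_OPT_LEN 12   /* nop nop kind length echo val */

    static unsigned short lro_mss(unsigned short len_on_bd, int has_tstamp)
    {
            unsigned short hdrs_len = 14 + 20 + 20; /* Eth + IPv4 + TCP */

            if (has_tstamp)
                    hdrs_len += TPA_TSTAMP_OPT_LEN;
            return len_on_bd - hdrs_len;
    }

    int main(void)
    {
            /* a 1514-byte first frame with timestamps -> MSS of 1448 */
            printf("%u\n", (unsigned)lro_mss(1514, 1));
            return 0;
    }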
 
index 03eb4d68e6bbed0f73ea5988bc2d4ad1e8f0a5d8..85ea7f26b51f19778e8137d224bd1a245ab9d643 100644 (file)
@@ -341,6 +341,15 @@ void bnx2x_dcbx_init(struct bnx2x *bp);
  */
 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
 
+/**
+ * Updates MAX part of MF configuration in HW
+ * (if required)
+ *
+ * @param bp
+ * @param value
+ */
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
+
 /* dev_close main block */
 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
 
@@ -1044,4 +1053,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
 void bnx2x_acquire_phy_lock(struct bnx2x *bp);
 void bnx2x_release_phy_lock(struct bnx2x *bp);
 
+/**
+ * Extracts MAX BW part from MF configuration.
+ *
+ * @param bp
+ * @param mf_cfg
+ *
+ * @return u16
+ */
+static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
+{
+       u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+                             FUNC_MF_CFG_MAX_BW_SHIFT;
+       if (!max_cfg) {
+               BNX2X_ERR("Illegal configuration detected for Max BW - "
+                         "using 100 instead\n");
+               max_cfg = 100;
+       }
+       return max_cfg;
+}
+
 #endif /* BNX2X_CMN_H */
index 5b44a8b4850905f1033e4c30a5c0cf943e3dacf0..7e92f9d0dcfdd49cb946bfd5ce112ae15b545af1 100644 (file)
@@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        speed |= (cmd->speed_hi << 16);
 
        if (IS_MF_SI(bp)) {
-               u32 param = 0;
+               u32 part;
                u32 line_speed = bp->link_vars.line_speed;
 
                /* use 10G if no link detected */
@@ -251,23 +251,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                                       REQ_BC_VER_4_SET_MF_BW);
                        return -EINVAL;
                }
-               if (line_speed < speed) {
-                       BNX2X_DEV_INFO("New speed should be less or equal "
-                                      "to actual line speed\n");
+
+               part = (speed * 100) / line_speed;
+
+               if (line_speed < speed || !part) {
+                       BNX2X_DEV_INFO("Speed setting should be in a range "
+                                      "from 1%% to 100%% "
+                                      "of actual line speed\n");
                        return -EINVAL;
                }
-               /* load old values */
-               param = bp->mf_config[BP_VN(bp)];
-
-               /* leave only MIN value */
-               param &= FUNC_MF_CFG_MIN_BW_MASK;
 
-               /* set new MAX value */
-               param |= (((speed * 100) / line_speed)
-                                << FUNC_MF_CFG_MAX_BW_SHIFT)
-                                 & FUNC_MF_CFG_MAX_BW_MASK;
+               if (bp->state != BNX2X_STATE_OPEN)
+                       /* store value for following "load" */
+                       bp->pending_max = part;
+               else
+                       bnx2x_update_max_mf_config(bp, part);
 
-               bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
                return 0;
        }
 
@@ -1781,9 +1780,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
                { 0x100, 0x350 }, /* manuf_info */
                { 0x450,  0xf0 }, /* feature_info */
                { 0x640,  0x64 }, /* upgrade_key_info */
-               { 0x6a4,  0x64 },
                { 0x708,  0x70 }, /* manuf_key_info */
-               { 0x778,  0x70 },
                {     0,     0 }
        };
        __be32 buf[0x350 / 4];
@@ -1933,11 +1930,11 @@ static void bnx2x_self_test(struct net_device *dev,
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
-       if (bp->port.pmf)
-               if (bnx2x_link_test(bp, is_serdes) != 0) {
-                       buf[5] = 1;
-                       etest->flags |= ETH_TEST_FL_FAILED;
-               }
+
+       if (bnx2x_link_test(bp, is_serdes) != 0) {
+               buf[5] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
 
 #ifdef BNX2X_EXTRA_DEBUG
        bnx2x_panic_dump(bp);
index 5a268e9a0895bf0a092522974d61673a6c34da3d..fa6dbe3f2058d799e664d47f9515b5826b55719c 100644 (file)
@@ -241,7 +241,7 @@ static const struct {
        /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
         * want to handle "system kill" flow at the moment.
         */
-       BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff),
+       BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
        BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
        BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
        BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
index d584d32c747dc447eb12bdcbfb1fe6704be47213..aa032339e321b48889ae521fb198c6a590b92c58 100644 (file)
@@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
                vn_max_rate = 0;
 
        } else {
+               u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
                vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                                FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-               /* If min rate is zero - set it to 1 */
+               /* If fairness is enabled (not all min rates are zero) and
+                  the current min rate is zero, set it to 1.
+                  This is a requirement of the algorithm. */
                if (bp->vn_weight_sum && (vn_min_rate == 0))
                        vn_min_rate = DEF_MIN_RATE;
-               vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
-                               FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+
+               if (IS_MF_SI(bp))
+                       /* maxCfg is in percent of link speed */
+                       vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+               else
+                       /* maxCfg is absolute in 100Mb units */
+                       vn_max_rate = maxCfg * 100;
        }
 
        DP(NETIF_MSG_IFUP,
@@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
                m_fair_vn.vn_credit_delta =
                        max_t(u32, (vn_min_rate * (T_FAIR_COEF /
                                                   (8 * bp->vn_weight_sum))),
-                             (bp->cmng.fair_vars.fair_threshold * 2));
+                             (bp->cmng.fair_vars.fair_threshold +
+                                                       MIN_ABOVE_THRESH));
                DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
                   m_fair_vn.vn_credit_delta);
        }
@@ -2082,8 +2092,9 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
                bnx2x_calc_vn_weight_sum(bp);
 
                /* calculate and set min-max rate for each vn */
-               for (vn = VN_0; vn < E1HVN_MAX; vn++)
-                       bnx2x_init_vn_minmax(bp, vn);
+               if (bp->port.pmf)
+                       for (vn = VN_0; vn < E1HVN_MAX; vn++)
+                               bnx2x_init_vn_minmax(bp, vn);
 
                /* always enable rate shaping and fairness */
                bp->cmng.flags.cmng_enables |=
@@ -2152,13 +2163,6 @@ static void bnx2x_link_attn(struct bnx2x *bp)
                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
        }
 
-       /* indicate link status only if link status actually changed */
-       if (prev_link_status != bp->link_vars.link_status)
-               bnx2x_link_report(bp);
-
-       if (IS_MF(bp))
-               bnx2x_link_sync_notify(bp);
-
        if (bp->link_vars.link_up && bp->link_vars.line_speed) {
                int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
 
@@ -2170,6 +2174,13 @@ static void bnx2x_link_attn(struct bnx2x *bp)
                        DP(NETIF_MSG_IFUP,
                           "single function mode without fairness\n");
        }
+
+       if (IS_MF(bp))
+               bnx2x_link_sync_notify(bp);
+
+       /* indicate link status only if link status actually changed */
+       if (prev_link_status != bp->link_vars.link_status)
+               bnx2x_link_report(bp);
 }
 
 void bnx2x__link_status_update(struct bnx2x *bp)
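To make the SI/SD split in bnx2x_init_vn_minmax() above concrete, a worked reading of the two branches with illustrative values:

    /* SI mode: maxCfg is a percentage of the link speed.
     *   line_speed = 10000 Mbps, maxCfg = 30  ->  10000 * 30 / 100 = 3000
     * SD mode: maxCfg is absolute, in 100 Mb units.
     *   maxCfg = 30                           ->  30 * 100        = 3000
     * The two encodings agree only when the link runs at exactly 10G.
     */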
index bda60d590fa88aeb29bf7f15c0c649548905544f..3445ded6674f6482d6a6de7c8ad76c59d4a71ae1 100644 (file)
@@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
        if (unlikely(bp->panic))
                return;
 
+       bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
        /* Protect a state change flow */
        spin_lock_bh(&bp->stats_lock);
        state = bp->stats_state;
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;
        spin_unlock_bh(&bp->stats_lock);
 
-       bnx2x_stats_stm[state][event].action(bp);
-
        if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
                DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
                   state, event, bp->stats_state);
index 1024ae158227306aeacd30fa69159b3bfac97987..a5d5d0b5b1558679f32d682f2b067c9fe3db2a45 100644 (file)
@@ -281,23 +281,23 @@ static inline int __check_agg_selection_timer(struct port *port)
 }
 
 /**
- * __get_rx_machine_lock - lock the port's RX machine
+ * __get_state_machine_lock - lock the port's state machines
  * @port: the port we're looking at
  *
  */
-static inline void __get_rx_machine_lock(struct port *port)
+static inline void __get_state_machine_lock(struct port *port)
 {
-       spin_lock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+       spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
 }
 
 /**
- * __release_rx_machine_lock - unlock the port's RX machine
+ * __release_state_machine_lock - unlock the port's state machines
  * @port: the port we're looking at
  *
  */
-static inline void __release_rx_machine_lock(struct port *port)
+static inline void __release_state_machine_lock(struct port *port)
 {
-       spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+       spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
 }
 
 /**
@@ -388,14 +388,14 @@ static u8 __get_duplex(struct port *port)
 }
 
 /**
- * __initialize_port_locks - initialize a port's RX machine spinlock
+ * __initialize_port_locks - initialize a port's STATE machine spinlock
  * @port: the port we're looking at
  *
  */
 static inline void __initialize_port_locks(struct port *port)
 {
        // make sure it isn't called twice
-       spin_lock_init(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+       spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
 }
 
 //conversions
@@ -1025,9 +1025,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
 {
        rx_states_t last_state;
 
-       // Lock to prevent 2 instances of this function to run simultaneously(rx interrupt and periodic machine callback)
-       __get_rx_machine_lock(port);
-
        // keep current State Machine state to compare later if it was changed
        last_state = port->sm_rx_state;
 
@@ -1133,7 +1130,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
                                pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
                                       "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
                                       port->slave->dev->master->name, port->slave->dev->name);
-                               __release_rx_machine_lock(port);
                                return;
                        }
                        __update_selected(lacpdu, port);
@@ -1153,7 +1149,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
                        break;
                }
        }
-       __release_rx_machine_lock(port);
 }
 
 /**
@@ -2155,6 +2150,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
                        goto re_arm;
                }
 
+               /* Lock around state machines to protect data accessed
+                * by all of them (e.g., port->sm_vars).  ad_rx_machine
+                * may run concurrently due to an incoming LACPDU.
+                */
+               __get_state_machine_lock(port);
+
                ad_rx_machine(NULL, port);
                ad_periodic_machine(port);
                ad_port_selection_logic(port);
@@ -2164,6 +2165,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
                // turn off the BEGIN bit, since we already handled it
                if (port->sm_vars & AD_PORT_BEGIN)
                        port->sm_vars &= ~AD_PORT_BEGIN;
+
+               __release_state_machine_lock(port);
        }
 
 re_arm:
@@ -2200,7 +2203,10 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
                case AD_TYPE_LACPDU:
                        pr_debug("Received LACPDU on port %d\n",
                                 port->actor_port_number);
+                       /* Protect against concurrent state machines */
+                       __get_state_machine_lock(port);
                        ad_rx_machine(lacpdu, port);
+                       __release_state_machine_lock(port);
                        break;
 
                case AD_TYPE_MARKER:
index 2c46a154f2c604d956b0d1ee2384b1280eb95288..b28baff708641b7b9375891222cc47f133eafdfb 100644 (file)
@@ -264,7 +264,8 @@ struct ad_bond_info {
 struct ad_slave_info {
        struct aggregator aggregator;       // 802.3ad aggregator structure
        struct port port;                   // 802.3ad port structure
-       spinlock_t rx_machine_lock; // To avoid race condition between callback and receive interrupt
+       spinlock_t state_machine_lock; /* serialize state machines vs.
+                                         incoming LACPDUs */
        u16 id;
 };
 
index 5157e15e96eba705a9e5750e0f46e725e3cebc53..aeea9f9ff6e8ac4798feb8d664f2a774f5dc9219 100644 (file)
@@ -633,6 +633,7 @@ static const struct net_device_ops softing_netdev_ops = {
 };
 
 static const struct can_bittiming_const softing_btr_const = {
+       .name = "softing",
        .tseg1_min = 1,
        .tseg1_max = 16,
        .tseg2_min = 1,
index 7ff170cbc7dcfa7ec73b8ffe055289aaba8093b9..302be4aa69d6d4252ad2be33a5a0d55dfb128c6d 100644 (file)
@@ -2760,6 +2760,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
        u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
        int kcqe_cnt;
 
+       /* status block index must be read before reading other fields */
+       rmb();
        cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
 
        while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
@@ -2770,6 +2772,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
                barrier();
                if (status_idx != *cp->kcq1.status_idx_ptr) {
                        status_idx = (u16) *cp->kcq1.status_idx_ptr;
+                       /* status block index must be read first */
+                       rmb();
                        cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
                } else
                        break;
@@ -2888,6 +2892,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
        u32 last_status = *info->status_idx_ptr;
        int kcqe_cnt;
 
+       /* status block index must be read before reading the KCQ */
+       rmb();
        while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
 
                service_kcqes(dev, kcqe_cnt);
@@ -2898,6 +2904,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
                        break;
 
                last_status = *info->status_idx_ptr;
+               /* status block index must be read before reading the KCQ */
+               rmb();
        }
        return last_status;
 }
@@ -2906,26 +2914,35 @@ static void cnic_service_bnx2x_bh(unsigned long data)
 {
        struct cnic_dev *dev = (struct cnic_dev *) data;
        struct cnic_local *cp = dev->cnic_priv;
-       u32 status_idx;
+       u32 status_idx, new_status_idx;
 
        if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
                return;
 
-       status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+       while (1) {
+               status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
 
-       CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
+               CNIC_WR16(dev, cp->kcq1.io_addr,
+                         cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
 
-       if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
-               status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+               if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
+                       cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
+                                          status_idx, IGU_INT_ENABLE, 1);
+                       break;
+               }
+
+               new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+
+               if (new_status_idx != status_idx)
+                       continue;
 
                CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
                          MAX_KCQ_IDX);
 
                cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
                                status_idx, IGU_INT_ENABLE, 1);
-       } else {
-               cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
-                                  status_idx, IGU_INT_ENABLE, 1);
+
+               break;
        }
 }
 
index 2a628d17d178f14a0167780c01c48b351c41550f..7018bfe408a4ecd6f21076e7093a06cb86ae2fab 100644 (file)
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status)
        int                     ret;
 
        /* free and bail if we are shutting down */
-       if (unlikely(!netif_running(ndev))) {
+       if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
                dev_kfree_skb_any(skb);
                return;
        }
index 2d4c4fc1d90053fa6c04785eac027ef0a59d424c..461dd6f905f78ca476b1edfed1c71a9ea49c2eac 100644 (file)
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev)
        /* Checksum mode */
        dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
 
-       /* GPIO0 on pre-activate PHY */
-       iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
        iow(db, DM9000_GPCR, GPCR_GEP_CNTL);    /* Let GPIO0 output */
-       iow(db, DM9000_GPR, 0); /* Enable PHY */
 
        ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
 
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev)
        unsigned long flags;
 
        /* Save previous register address */
-       reg_save = readb(db->io_addr);
        spin_lock_irqsave(&db->lock, flags);
+       reg_save = readb(db->io_addr);
 
        netif_stop_queue(dev);
        dm9000_reset(db);
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev)
        if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
                return -EAGAIN;
 
+       /* Pre-activate the PHY via GPIO0; Reg 1F is not set by reset */
+       iow(db, DM9000_GPR, 0); /* REG_1F bit0 activates the phyxcer */
+       mdelay(1); /* delay needed by the DM9000B */
+
        /* Initialize DM9000 board */
        dm9000_reset(db);
        dm9000_init_dm9000(dev);
index 9d8a20b72fa9ecdaa694481f1c4151a5dd51ce3d..8318ea06cb6dcec33caedf56bdda6c2070a298c7 100644 (file)
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp)
        for (i = 0; i < PHY_MAX_ADDR; i++)
                bp->mii_bus->irq[i] = PHY_POLL;
 
-       platform_set_drvdata(bp->dev, bp->mii_bus);
-
        if (mdiobus_register(bp->mii_bus)) {
                err = -ENXIO;
                goto err_out_free_mdio_irq;
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
        bp = netdev_priv(dev);
        bp->dev = dev;
 
+       platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        spin_lock_init(&bp->lock);
index 55c1711f1688d626d1ed84427882c8b25983fccc..33e7c45a4fe4831bf306d501b93ff12ce673b065 100644 (file)
@@ -42,7 +42,8 @@
 #define GBE_CONFIG_RAM_BASE \
        ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
 
-#define GBE_CONFIG_BASE_VIRT    phys_to_virt(GBE_CONFIG_RAM_BASE)
+#define GBE_CONFIG_BASE_VIRT \
+       ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
 
 #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
        (iowrite16_rep(base + offset, data, count))
index 3fa110ddb0413d90690917ad0bf464c5d62b6397..2e5022849f1828c19a1bc968e7112cdf487d4145 100644 (file)
@@ -5967,7 +5967,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                /* APME bit in EEPROM is mapped to WUC.APME */
                eeprom_data = er32(WUC);
                eeprom_apme_mask = E1000_WUC_APME;
-               if (eeprom_data & E1000_WUC_PHY_WAKE)
+               if ((hw->mac.type > e1000_ich10lan) &&
+                   (eeprom_data & E1000_WUC_PHY_WAKE))
                        adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
        } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
                if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
index 2a71373719ae9abde329770132c99c6b888b9e21..cd0282d5d40f13935eeafb2a0fe5b7c2587b9f17 100644 (file)
@@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = {
        }, {
                .name = "imx28-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
-       }
+       },
+       { }
 };
 
 static unsigned char macaddr[ETH_ALEN];
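
The added { } entry matters: platform_device_id tables are scanned until an all-zero sentinel, so a table without one can be walked past its end during matching. A sketch of the convention (entry names are illustrative, borrowed from the hunk above):

    /* Sketch: id-table matching stops at the empty sentinel entry. */
    static const struct platform_device_id example_devtype[] = {
            { .name = "imx28-fec" },
            { /* sentinel: terminates the platform_match() scan */ }
    };
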
index 74486a8b009acb5d059523785b4823df3d262038..af3822f9ea9a28c1dc8c058cf5e4ef9e7f634ccf 100644 (file)
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
  *  The parameter rar_count will usually be hw->mac.rar_entry_count
  *  unless there are workarounds that change this.
  **/
-void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
+static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
                                   u8 *mc_addr_list, u32 mc_addr_count,
                                   u32 rar_used_count, u32 rar_count)
 {
index f69e73e2191e9e4a54f3f617e14d8ac6c33734ef..79ccb54ab00c064716e068a1a2b175c03b138252 100644 (file)
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp)
        for (i = 0; i < PHY_MAX_ADDR; i++)
                bp->mii_bus->irq[i] = PHY_POLL;
 
-       platform_set_drvdata(bp->dev, bp->mii_bus);
+       dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
 
        if (mdiobus_register(bp->mii_bus))
                goto err_out_free_mdio_irq;
index 5933621ac3ffa73f7c3a19db049b5bbc813560be..fc27a9926d9e52c28a63d6be8852518e5cebb4da 100644 (file)
@@ -528,8 +528,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
                vnet_hdr_len = q->vnet_hdr_sz;
 
                err = -EINVAL;
-               if ((len -= vnet_hdr_len) < 0)
+               if (len < vnet_hdr_len)
                        goto err;
+               len -= vnet_hdr_len;
 
                err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
                                           sizeof(vnet_hdr));
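
The rewritten check avoids testing a subtraction result for negativity: len here has an unsigned type, so the old (len -= vnet_hdr_len) < 0 could never be true and short writes slipped through with a wrapped length. A standalone demonstration of the wraparound (plain userspace C, for illustration):

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
            size_t len = 10, vnet_hdr_len = 16;

            /* Old style: subtract first, then test. With an unsigned
             * type the result wraps to a huge value; "< 0" never fires. */
            len -= vnet_hdr_len;
            printf("wrapped len = %zu\n", len); /* ~1.8e19 on 64-bit */

            /* Fixed style: compare before subtracting. */
            len = 10;
            if (len < vnet_hdr_len)
                    printf("rejected before underflow\n");
            return 0;
    }
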
index 9226cda4d054d4156f1f05e9a2df07856b300b10..530ab5a10bd3a8395e0ff4ef68b9732d21fda100 100644 (file)
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
+       PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05),
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
        PCMCIA_DEVICE_NULL,
 };
index 27e6f6d43cac9be4ef52e1c011e8bfa5752404c9..e3ebd90ae6513cd2b80faf100332e0bea01c5c81 100644 (file)
@@ -49,8 +49,8 @@
 #include <asm/processor.h>
 
 #define DRV_NAME       "r6040"
-#define DRV_VERSION    "0.26"
-#define DRV_RELDATE    "30May2010"
+#define DRV_VERSION    "0.27"
+#define DRV_RELDATE    "23Feb2011"
 
 /* PHY CHIP Address */
 #define PHY1_ADDR      1       /* For MAC1 */
@@ -69,6 +69,8 @@
 
 /* MAC registers */
 #define MCR0           0x00    /* Control register 0 */
+#define  MCR0_PROMISC  0x0020  /* Promiscuous mode */
+#define  MCR0_HASH_EN  0x0100  /* Enable multicast hash table function */
 #define MCR1           0x04    /* Control register 1 */
 #define  MAC_RST       0x0001  /* Reset the MAC */
 #define MBCR           0x08    /* Bus control */
@@ -851,77 +853,92 @@ static void r6040_multicast_list(struct net_device *dev)
 {
        struct r6040_private *lp = netdev_priv(dev);
        void __iomem *ioaddr = lp->base;
-       u16 *adrp;
-       u16 reg;
        unsigned long flags;
        struct netdev_hw_addr *ha;
        int i;
+       u16 *adrp;
+       u16 hash_table[4] = { 0 };
+
+       spin_lock_irqsave(&lp->lock, flags);
 
-       /* MAC Address */
+       /* Keep our MAC Address */
        adrp = (u16 *)dev->dev_addr;
        iowrite16(adrp[0], ioaddr + MID_0L);
        iowrite16(adrp[1], ioaddr + MID_0M);
        iowrite16(adrp[2], ioaddr + MID_0H);
 
-       /* Promiscous Mode */
-       spin_lock_irqsave(&lp->lock, flags);
-
        /* Clear AMCP & PROM bits */
-       reg = ioread16(ioaddr) & ~0x0120;
-       if (dev->flags & IFF_PROMISC) {
-               reg |= 0x0020;
-               lp->mcr0 |= 0x0020;
-       }
-       /* Too many multicast addresses
-        * accept all traffic */
-       else if ((netdev_mc_count(dev) > MCAST_MAX) ||
-                (dev->flags & IFF_ALLMULTI))
-               reg |= 0x0020;
+       lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);
 
-       iowrite16(reg, ioaddr);
-       spin_unlock_irqrestore(&lp->lock, flags);
+       /* Promiscuous mode */
+       if (dev->flags & IFF_PROMISC)
+               lp->mcr0 |= MCR0_PROMISC;
 
-       /* Build the hash table */
-       if (netdev_mc_count(dev) > MCAST_MAX) {
-               u16 hash_table[4];
-               u32 crc;
+       /* Enable multicast hash table function to
+        * receive all multicast packets. */
+       else if (dev->flags & IFF_ALLMULTI) {
+               lp->mcr0 |= MCR0_HASH_EN;
 
-               for (i = 0; i < 4; i++)
-                       hash_table[i] = 0;
+               for (i = 0; i < MCAST_MAX; i++) {
+                       iowrite16(0, ioaddr + MID_1L + 8 * i);
+                       iowrite16(0, ioaddr + MID_1M + 8 * i);
+                       iowrite16(0, ioaddr + MID_1H + 8 * i);
+               }
 
+               for (i = 0; i < 4; i++)
+                       hash_table[i] = 0xffff;
+       }
+       /* Use internal multicast address registers if the number of
+        * multicast addresses is not greater than MCAST_MAX. */
+       else if (netdev_mc_count(dev) <= MCAST_MAX) {
+               i = 0;
                netdev_for_each_mc_addr(ha, dev) {
-                       char *addrs = ha->addr;
+                       u16 *adrp = (u16 *) ha->addr;
+                       iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
+                       iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
+                       iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
+                       i++;
+               }
+               while (i < MCAST_MAX) {
+                       iowrite16(0, ioaddr + MID_1L + 8 * i);
+                       iowrite16(0, ioaddr + MID_1M + 8 * i);
+                       iowrite16(0, ioaddr + MID_1H + 8 * i);
+                       i++;
+               }
+       }
+       /* Otherwise, enable the multicast hash table function. */
+       else {
+               u32 crc;
 
-                       if (!(*addrs & 1))
-                               continue;
+               lp->mcr0 |= MCR0_HASH_EN;
+
+               for (i = 0; i < MCAST_MAX; i++) {
+                       iowrite16(0, ioaddr + MID_1L + 8 * i);
+                       iowrite16(0, ioaddr + MID_1M + 8 * i);
+                       iowrite16(0, ioaddr + MID_1H + 8 * i);
+               }
 
-                       crc = ether_crc_le(6, addrs);
+               /* Build multicast hash table */
+               netdev_for_each_mc_addr(ha, dev) {
+                       u8 *addrs = ha->addr;
+
+                       crc = ether_crc(ETH_ALEN, addrs);
                        crc >>= 26;
-                       hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+                       hash_table[crc >> 4] |= 1 << (crc & 0xf);
                }
-               /* Fill the MAC hash tables with their values */
+       }
+
+       iowrite16(lp->mcr0, ioaddr + MCR0);
+
+       /* Fill the MAC hash tables with their values */
+       if (lp->mcr0 & MCR0_HASH_EN) {
                iowrite16(hash_table[0], ioaddr + MAR0);
                iowrite16(hash_table[1], ioaddr + MAR1);
                iowrite16(hash_table[2], ioaddr + MAR2);
                iowrite16(hash_table[3], ioaddr + MAR3);
        }
-       /* Multicast Address 1~4 case */
-       i = 0;
-       netdev_for_each_mc_addr(ha, dev) {
-               if (i >= MCAST_MAX)
-                       break;
-               adrp = (u16 *) ha->addr;
-               iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
-               iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
-               iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
-               i++;
-       }
-       while (i < MCAST_MAX) {
-               iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
-               iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
-               iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
-               i++;
-       }
+
+       spin_unlock_irqrestore(&lp->lock, flags);
 }
 
 static void netdev_get_drvinfo(struct net_device *dev,
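
The rewritten path programs the four exact-match slots (MID_1L/M/H, at an 8-byte register stride) or falls back to the 64-bit hash spread across MAR0..MAR3. As a sketch, the repeated slot writes above could be factored into a small helper (hypothetical, not part of the patch):

    /* Hypothetical helper: write one exact-match slot; each slot is
     * three 16-bit words of the 48-bit address at an 8-byte stride. */
    static void r6040_write_mc_slot(void __iomem *ioaddr, int slot,
                                    const u8 *addr)
    {
            const u16 *adrp = (const u16 *)addr;

            iowrite16(adrp[0], ioaddr + MID_1L + 8 * slot);
            iowrite16(adrp[1], ioaddr + MID_1M + 8 * slot);
            iowrite16(adrp[2], ioaddr + MID_1H + 8 * slot);
    }
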
index 469ab0b7ce315bb0c5ad8c71f481fc893448e849..7ffdb80adf40fd7ffd0e716e777b7b2e9f7e35ef 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/pm_runtime.h>
 #include <linux/firmware.h>
+#include <linux/pci-aspm.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -617,8 +618,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
        }
 }
 
-static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
+static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
        int i;
 
        RTL_W8(ERIDR, cmd);
@@ -630,7 +632,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
                        break;
        }
 
-       ocp_write(ioaddr, 0x1, 0x30, 0x00000001);
+       ocp_write(tp, 0x1, 0x30, 0x00000001);
 }
 
 #define OOB_CMD_RESET          0x00
@@ -2868,8 +2870,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
 {
        void __iomem *ioaddr = tp->mmio_addr;
 
-       if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+       if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+            (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+           (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
                return;
+       }
 
        if (((tp->mac_version == RTL_GIGA_MAC_VER_23) ||
             (tp->mac_version == RTL_GIGA_MAC_VER_24)) &&
@@ -2891,6 +2896,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_25:
        case RTL_GIGA_MAC_VER_26:
+       case RTL_GIGA_MAC_VER_27:
+       case RTL_GIGA_MAC_VER_28:
                RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
                break;
        }
@@ -2900,12 +2907,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
 {
        void __iomem *ioaddr = tp->mmio_addr;
 
-       if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+       if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+            (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+           (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
                return;
+       }
 
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_25:
        case RTL_GIGA_MAC_VER_26:
+       case RTL_GIGA_MAC_VER_27:
+       case RTL_GIGA_MAC_VER_28:
                RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
                break;
        }
@@ -3009,6 +3021,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        mii->reg_num_mask = 0x1f;
        mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
 
+       /* Disable ASPM completely, as it causes random devices to stop
+        * working and full system hangs for some PCIe device users. */
+       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+                                    PCIE_LINK_STATE_CLKPM);
+
        /* enable device (incl. PCI PM wakeup and hotplug setup) */
        rc = pci_enable_device(pdev);
        if (rc < 0) {
@@ -3042,7 +3059,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_mwi_2;
        }
 
-       tp->cp_cmd = PCIMulRW | RxChkSum;
+       tp->cp_cmd = RxChkSum;
 
        if ((sizeof(dma_addr_t) > 4) &&
            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -3318,7 +3335,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
        /* Disable interrupts */
        rtl8169_irq_mask_and_ack(ioaddr);
 
-       if (tp->mac_version == RTL_GIGA_MAC_VER_28) {
+       if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+           tp->mac_version == RTL_GIGA_MAC_VER_28) {
                while (RTL_R8(TxPoll) & NPQ)
                        udelay(20);
 
@@ -3847,8 +3865,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
        Cxpl_dbg_sel | \
        ASF | \
        PktCntrDisable | \
-       PCIDAC | \
-       PCIMulRW)
+       Mac_dbgo_sel)
 
 static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
 {
@@ -3878,8 +3895,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
        if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
                RTL_W8(Config1, cfg1 & ~LEDS0);
 
-       RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
-
        rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
 }
 
@@ -3891,8 +3906,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
 
        RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
-
-       RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
 }
 
 static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
@@ -3918,6 +3931,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
                }
        }
 
+       RTL_W8(Cfg9346, Cfg9346_Unlock);
+
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_07:
                rtl_hw_start_8102e_1(ioaddr, pdev);
@@ -3932,14 +3947,13 @@ static void rtl_hw_start_8101(struct net_device *dev)
                break;
        }
 
-       RTL_W8(Cfg9346, Cfg9346_Unlock);
+       RTL_W8(Cfg9346, Cfg9346_Lock);
 
        RTL_W8(MaxTxPacketSize, TxPacketMax);
 
        rtl_set_rx_max_size(ioaddr, rx_buf_sz);
 
-       tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
-
+       tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
        RTL_W16(CPlusCmd, tp->cp_cmd);
 
        RTL_W16(IntrMitigate, 0x0000);
@@ -3949,14 +3963,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
        RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
        rtl_set_rx_tx_config_registers(tp);
 
-       RTL_W8(Cfg9346, Cfg9346_Lock);
-
        RTL_R8(IntrMask);
 
        rtl_set_rx_mode(dev);
 
-       RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-
        RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
 
        RTL_W16(IntrMask, tp->intr_event);
index 0e8bb19ed60d6f8bf78efb6b0910a8f4d94907bb..ca886d98bdc78e6b5a2a618ee7afb6ee271b329e 100644 (file)
@@ -569,9 +569,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
                                  struct ethtool_test *test, u64 *data)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_self_tests efx_tests;
+       struct efx_self_tests *efx_tests;
        int already_up;
-       int rc;
+       int rc = -ENOMEM;
+
+       efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+       if (!efx_tests)
+               goto fail;
+
 
        ASSERT_RTNL();
        if (efx->state != STATE_RUNNING) {
@@ -589,13 +594,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
                if (rc) {
                        netif_err(efx, drv, efx->net_dev,
                                  "failed opening device.\n");
-                       goto fail2;
+                       goto fail1;
                }
        }
 
-       memset(&efx_tests, 0, sizeof(efx_tests));
-
-       rc = efx_selftest(efx, &efx_tests, test->flags);
+       rc = efx_selftest(efx, efx_tests, test->flags);
 
        if (!already_up)
                dev_close(efx->net_dev);
@@ -604,10 +607,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
                   rc == 0 ? "passed" : "failed",
                   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
 
- fail2:
- fail1:
+fail1:
        /* Fill ethtool results structures */
-       efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
+       efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+       kfree(efx_tests);
+fail:
        if (rc)
                test->flags |= ETH_TEST_FL_FAILED;
 }
index 42daf98ba73636fc9a2b39f7e51397ab519be5b6..35b28f42d208ec7a413f37ee00165af6dfb887a7 100644 (file)
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
        memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
-       /* device is off until link detection */
-       netif_carrier_off(dev);
-
        return dev;
 }
 
index 64bfdae5956fee14160d26bca07670c5025b74eb..d70bde95460ba24c53585783c9816db0425911b8 100644 (file)
@@ -1178,6 +1178,11 @@ static int smsc911x_open(struct net_device *dev)
        smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
        smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740);
 
+       /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */
+       spin_lock_irq(&pdata->mac_lock);
+       smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q);
+       spin_unlock_irq(&pdata->mac_lock);
+
        /* Make sure EEPROM has finished loading before setting GPIO_CFG */
        timeout = 50;
        while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) &&
index 02b622e3b9fb3b13a5457fe85d01bc1875125716..5002f5be47be7dcbd95e0fd9cee2a80910046a81 100644 (file)
@@ -650,6 +650,10 @@ static const struct usb_device_id products[] = {
        USB_DEVICE(0x0fe6, 0x8101),     /* DM9601 USB to Fast Ethernet Adapter */
        .driver_info = (unsigned long)&dm9601_info,
         },
+       {
+        USB_DEVICE(0x0fe6, 0x9700),    /* DM9601 USB to Fast Ethernet Adapter */
+        .driver_info = (unsigned long)&dm9601_info,
+        },
        {
         USB_DEVICE(0x0a46, 0x9000),    /* DM9000E */
         .driver_info = (unsigned long)&dm9601_info,
index 78c26fdccad1389c8d8a6ef88459c934e7776c00..62ce2f4e8605b847f4b242d766b032fc70ed6968 100644 (file)
@@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
        return 0;
 }
 
+/*
+ * Wait for synth to settle
+ */
+static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+                       struct ieee80211_channel *channel)
+{
+       /*
+        * On 5211+ read activation -> rx delay
+        * and use it (100ns steps).
+        */
+       if (ah->ah_version != AR5K_AR5210) {
+               u32 delay;
+               delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
+                       AR5K_PHY_RX_DELAY_M;
+               delay = (channel->hw_value & CHANNEL_CCK) ?
+                       ((delay << 2) / 22) : (delay / 10);
+               if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
+                       delay = delay << 1;
+               if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
+                       delay = delay << 2;
+               /* XXX: /2 on turbo? Let's be safe
+                * for now */
+               udelay(100 + delay);
+       } else {
+               mdelay(1);
+       }
+}
+
 
 /**********************\
 * RF Gain optimization *
@@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
        case AR5K_RF5111:
                ret = ath5k_hw_rf5111_channel(ah, channel);
                break;
+       case AR5K_RF2317:
        case AR5K_RF2425:
                ret = ath5k_hw_rf2425_channel(ah, channel);
                break;
@@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
                /* Failed */
                if (i >= 100)
                        return -EIO;
+
+               /* Set channel and wait for synth */
+               ret = ath5k_hw_channel(ah, channel);
+               if (ret)
+                       return ret;
+
+               ath5k_hw_wait_for_synth(ah, channel);
        }
 
        /*
@@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
        if (ret)
                return ret;
 
+       /* Write OFDM timings on 5212 */
+       if (ah->ah_version == AR5K_AR5212 &&
+               channel->hw_value & CHANNEL_OFDM) {
+
+               ret = ath5k_hw_write_ofdm_timings(ah, channel);
+               if (ret)
+                       return ret;
+
+               /* Spur info is available only from EEPROM versions
+                * greater than 5.3, but the EEPROM routines will use
+                * static values for older versions */
+               if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
+                       ath5k_hw_set_spur_mitigation_filter(ah,
+                                                           channel);
+       }
+
+       /* If we used fast channel switching
+        * we are done, release RF bus and
+        * fire up NF calibration.
+        *
+        * Note: Only NF calibration due to
+        * channel change, not AGC calibration
+        * since AGC is still running !
+        */
+       if (fast) {
+               /*
+                * Release RF Bus grant
+                */
+               AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
+                                   AR5K_PHY_RFBUS_REQ_REQUEST);
+
+               /*
+                * Start NF calibration
+                */
+               AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+                                       AR5K_PHY_AGCCTL_NF);
+
+               return ret;
+       }
+
        /*
         * For 5210 we do all initialization using
         * initvals, so we don't have to modify
         * any settings (5210 also only supports
         * a/aturbo modes)
         */
-       if ((ah->ah_version != AR5K_AR5210) && !fast) {
+       if (ah->ah_version != AR5K_AR5210) {
 
                /*
                 * Write initial RF gain settings
@@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
                if (ret)
                        return ret;
 
-               /* Write OFDM timings on 5212*/
-               if (ah->ah_version == AR5K_AR5212 &&
-                       channel->hw_value & CHANNEL_OFDM) {
-
-                       ret = ath5k_hw_write_ofdm_timings(ah, channel);
-                       if (ret)
-                               return ret;
-
-                       /* Spur info is available only from EEPROM versions
-                        * greater than 5.3, but the EEPROM routines will use
-                        * static values for older versions */
-                       if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
-                               ath5k_hw_set_spur_mitigation_filter(ah,
-                                                                   channel);
-               }
-
                /*Enable/disable 802.11b mode on 5111
                (enable 2111 frequency converter + CCK)*/
                if (ah->ah_radio == AR5K_RF5111) {
@@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
         */
        ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
 
+       ath5k_hw_wait_for_synth(ah, channel);
+
        /*
-        * On 5211+ read activation -> rx delay
-        * and use it.
+        * Perform ADC test to see if baseband is ready
+        * Set tx hold and check adc test register
         */
-       if (ah->ah_version != AR5K_AR5210) {
-               u32 delay;
-               delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
-                       AR5K_PHY_RX_DELAY_M;
-               delay = (channel->hw_value & CHANNEL_CCK) ?
-                       ((delay << 2) / 22) : (delay / 10);
-               if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
-                       delay = delay << 1;
-               if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
-                       delay = delay << 2;
-               /* XXX: /2 on turbo ? Let's be safe
-                * for now */
-               udelay(100 + delay);
-       } else {
-               mdelay(1);
-       }
-
-       if (fast)
-               /*
-                * Release RF Bus grant
-                */
-               AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
-                                   AR5K_PHY_RFBUS_REQ_REQUEST);
-       else {
-               /*
-                * Perform ADC test to see if baseband is ready
-                * Set tx hold and check adc test register
-                */
-               phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
-               ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
-               for (i = 0; i <= 20; i++) {
-                       if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
-                               break;
-                       udelay(200);
-               }
-               ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
+       phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
+       ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
+       for (i = 0; i <= 20; i++) {
+               if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
+                       break;
+               udelay(200);
        }
+       ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
 
        /*
         * Start automatic gain control calibration
index 23838e37d45fc3b0b3884f7321d51bdcad4c092d..1a7fa6ea4cf57fedabac3a673d7c2f69be0d8cc7 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/device.h>
 #include <linux/leds.h>
 #include <linux/completion.h>
-#include <linux/pm_qos_params.h>
 
 #include "debug.h"
 #include "common.h"
@@ -57,8 +56,6 @@ struct ath_node;
 
 #define A_MAX(a, b) ((a) > (b) ? (a) : (b))
 
-#define ATH9K_PM_QOS_DEFAULT_VALUE     55
-
 #define TSF_TO_TU(_h,_l) \
        ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
 
@@ -633,8 +630,6 @@ struct ath_softc {
        struct ath_descdma txsdma;
 
        struct ath_ant_comb ant_comb;
-
-       struct pm_qos_request_list pm_qos_req;
 };
 
 struct ath_wiphy {
@@ -666,7 +661,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
 extern struct ieee80211_ops ath9k_ops;
 extern int ath9k_modparam_nohwcrypt;
 extern int led_blink;
-extern int ath9k_pm_qos_value;
 extern bool is_ath9k_unloaded;
 
 irqreturn_t ath_isr(int irq, void *dev);
index 5ab3084eb9cb589a773f9f5c21d0181b9f67cdbd..07b1633b7f3ffe3beb4a41af65e8947eb1e9b30a 100644 (file)
@@ -219,8 +219,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
        struct tx_buf *tx_buf = NULL;
        struct sk_buff *nskb = NULL;
        int ret = 0, i;
-       u16 *hdr, tx_skb_cnt = 0;
+       u16 tx_skb_cnt = 0;
        u8 *buf;
+       __le16 *hdr;
 
        if (hif_dev->tx.tx_skb_cnt == 0)
                return 0;
@@ -245,9 +246,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
 
                buf = tx_buf->buf;
                buf += tx_buf->offset;
-               hdr = (u16 *)buf;
-               *hdr++ = nskb->len;
-               *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
+               hdr = (__le16 *)buf;
+               *hdr++ = cpu_to_le16(nskb->len);
+               *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
                buf += 4;
                memcpy(buf, nskb->data, nskb->len);
                tx_buf->len = nskb->len + 4;
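
The __le16 change makes the on-wire byte order explicit: the stream header is little-endian regardless of host endianness, and cpu_to_le16() keeps the annotation sparse-checkable. A standalone illustration of why the conversion matters (to_le16() below is a userspace stand-in for the kernel macro):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Stand-in for cpu_to_le16(): build the little-endian byte image
     * of v no matter what the host byte order is. */
    static uint16_t to_le16(uint16_t v)
    {
            uint8_t b[2] = { v & 0xff, v >> 8 };
            uint16_t out;

            memcpy(&out, b, 2);
            return out;
    }

    int main(void)
    {
            uint8_t wire[2];
            uint16_t le = to_le16(0x1234);

            memcpy(wire, &le, 2);
            /* Prints "34 12" on both little- and big-endian hosts. */
            printf("wire bytes: %02x %02x\n", wire[0], wire[1]);
            return 0;
    }
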
index 087a6a95edd5893a8a9282646a4ebc0a3dda40a3..a033d01bf8a0bfe4203847c71458474b5e3a38cc 100644 (file)
@@ -41,10 +41,6 @@ static int ath9k_btcoex_enable;
 module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
 MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
 
-int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE;
-module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH);
-MODULE_PARM_DESC(pmqos, "User specified PM-QOS value");
-
 bool is_ath9k_unloaded;
 /* We use the hw_value as an index into our private channel structure */
 
@@ -762,9 +758,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
        ath_init_leds(sc);
        ath_start_rfkill_poll(sc);
 
-       pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
-                          PM_QOS_DEFAULT_VALUE);
-
        return 0;
 
 error_world:
@@ -831,7 +824,6 @@ void ath9k_deinit_device(struct ath_softc *sc)
        }
 
        ieee80211_unregister_hw(hw);
-       pm_qos_remove_request(&sc->pm_qos_req);
        ath_rx_cleanup(sc);
        ath_tx_cleanup(sc);
        ath9k_deinit_softc(sc);
index 180170d3ce25511e40f3e252aa7c5223d664920e..2915b11edefb915ca89321a7e2a349e1f525dcf6 100644 (file)
@@ -885,7 +885,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
        struct ath_common *common = ath9k_hw_common(ah);
 
        if (!(ints & ATH9K_INT_GLOBAL))
-               ath9k_hw_enable_interrupts(ah);
+               ath9k_hw_disable_interrupts(ah);
 
        ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
 
@@ -963,7 +963,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
                        REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
        }
 
-       ath9k_hw_enable_interrupts(ah);
+       if (ints & ATH9K_INT_GLOBAL)
+               ath9k_hw_enable_interrupts(ah);
 
        return;
 }
index da5c64597c1fbba4c4d6879819963769cf4e6f56..a09d15f7aa6e0eff95cadfcb04ca280b3c292528 100644 (file)
@@ -1173,12 +1173,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
                        ath9k_btcoex_timer_resume(sc);
        }
 
-       /* User has the option to provide pm-qos value as a module
-        * parameter rather than using the default value of
-        * 'ATH9K_PM_QOS_DEFAULT_VALUE'.
-        */
-       pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value);
-
        if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
                common->bus_ops->extn_synch_en(common);
 
@@ -1345,8 +1339,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 
        sc->sc_flags |= SC_OP_INVALID;
 
-       pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE);
-
        mutex_unlock(&sc->mutex);
 
        ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
index 537732e5964fa2e4122e11face3943e0855b9119..f82c400be28864d58edd9a5d38bd10f8a8c73f9d 100644 (file)
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
        { USB_DEVICE(0x057c, 0x8402) },
        /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
        { USB_DEVICE(0x1668, 0x1200) },
+       /* Airlive X.USB a/b/g/n */
+       { USB_DEVICE(0x1b75, 0x9170) },
 
        /* terminate */
        {}
index 79ab0a6b138636781482a24bcc35e7da7e21f133..537fb8c84e3af9dab42874a99f26bef07a6a8245 100644 (file)
@@ -51,7 +51,7 @@
 #include "iwl-agn-debugfs.h"
 
 /* Highest firmware API version supported */
-#define IWL5000_UCODE_API_MAX 2
+#define IWL5000_UCODE_API_MAX 5
 #define IWL5150_UCODE_API_MAX 2
 
 /* Lowest firmware API version supported */
index 1eacba4daa5bb1d35edc265ac8862c464eac4387..0494d7b102d42cfe058578264ef6104b2fdd018e 100644 (file)
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
        while (i != idx) {
                u16 len;
                struct sk_buff *skb;
+               dma_addr_t dma_addr;
                desc = &ring[i];
                len = le16_to_cpu(desc->len);
                skb = rx_buf[i];
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
 
                        len = priv->common.rx_mtu;
                }
+               dma_addr = le32_to_cpu(desc->host_addr);
+               pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
+                       priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
                skb_put(skb, len);
 
                if (p54_rx(dev, skb)) {
-                       pci_unmap_single(priv->pdev,
-                                        le32_to_cpu(desc->host_addr),
-                                        priv->common.rx_mtu + 32,
-                                        PCI_DMA_FROMDEVICE);
+                       pci_unmap_single(priv->pdev, dma_addr,
+                               priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
                        rx_buf[i] = NULL;
-                       desc->host_addr = 0;
+                       desc->host_addr = cpu_to_le32(0);
                } else {
                        skb_trim(skb, 0);
+                       pci_dma_sync_single_for_device(priv->pdev, dma_addr,
+                               priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
                        desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
                }
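
The added sync calls follow the streaming-DMA ownership rule: pci_dma_sync_single_for_cpu() hands the buffer to the CPU before it inspects the packet, and if the buffer is recycled into the ring, pci_dma_sync_single_for_device() hands it back before the device may DMA into it again. In sketch form (buffer and helper names are hypothetical):

    /* device owns buffer --sync_for_cpu--> CPU may read it
     * CPU done with it --sync_for_device--> device may DMA again */
    pci_dma_sync_single_for_cpu(pdev, dma_addr, buf_len,
                                PCI_DMA_FROMDEVICE);
    inspect_packet(buf);            /* CPU reads the RX data */
    pci_dma_sync_single_for_device(pdev, dma_addr, buf_len,
                                   PCI_DMA_FROMDEVICE);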
 
index 21713a7638c41b84307a2b35b9d07990ce1645bf..9b344a921e742e29ce483450ea9313186f2d0e23 100644 (file)
@@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
        {USB_DEVICE(0x1413, 0x5400)},   /* Telsey 802.11g USB2.0 Adapter */
        {USB_DEVICE(0x1435, 0x0427)},   /* Inventel UR054G */
        {USB_DEVICE(0x1668, 0x1050)},   /* Actiontec 802UIG-1 */
+       {USB_DEVICE(0x1740, 0x1000)},   /* Senao NUB-350 */
        {USB_DEVICE(0x2001, 0x3704)},   /* DLink DWL-G122 rev A2 */
        {USB_DEVICE(0x2001, 0x3705)},   /* D-Link DWL-G120 rev C1 */
        {USB_DEVICE(0x413c, 0x5513)},   /* Dell WLA3310 USB Wireless Adapter */
index 848cc2cce247fb5781255cda3b36477db0e2a959..518542b4bf9e87a0571431d4374842b72ef62faa 100644 (file)
@@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
        __le32 mode;
        int ret;
 
+       if (priv->device_type != RNDIS_BCM4320B)
+               return -ENOTSUPP;
+
        netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
                                enabled ? "enabled" : "disabled",
                                timeout);
index aa97971a38afe57f0c56d0dbd0c09c40cd2f0ab5..3b3f1e45ab3e58e7538a3ae02c5a4058caaad29b 100644 (file)
@@ -652,6 +652,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
                 */
                rxdesc->flags |= RX_FLAG_IV_STRIPPED;
 
+               /*
+                * The hardware has already checked the Michael Mic and has
+                * stripped it from the frame. Signal this to mac80211.
+                */
+               rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
                if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
                        rxdesc->flags |= RX_FLAG_DECRYPTED;
                else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -1065,6 +1071,8 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
        { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) },
 #endif
 #ifdef CONFIG_RT2800PCI_RT35XX
+       { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) },
+       { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) },
        { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
        { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
        { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
index b97a4a54ff4cb2eb9499014cb8647df74efd9982..197a36c05fdaf18a294000b9eae8b5dad2073eea 100644 (file)
@@ -486,6 +486,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
                 */
                rxdesc->flags |= RX_FLAG_IV_STRIPPED;
 
+               /*
+                * The hardware has already checked the Michael Mic and has
+                * stripped it from the frame. Signal this to mac80211.
+                */
+               rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
                if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
                        rxdesc->flags |= RX_FLAG_DECRYPTED;
                else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
index ffedfd49275499023866a970ef3d05fbe440b5f0..ea1580085347fda5a378dfc3e2291c918ba86a66 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 menuconfig NFC_DEVICES
-       bool "NFC devices"
+       bool "Near Field Communication (NFC) devices"
        default n
        ---help---
          You'll have to say Y if your computer contains an NFC device that
index bae647264dd66bba88492f59f1315c0464ce2e8f..724f65d8f9e4aa5deed5dcba2a6451f5b1b58d0b 100644 (file)
@@ -60,7 +60,7 @@ enum pn544_irq {
 struct pn544_info {
        struct miscdevice miscdev;
        struct i2c_client *i2c_dev;
-       struct regulator_bulk_data regs[2];
+       struct regulator_bulk_data regs[3];
 
        enum pn544_state state;
        wait_queue_head_t read_wait;
@@ -74,6 +74,7 @@ struct pn544_info {
 
 static const char reg_vdd_io[] = "Vdd_IO";
 static const char reg_vbat[]   = "VBat";
+static const char reg_vsim[]   = "VSim";
 
 /* sysfs interface */
 static ssize_t pn544_test(struct device *dev,
@@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client,
 
        info->regs[0].supply = reg_vdd_io;
        info->regs[1].supply = reg_vbat;
+       info->regs[2].supply = reg_vsim;
        r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
                                 info->regs);
        if (r < 0)
index 28295d0a50f64b919e4d43e1e8e73307f04a4683..4d87b5dc92843525c162ab3222d1c50191c45aa2 100644 (file)
@@ -36,19 +36,55 @@ unsigned int of_pdt_unique_id __initdata;
        (p)->unique_id = of_pdt_unique_id++; \
 } while (0)
 
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
 {
-       return dp->path_component_name;
+       int len, ourlen, plen;
+       char *n;
+
+       dp->path_component_name = build_path_component(dp);
+
+       plen = strlen(dp->parent->full_name);
+       ourlen = strlen(dp->path_component_name);
+       len = ourlen + plen + 2;
+
+       n = prom_early_alloc(len);
+       strcpy(n, dp->parent->full_name);
+       if (!of_node_is_root(dp->parent)) {
+               strcpy(n + plen, "/");
+               plen++;
+       }
+       strcpy(n + plen, dp->path_component_name);
+
+       return n;
 }
 
-#else
+#else /* CONFIG_SPARC */
 
 static inline void of_pdt_incr_unique_id(void *p) { }
 static inline void irq_trans_init(struct device_node *dp) { }
 
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
 {
-       return dp->name;
+       static int failsafe_id = 0; /* for generating unique names on failure */
+       char *buf;
+       int len;
+
+       if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len))
+               goto failsafe;
+
+       buf = prom_early_alloc(len + 1);
+       if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len))
+               goto failsafe;
+       return buf;
+
+ failsafe:
+       buf = prom_early_alloc(strlen(dp->parent->full_name) +
+                              strlen(dp->name) + 16);
+       sprintf(buf, "%s/%s@unknown%i",
+               of_node_is_root(dp->parent) ? "" : dp->parent->full_name,
+               dp->name, failsafe_id++);
+       pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf);
+       return buf;
 }
 
 #endif /* !CONFIG_SPARC */
@@ -132,47 +168,6 @@ static char * __init of_pdt_get_one_property(phandle node, const char *name)
        return buf;
 }
 
-static char * __init of_pdt_try_pkg2path(phandle node)
-{
-       char *res, *buf = NULL;
-       int len;
-
-       if (!of_pdt_prom_ops->pkg2path)
-               return NULL;
-
-       if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len))
-               return NULL;
-       buf = prom_early_alloc(len + 1);
-       if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) {
-               pr_err("%s: package-to-path failed\n", __func__);
-               return NULL;
-       }
-
-       res = strrchr(buf, '/');
-       if (!res) {
-               pr_err("%s: couldn't find / in %s\n", __func__, buf);
-               return NULL;
-       }
-       return res+1;
-}
-
-/*
- * When fetching the node's name, first try using package-to-path; if
- * that fails (either because the arch hasn't supplied a PROM callback,
- * or some other random failure), fall back to just looking at the node's
- * 'name' property.
- */
-static char * __init of_pdt_build_name(phandle node)
-{
-       char *buf;
-
-       buf = of_pdt_try_pkg2path(node);
-       if (!buf)
-               buf = of_pdt_get_one_property(node, "name");
-
-       return buf;
-}
-
 static struct device_node * __init of_pdt_create_node(phandle node,
                                                    struct device_node *parent)
 {
@@ -187,7 +182,7 @@ static struct device_node * __init of_pdt_create_node(phandle node,
 
        kref_init(&dp->kref);
 
-       dp->name = of_pdt_build_name(node);
+       dp->name = of_pdt_get_one_property(node, "name");
        dp->type = of_pdt_get_one_property(node, "device_type");
        dp->phandle = node;
 
@@ -198,26 +193,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
        return dp;
 }
 
-static char * __init of_pdt_build_full_name(struct device_node *dp)
-{
-       int len, ourlen, plen;
-       char *n;
-
-       plen = strlen(dp->parent->full_name);
-       ourlen = strlen(of_pdt_node_name(dp));
-       len = ourlen + plen + 2;
-
-       n = prom_early_alloc(len);
-       strcpy(n, dp->parent->full_name);
-       if (!of_node_is_root(dp->parent)) {
-               strcpy(n + plen, "/");
-               plen++;
-       }
-       strcpy(n + plen, of_pdt_node_name(dp));
-
-       return n;
-}
-
 static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
                                                   phandle node,
                                                   struct device_node ***nextp)
@@ -240,9 +215,6 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
                *(*nextp) = dp;
                *nextp = &dp->allnext;
 
-#if defined(CONFIG_SPARC)
-               dp->path_component_name = build_path_component(dp);
-#endif
                dp->full_name = of_pdt_build_full_name(dp);
 
                dp->child = of_pdt_build_tree(dp,
index 3a5a6fcc0eada9305be0acd3e0a67e8348c85a62..492b7d807fe8f7a17eafcbe7aa3b9bbeb4fbd89d 100644 (file)
@@ -243,7 +243,7 @@ struct pci_ops pcifront_bus_ops = {
 
 #ifdef CONFIG_PCI_MSI
 static int pci_frontend_enable_msix(struct pci_dev *dev,
-                                   int **vector, int nvec)
+                                   int vector[], int nvec)
 {
        int err;
        int i;
@@ -277,18 +277,24 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
        if (likely(!err)) {
                if (likely(!op.value)) {
                        /* we get the result */
-                       for (i = 0; i < nvec; i++)
-                               *(*vector+i) = op.msix_entries[i].vector;
-                       return 0;
+                       for (i = 0; i < nvec; i++) {
+                               if (op.msix_entries[i].vector <= 0) {
+                                       dev_warn(&dev->dev, "MSI-X entry %d is invalid: %d!\n",
+                                               i, op.msix_entries[i].vector);
+                                       err = -EINVAL;
+                                       vector[i] = -1;
+                                       continue;
+                               }
+                               vector[i] = op.msix_entries[i].vector;
+                       }
                } else {
                        printk(KERN_DEBUG "enable msix get value %x\n",
                                op.value);
-                       return op.value;
                }
        } else {
                dev_err(&dev->dev, "enable msix get err %x\n", err);
-               return err;
        }
+       return err;
 }
 
 static void pci_frontend_disable_msix(struct pci_dev *dev)
@@ -310,7 +316,7 @@ static void pci_frontend_disable_msix(struct pci_dev *dev)
                dev_err(&dev->dev, "pci_disable_msix get err %x\n", err);
 }
 
-static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
+static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
 {
        int err;
        struct xen_pci_op op = {
@@ -324,7 +330,13 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
 
        err = do_pci_op(pdev, &op);
        if (likely(!err)) {
-               *(*vector) = op.value;
+               vector[0] = op.value;
+               if (op.value <= 0) {
+                       dev_warn(&dev->dev, "MSI entry is invalid: %d!\n",
+                               op.value);
+                       err = -EINVAL;
+                       vector[0] = -1;
+               }
        } else {
                dev_err(&dev->dev, "pci frontend enable msi failed for dev "
                                    "%x:%x\n", op.bus, op.devfn);
@@ -733,8 +745,7 @@ static void free_pdev(struct pcifront_device *pdev)
 
        pcifront_free_roots(pdev);
 
-       /*For PCIE_AER error handling job*/
-       flush_scheduled_work();
+       cancel_work_sync(&pdev->op_work);
 
        if (pdev->irq >= 0)
                unbind_from_irqhandler(pdev->irq, pdev);
index 0bdda5b3ed550cc74504a7ceea539700b69cb9bd..42fbf1a75576abf6540923a834e2ddf4df045ea7 100644 (file)
@@ -518,6 +518,8 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev)
                flags |= CONF_ENABLE_IOCARD;
        if (flags & CONF_ENABLE_IOCARD)
                s->socket.flags |= SS_IOCARD;
+       if (flags & CONF_ENABLE_ZVCARD)
+               s->socket.flags |= SS_ZVCARD | SS_IOCARD;
        if (flags & CONF_ENABLE_SPKR) {
                s->socket.flags |= SS_SPKR_ENA;
                status = CCSR_AUDIO_ENA;
index 3755e7c8c715f5f9bebf60c5d9a7ff4a5e250f65..2c540542b5af7fb183a57f8ea66b230649c16c2e 100644 (file)
@@ -215,7 +215,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
 }
 #endif
 
-static void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev)
 {
        struct pcmcia_low_level *ops = dev->platform_data;
        /*
index bb62ea87b8f9c55158027c1716442d5593ab0772..b609b45469ed71179852378f49354ef1c798d433 100644 (file)
@@ -1,3 +1,4 @@
 int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
 void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
+void pxa2xx_configure_sockets(struct device *dev);
 
index c3f72192af66e1fd67c7a69dc9faa0cdd2ef65df..a52039564e74b1ec07da8fe98e4b447aaf4499ce 100644 (file)
@@ -181,6 +181,9 @@ static int __init colibri_pcmcia_init(void)
 {
        int ret;
 
+       if (!machine_is_colibri() && !machine_is_colibri320())
+               return -ENODEV;
+
        colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
        if (!colibri_pcmcia_device)
                return -ENOMEM;
index b9f8c8fb42bd52417eb1236a8f18b77dea6c5592..25afe637c6573e52448925eeb0090455ede758dc 100644 (file)
@@ -226,6 +226,7 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev)
                lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
 
                pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+               pxa2xx_configure_sockets(&sadev->dev);
                ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
                                pxa2xx_drv_pcmcia_add_one);
        }
index f3a73dd7766094641aa80a0da03751d0b5af5dfd..e4c4f3dc0728fc5c495b2283d08c180210a29379 100644 (file)
@@ -6,7 +6,7 @@ comment "PPS generators support"
 
 config PPS_GENERATOR_PARPORT
        tristate "Parallel port PPS signal generator"
-       depends on PARPORT
+       depends on PARPORT && BROKEN
        help
          If you say yes here you get support for a PPS signal generator which
          utilizes STROBE pin of a parallel port to send PPS signals. It uses
index cba1b43f7519b554e20ed787577a8f1f3d04c011..a4e8eb9fece6a53c7e2ade169df43dcd9a8696a0 100644 (file)
@@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
 {
        unsigned long flags;
        int captured = 0;
-       struct pps_ktime ts_real;
+       struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 };
 
        /* check event type */
        BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
index 76b41853a8775195f2f3f9f0696b0f2e52de9404..1269fbd2decad1a9898329f84addb5bb2720392d 100644 (file)
@@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj,
 
        /* Several chips lock up trying to read undefined config space */
        if (capable(CAP_SYS_ADMIN))
-               size = 0x200000;
+               size = RIO_MAINT_SPACE_SZ;
 
-       if (off > size)
+       if (off >= size)
                return 0;
        if (off + count > size) {
                size -= off;
@@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj,
        loff_t init_off = off;
        u8 *data = (u8 *) buf;
 
-       if (off > 0x200000)
+       if (off >= RIO_MAINT_SPACE_SZ)
                return 0;
-       if (off + count > 0x200000) {
-               size = 0x200000 - off;
+       if (off + count > RIO_MAINT_SPACE_SZ) {
+               size = RIO_MAINT_SPACE_SZ - off;
                count = size;
        }
 
@@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = {
                 .name = "config",
                 .mode = S_IRUGO | S_IWUSR,
                 },
-       .size = 0x200000,
+       .size = RIO_MAINT_SPACE_SZ,
        .read = rio_read_config,
        .write = rio_write_config,
 };
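
The > to >= change closes an off-by-one: with off equal to the window size, the old test passed and the access started exactly one byte past the maintenance space. The clamping pattern, as a small sketch (hypothetical helper name):

    /* Hypothetical sketch of the bounds clamp used above. */
    static size_t clamp_to_window(unsigned long off, size_t count,
                                  size_t space)
    {
            if (off >= space)               /* at or past the end */
                    return 0;
            if (off + count > space)        /* straddles the end */
                    count = space - off;
            return count;
    }
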
index f53d31b950d4c16b2ac3befd6833c0022a639f42..2bb5de1f2421283013e512ab6e643f4b9f05f38a 100644 (file)
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
 
        dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
 
-       BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages);
+       BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
 
        return mc13xxx_regulators[id].voltages[val];
 }
index 8b0d2c4bde919fb9815903288acabb1afa9b63eb..06df898842c0026ae63a86641d8aa067271ff649 100644 (file)
@@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev)
                return REGULATOR_MODE_IDLE;
        default:
                BUG();
+               return -EINVAL;
        }
 }
 
index c404b61386bf072329f1f140da38ea91a118610f..09b4437b3e616aa57cdfec861b2b2ddf0ff9dd59 100644 (file)
@@ -117,6 +117,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
                                        struct module *owner)
 {
        struct rtc_device *rtc;
+       struct rtc_wkalrm alrm;
        int id, err;
 
        if (idr_pre_get(&rtc_idr, GFP_KERNEL) == 0) {
@@ -166,6 +167,12 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
        rtc->pie_timer.function = rtc_pie_update_irq;
        rtc->pie_enabled = 0;
 
+       /* Check to see if there is an ALARM already set in hw */
+       err = __rtc_read_alarm(rtc, &alrm);
+
+       if (!err && !rtc_valid_tm(&alrm.time))
+               rtc_set_alarm(rtc, &alrm);
+
        strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
        dev_set_name(&rtc->dev, "rtc%d", id);
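
The new registration step adopts any alarm the firmware left armed. Note the return conventions involved, which make the test read as a double negative: both __rtc_read_alarm() (added below) and rtc_valid_tm() return 0 on success, so in sketch form the adoption logic is:

    /* Sketch: adopt a preset hardware alarm only when it was read
     * successfully AND parses as a sane broken-down time. */
    err = __rtc_read_alarm(rtc, &alrm);
    if (!err && !rtc_valid_tm(&alrm.time))
            rtc_set_alarm(rtc, &alrm);      /* sync core state with hw */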
 
index cb2f0728fd70dc187bcc80be8fa7d21bd35fbe7c..8ec6b069a7f5803921a074852dc3c61f678f1443 100644 (file)
@@ -116,6 +116,186 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
 }
 EXPORT_SYMBOL_GPL(rtc_set_mmss);
 
+static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+       int err;
+
+       err = mutex_lock_interruptible(&rtc->ops_lock);
+       if (err)
+               return err;
+
+       if (rtc->ops == NULL)
+               err = -ENODEV;
+       else if (!rtc->ops->read_alarm)
+               err = -EINVAL;
+       else {
+               memset(alarm, 0, sizeof(struct rtc_wkalrm));
+               err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
+       }
+
+       mutex_unlock(&rtc->ops_lock);
+       return err;
+}
+
+int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+       int err;
+       struct rtc_time before, now;
+       int first_time = 1;
+       unsigned long t_now, t_alm;
+       enum { none, day, month, year } missing = none;
+       unsigned days;
+
+       /* The lower level RTC driver may return -1 in some fields,
+        * creating invalid alarm->time values, for reasons like:
+        *
+        *   - The hardware may not be capable of filling them in;
+        *     many alarms match only on time-of-day fields, not
+        *     day/month/year calendar data.
+        *
+        *   - Some hardware uses illegal values as "wildcard" match
+        *     values, which non-Linux firmware (like a BIOS) may try
+        *     to set up as e.g. "alarm 15 minutes after each hour".
+        *     Linux uses only oneshot alarms.
+        *
+        * When we see that here, we deal with it by using values from
+        * a current RTC timestamp for any missing (-1) values.  The
+        * RTC driver prevents "periodic alarm" modes.
+        *
+        * But this can be racy, because some fields of the RTC timestamp
+        * may have wrapped in the interval since we read the RTC alarm,
+        * which would lead to us inserting inconsistent values in place
+        * of the -1 fields.
+        *
+        * Reading the alarm and timestamp in the reverse sequence
+        * would have the same race condition, and not solve the issue.
+        *
+        * So, we must first read the RTC timestamp,
+        * then read the RTC alarm value,
+        * and then read a second RTC timestamp.
+        *
+        * If any fields of the second timestamp have changed
+        * when compared with the first timestamp, then we know
+        * our timestamp may be inconsistent with that used by
+        * the low-level rtc_read_alarm_internal() function.
+        *
+        * So, when the two timestamps disagree, we just loop and do
+        * the process again to get a fully consistent set of values.
+        *
+        * This could all instead be done in the lower level driver,
+        * but since more than one lower level RTC implementation needs it,
+        * it's probably best to do it here instead of there.
+        */
+
+       /* Get the "before" timestamp */
+       err = rtc_read_time(rtc, &before);
+       if (err < 0)
+               return err;
+       do {
+               if (!first_time)
+                       memcpy(&before, &now, sizeof(struct rtc_time));
+               first_time = 0;
+
+               /* get the RTC alarm values, which may be incomplete */
+               err = rtc_read_alarm_internal(rtc, alarm);
+               if (err)
+                       return err;
+
+               /* full-function RTCs won't have such missing fields */
+               if (rtc_valid_tm(&alarm->time) == 0)
+                       return 0;
+
+               /* get the "after" timestamp, to detect wrapped fields */
+               err = rtc_read_time(rtc, &now);
+               if (err < 0)
+                       return err;
+
+               /* note that tm_sec is a "don't care" value here: */
+       } while (   before.tm_min   != now.tm_min
+                || before.tm_hour  != now.tm_hour
+                || before.tm_mon   != now.tm_mon
+                || before.tm_year  != now.tm_year);
+
+       /* Fill in the missing alarm fields using the timestamp; we
+        * know there's at least one since alarm->time is invalid.
+        */
+       if (alarm->time.tm_sec == -1)
+               alarm->time.tm_sec = now.tm_sec;
+       if (alarm->time.tm_min == -1)
+               alarm->time.tm_min = now.tm_min;
+       if (alarm->time.tm_hour == -1)
+               alarm->time.tm_hour = now.tm_hour;
+
+       /* For simplicity, only support date rollover for now */
+       if (alarm->time.tm_mday == -1) {
+               alarm->time.tm_mday = now.tm_mday;
+               missing = day;
+       }
+       if (alarm->time.tm_mon == -1) {
+               alarm->time.tm_mon = now.tm_mon;
+               if (missing == none)
+                       missing = month;
+       }
+       if (alarm->time.tm_year == -1) {
+               alarm->time.tm_year = now.tm_year;
+               if (missing == none)
+                       missing = year;
+       }
+
+       /* with luck, no rollover is needed */
+       rtc_tm_to_time(&now, &t_now);
+       rtc_tm_to_time(&alarm->time, &t_alm);
+       if (t_now < t_alm)
+               goto done;
+
+       switch (missing) {
+
+       /* 24 hour rollover ... if it's now 10am Monday, an alarm that
+        * will trigger at 5am will do so at 5am Tuesday, which
+        * could also be in the next month or year.  This is a common
+        * case, especially for PCs.
+        */
+       case day:
+               dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
+               t_alm += 24 * 60 * 60;
+               rtc_time_to_tm(t_alm, &alarm->time);
+               break;
+
+       /* Month rollover ... if it's the 31st, an alarm on the 3rd will
+        * be next month.  An alarm matching on the 30th, 29th, or 28th
+        * may end up in the month after that!  Many newer PCs support
+        * this type of alarm.
+        */
+       case month:
+               dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
+               do {
+                       if (alarm->time.tm_mon < 11)
+                               alarm->time.tm_mon++;
+                       else {
+                               alarm->time.tm_mon = 0;
+                               alarm->time.tm_year++;
+                       }
+                       days = rtc_month_days(alarm->time.tm_mon,
+                                       alarm->time.tm_year);
+               } while (days < alarm->time.tm_mday);
+               break;
+
+       /* Year rollover ... easy except for leap years! */
+       case year:
+               dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
+               do {
+                       alarm->time.tm_year++;
+               } while (rtc_valid_tm(&alarm->time) != 0);
+               break;
+
+       default:
+               dev_warn(&rtc->dev, "alarm rollover not handled\n");
+       }
+
+done:
+       return 0;
+}
+
 int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
        int err;
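
The retry loop that __rtc_read_alarm() builds around rtc_read_alarm_internal() is a small lock-free consistency protocol: snapshot the clock, read the alarm, snapshot the clock again, and start over if any field coarser than seconds moved in between. Reduced to a standalone sketch (read_clock()/read_alarm() are stand-ins, not kernel APIs):

/* Sketch of the read-clock / read-alarm / re-read-clock retry pattern. */
struct stamp { int min, hour, mon, year; };

void read_clock(struct stamp *s);	/* stand-in for rtc_read_time()   */
void read_alarm(struct stamp *a);	/* stand-in for the driver's read */

static int fields_stable(const struct stamp *a, const struct stamp *b)
{
	/* seconds are deliberately a "don't care", exactly as above */
	return a->min == b->min && a->hour == b->hour &&
	       a->mon == b->mon && a->year == b->year;
}

static void read_alarm_consistent(struct stamp *alarm)
{
	struct stamp before, now;

	read_clock(&before);
	for (;;) {
		read_alarm(alarm);
		read_clock(&now);
		if (fields_stable(&before, &now))
			break;		/* nothing wrapped mid-sequence */
		before = now;		/* second snapshot seeds the retry */
	}
}
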
index 26d1cf5d19ae4046899925d8a294d6417cc09401..518a76ec71cab46717903154db6713870593dcdb 100644 (file)
@@ -183,33 +183,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
        return 0;
 }
 
-/*
- * Handle commands from user-space
- */
-static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
-                       unsigned long arg)
-{
-       int ret = 0;
-
-       pr_debug("%s(): cmd=%08x, arg=%08lx.\n", __func__, cmd, arg);
-
-       /* important:  scrub old status before enabling IRQs */
-       switch (cmd) {
-       case RTC_UIE_OFF:       /* update off */
-               at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
-               break;
-       case RTC_UIE_ON:        /* update on */
-               at91_sys_write(AT91_RTC_SCCR, AT91_RTC_SECEV);
-               at91_sys_write(AT91_RTC_IER, AT91_RTC_SECEV);
-               break;
-       default:
-               ret = -ENOIOCTLCMD;
-               break;
-       }
-
-       return ret;
-}
-
 static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        pr_debug("%s(): cmd=%08x\n", __func__, enabled);
@@ -269,7 +242,6 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
 }
 
 static const struct rtc_class_ops at91_rtc_ops = {
-       .ioctl          = at91_rtc_ioctl,
        .read_time      = at91_rtc_readtime,
        .set_time       = at91_rtc_settime,
        .read_alarm     = at91_rtc_readalarm,
index c36749e4c926b8d87e391738e0f7949beb374854..a3ad957507dc9ebce9a360c85330e067d5576f9f 100644 (file)
@@ -216,33 +216,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
        return 0;
 }
 
-/*
- * Handle commands from user-space
- */
-static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
-                       unsigned long arg)
-{
-       struct sam9_rtc *rtc = dev_get_drvdata(dev);
-       int ret = 0;
-       u32 mr = rtt_readl(rtc, MR);
-
-       dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr);
-
-       switch (cmd) {
-       case RTC_UIE_OFF:               /* update off */
-               rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
-               break;
-       case RTC_UIE_ON:                /* update on */
-               rtt_writel(rtc, MR, mr | AT91_RTT_RTTINCIEN);
-               break;
-       default:
-               ret = -ENOIOCTLCMD;
-               break;
-       }
-
-       return ret;
-}
-
 static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        struct sam9_rtc *rtc = dev_get_drvdata(dev);
@@ -303,13 +276,12 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
 }
 
 static const struct rtc_class_ops at91_rtc_ops = {
-       .ioctl          = at91_rtc_ioctl,
        .read_time      = at91_rtc_readtime,
        .set_time       = at91_rtc_settime,
        .read_alarm     = at91_rtc_readalarm,
        .set_alarm      = at91_rtc_setalarm,
        .proc           = at91_rtc_proc,
-       .alarm_irq_enabled = at91_rtc_alarm_irq_enable,
+       .alarm_irq_enable = at91_rtc_alarm_irq_enable,
 };
 
 /*
index 17971d93354d2041c860e8dfd57f29d60ff6536e..ca9cff85ab8a8f6d34faf6173524184daca730b2 100644 (file)
@@ -240,32 +240,6 @@ static void bfin_rtc_int_set_alarm(struct bfin_rtc *rtc)
         */
        bfin_rtc_int_set(rtc->rtc_alarm.tm_yday == -1 ? RTC_ISTAT_ALARM : RTC_ISTAT_ALARM_DAY);
 }
-static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
-       struct bfin_rtc *rtc = dev_get_drvdata(dev);
-       int ret = 0;
-
-       dev_dbg_stamp(dev);
-
-       bfin_rtc_sync_pending(dev);
-
-       switch (cmd) {
-       case RTC_UIE_ON:
-               dev_dbg_stamp(dev);
-               bfin_rtc_int_set(RTC_ISTAT_SEC);
-               break;
-       case RTC_UIE_OFF:
-               dev_dbg_stamp(dev);
-               bfin_rtc_int_clear(~RTC_ISTAT_SEC);
-               break;
-
-       default:
-               dev_dbg_stamp(dev);
-               ret = -ENOIOCTLCMD;
-       }
-
-       return ret;
-}
 
 static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
@@ -358,7 +332,6 @@ static int bfin_rtc_proc(struct device *dev, struct seq_file *seq)
 }
 
 static struct rtc_class_ops bfin_rtc_ops = {
-       .ioctl         = bfin_rtc_ioctl,
        .read_time     = bfin_rtc_read_time,
        .set_time      = bfin_rtc_set_time,
        .read_alarm    = bfin_rtc_read_alarm,
index 159b95e4b420a5df26cad9a211de870cb02b9b93..911e75cdc125107186a53518b748dbc10329afe8 100644 (file)
@@ -377,50 +377,6 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
        return 0;
 }
 
-static int cmos_irq_set_freq(struct device *dev, int freq)
-{
-       struct cmos_rtc *cmos = dev_get_drvdata(dev);
-       int             f;
-       unsigned long   flags;
-
-       if (!is_valid_irq(cmos->irq))
-               return -ENXIO;
-
-       if (!is_power_of_2(freq))
-               return -EINVAL;
-       /* 0 = no irqs; 1 = 2^15 Hz ... 15 = 2^0 Hz */
-       f = ffs(freq);
-       if (f-- > 16)
-               return -EINVAL;
-       f = 16 - f;
-
-       spin_lock_irqsave(&rtc_lock, flags);
-       hpet_set_periodic_freq(freq);
-       CMOS_WRITE(RTC_REF_CLCK_32KHZ | f, RTC_FREQ_SELECT);
-       spin_unlock_irqrestore(&rtc_lock, flags);
-
-       return 0;
-}
-
-static int cmos_irq_set_state(struct device *dev, int enabled)
-{
-       struct cmos_rtc *cmos = dev_get_drvdata(dev);
-       unsigned long   flags;
-
-       if (!is_valid_irq(cmos->irq))
-               return -ENXIO;
-
-       spin_lock_irqsave(&rtc_lock, flags);
-
-       if (enabled)
-               cmos_irq_enable(cmos, RTC_PIE);
-       else
-               cmos_irq_disable(cmos, RTC_PIE);
-
-       spin_unlock_irqrestore(&rtc_lock, flags);
-       return 0;
-}
-
 static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        struct cmos_rtc *cmos = dev_get_drvdata(dev);
@@ -440,25 +396,6 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
        return 0;
 }
 
-static int cmos_update_irq_enable(struct device *dev, unsigned int enabled)
-{
-       struct cmos_rtc *cmos = dev_get_drvdata(dev);
-       unsigned long   flags;
-
-       if (!is_valid_irq(cmos->irq))
-               return -EINVAL;
-
-       spin_lock_irqsave(&rtc_lock, flags);
-
-       if (enabled)
-               cmos_irq_enable(cmos, RTC_UIE);
-       else
-               cmos_irq_disable(cmos, RTC_UIE);
-
-       spin_unlock_irqrestore(&rtc_lock, flags);
-       return 0;
-}
-
 #if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
 
 static int cmos_procfs(struct device *dev, struct seq_file *seq)
@@ -503,10 +440,7 @@ static const struct rtc_class_ops cmos_rtc_ops = {
        .read_alarm             = cmos_read_alarm,
        .set_alarm              = cmos_set_alarm,
        .proc                   = cmos_procfs,
-       .irq_set_freq           = cmos_irq_set_freq,
-       .irq_set_state          = cmos_irq_set_state,
        .alarm_irq_enable       = cmos_alarm_irq_enable,
-       .update_irq_enable      = cmos_update_irq_enable,
 };
 
 /*----------------------------------------------------------------*/
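
All of cmos_irq_set_freq(), cmos_irq_set_state() and cmos_update_irq_enable() can go because the RTC core now emulates periodic and update interrupts itself, via the pie_timer hrtimer initialized in the rtc_device_register() hunk earlier. Roughly like the following sketch; the exact shape is an assumption, not the core's literal code:

/* Sketch: periodic-IRQ emulation with an hrtimer in the RTC core. */
static enum hrtimer_restart rtc_pie_tick(struct hrtimer *timer)
{
	struct rtc_device *rtc =
		container_of(timer, struct rtc_device, pie_timer);

	rtc_update_irq(rtc, 1, RTC_PF | RTC_IRQF);	/* one period elapsed */
	hrtimer_forward_now(timer,
			    ktime_set(0, NSEC_PER_SEC / rtc->irq_freq));
	return HRTIMER_RESTART;				/* keep ticking */
}
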
index 34647fc1ee98231dc7444a2ec8d4514d2c07ded1..8d46838dff8a0aaac95e032211bd9945f2ced1c4 100644 (file)
@@ -231,10 +231,6 @@ davinci_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
        case RTC_WIE_OFF:
                rtc_ctrl &= ~PRTCSS_RTC_CTRL_WEN;
                break;
-       case RTC_UIE_OFF:
-       case RTC_UIE_ON:
-               ret = -ENOTTY;
-               break;
        default:
                ret = -ENOIOCTLCMD;
        }
@@ -473,55 +469,6 @@ static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
        return 0;
 }
 
-static int davinci_rtc_irq_set_state(struct device *dev, int enabled)
-{
-       struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
-       unsigned long flags;
-       u8 rtc_ctrl;
-
-       spin_lock_irqsave(&davinci_rtc_lock, flags);
-
-       rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
-
-       if (enabled) {
-               while (rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL)
-                      & PRTCSS_RTC_CTRL_WDTBUS)
-                       cpu_relax();
-
-               rtc_ctrl |= PRTCSS_RTC_CTRL_TE;
-               rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
-
-               rtcss_write(davinci_rtc, 0x0, PRTCSS_RTC_CLKC_CNT);
-
-               rtc_ctrl |= PRTCSS_RTC_CTRL_TIEN |
-                           PRTCSS_RTC_CTRL_TMMD |
-                           PRTCSS_RTC_CTRL_TMRFLG;
-       } else
-               rtc_ctrl &= ~PRTCSS_RTC_CTRL_TIEN;
-
-       rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
-
-       spin_unlock_irqrestore(&davinci_rtc_lock, flags);
-
-       return 0;
-}
-
-static int davinci_rtc_irq_set_freq(struct device *dev, int freq)
-{
-       struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
-       unsigned long flags;
-       u16 tmr_counter = (0x8000 >> (ffs(freq) - 1));
-
-       spin_lock_irqsave(&davinci_rtc_lock, flags);
-
-       rtcss_write(davinci_rtc, tmr_counter & 0xFF, PRTCSS_RTC_TMR0);
-       rtcss_write(davinci_rtc, (tmr_counter & 0xFF00) >> 8, PRTCSS_RTC_TMR1);
-
-       spin_unlock_irqrestore(&davinci_rtc_lock, flags);
-
-       return 0;
-}
-
 static struct rtc_class_ops davinci_rtc_ops = {
        .ioctl                  = davinci_rtc_ioctl,
        .read_time              = davinci_rtc_read_time,
@@ -529,8 +476,6 @@ static struct rtc_class_ops davinci_rtc_ops = {
        .alarm_irq_enable       = davinci_rtc_alarm_irq_enable,
        .read_alarm             = davinci_rtc_read_alarm,
        .set_alarm              = davinci_rtc_set_alarm,
-       .irq_set_state          = davinci_rtc_irq_set_state,
-       .irq_set_freq           = davinci_rtc_irq_set_freq,
 };
 
 static int __init davinci_rtc_probe(struct platform_device *pdev)
index 37268e97de49e8f6d741dfd5e88271bb87f6566d..3fffd708711f52fd646692b068fd3d8e8aef0655 100644 (file)
@@ -397,29 +397,12 @@ static int ds1511_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
        return 0;
 }
 
-static int ds1511_rtc_update_irq_enable(struct device *dev,
-       unsigned int enabled)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-
-       if (pdata->irq <= 0)
-               return -EINVAL;
-       if (enabled)
-               pdata->irqen |= RTC_UF;
-       else
-               pdata->irqen &= ~RTC_UF;
-       ds1511_rtc_update_alarm(pdata);
-       return 0;
-}
-
 static const struct rtc_class_ops ds1511_rtc_ops = {
        .read_time              = ds1511_rtc_read_time,
        .set_time               = ds1511_rtc_set_time,
        .read_alarm             = ds1511_rtc_read_alarm,
        .set_alarm              = ds1511_rtc_set_alarm,
        .alarm_irq_enable       = ds1511_rtc_alarm_irq_enable,
-       .update_irq_enable      = ds1511_rtc_update_irq_enable,
 };
 
  static ssize_t
index ff432e2ca275a14311165a7e65f93326d3379f6b..fee41b97c9e89e3835734454a720fb772ed17202 100644 (file)
@@ -227,29 +227,12 @@ static int ds1553_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
        return 0;
 }
 
-static int ds1553_rtc_update_irq_enable(struct device *dev,
-       unsigned int enabled)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-
-       if (pdata->irq <= 0)
-               return -EINVAL;
-       if (enabled)
-               pdata->irqen |= RTC_UF;
-       else
-               pdata->irqen &= ~RTC_UF;
-       ds1553_rtc_update_alarm(pdata);
-       return 0;
-}
-
 static const struct rtc_class_ops ds1553_rtc_ops = {
        .read_time              = ds1553_rtc_read_time,
        .set_time               = ds1553_rtc_set_time,
        .read_alarm             = ds1553_rtc_read_alarm,
        .set_alarm              = ds1553_rtc_set_alarm,
        .alarm_irq_enable       = ds1553_rtc_alarm_irq_enable,
-       .update_irq_enable      = ds1553_rtc_update_irq_enable,
 };
 
 static ssize_t ds1553_nvram_read(struct file *filp, struct kobject *kobj,
index 23a9ee19764cb5358b87eb5c935474a82bbbae4f..27b7bf672ac624249b2a64abda0464af5d74f2cc 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
  *
- * Copyright (C) 2009-2010 Freescale Semiconductor.
+ * Copyright (C) 2009-2011 Freescale Semiconductor.
  * Author: Jack Lan <jack.lan@freescale.com>
  *
  * This program is free software; you can redistribute  it and/or modify it
@@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time)
                time->tm_hour = bcd2bin(hour);
        }
 
-       time->tm_wday = bcd2bin(week);
+       /* Day of the week is 0~6 in Linux, while it is 1~7 in the RTC chip */
+       time->tm_wday = bcd2bin(week) - 1;
        time->tm_mday = bcd2bin(day);
-       time->tm_mon = bcd2bin(month & 0x7F);
+       /* Linux tm_mon ranges over 0~11, while the chip's month field is 1~12 */
+       time->tm_mon = bcd2bin(month & 0x7F) - 1;
        if (century)
                add_century = 100;
 
@@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
        buf[0] = bin2bcd(time->tm_sec);
        buf[1] = bin2bcd(time->tm_min);
        buf[2] = bin2bcd(time->tm_hour);
-       buf[3] = bin2bcd(time->tm_wday); /* Day of the week */
+       /* Day of the week is 0~6 in Linux, while it is 1~7 in the RTC chip */
+       buf[3] = bin2bcd(time->tm_wday + 1);
        buf[4] = bin2bcd(time->tm_mday); /* Date */
-       buf[5] = bin2bcd(time->tm_mon);
+       /* Linux tm_mon ranges over 0~11, while the chip's month field is 1~12 */
+       buf[5] = bin2bcd(time->tm_mon + 1);
        if (time->tm_year >= 100) {
                buf[5] |= 0x80;
                buf[6] = bin2bcd(time->tm_year - 100);
@@ -335,23 +339,6 @@ static int ds3232_alarm_irq_enable(struct device *dev, unsigned int enabled)
        return 0;
 }
 
-static int ds3232_update_irq_enable(struct device *dev, unsigned int enabled)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ds3232 *ds3232 = i2c_get_clientdata(client);
-
-       if (client->irq <= 0)
-               return -EINVAL;
-
-       if (enabled)
-               ds3232->rtc->irq_data |= RTC_UF;
-       else
-               ds3232->rtc->irq_data &= ~RTC_UF;
-
-       ds3232_update_alarm(client);
-       return 0;
-}
-
 static irqreturn_t ds3232_irq(int irq, void *dev_id)
 {
        struct i2c_client *client = dev_id;
@@ -402,7 +389,6 @@ static const struct rtc_class_ops ds3232_rtc_ops = {
        .read_alarm = ds3232_read_alarm,
        .set_alarm = ds3232_set_alarm,
        .alarm_irq_enable = ds3232_alarm_irq_enable,
-       .update_irq_enable = ds3232_update_irq_enable,
 };
 
 static int __devinit ds3232_probe(struct i2c_client *client,
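
The read/set hunks above fix two off-by-one mismatches between the DS3232 registers and struct rtc_time: the chip counts the weekday 1~7 and the month 1~12, where Linux uses 0~6 and 0~11. The fixups isolated into a sketch (helper names are illustrative; bcd2bin()/bin2bcd() are the kernel's BCD helpers, and 0x7F masks off the century bit as in the patch):

/* Sketch: translate the chip's 1-based weekday/month to rtc_time and back. */
static void chip_to_tm(u8 week, u8 month, struct rtc_time *tm)
{
	tm->tm_wday = bcd2bin(week) - 1;		/* 1~7  -> 0~6  */
	tm->tm_mon  = bcd2bin(month & 0x7F) - 1;	/* 1~12 -> 0~11 */
}

static void tm_to_chip(const struct rtc_time *tm, u8 *week, u8 *month)
{
	*week  = bin2bcd(tm->tm_wday + 1);		/* 0~6  -> 1~7  */
	*month = bin2bcd(tm->tm_mon + 1);		/* 0~11 -> 1~12 */
}
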
index 2e16f72c90569fdb0fe6e26b16f6257e1604de2b..b6473631d18213e80ba404ec93b4ffe13d83e92d 100644 (file)
@@ -168,12 +168,6 @@ static int jz4740_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
        return ret;
 }
 
-static int jz4740_rtc_update_irq_enable(struct device *dev, unsigned int enable)
-{
-       struct jz4740_rtc *rtc = dev_get_drvdata(dev);
-       return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_1HZ_IRQ, enable);
-}
-
 static int jz4740_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
 {
        struct jz4740_rtc *rtc = dev_get_drvdata(dev);
@@ -185,7 +179,6 @@ static struct rtc_class_ops jz4740_rtc_ops = {
        .set_mmss       = jz4740_rtc_set_mmss,
        .read_alarm     = jz4740_rtc_read_alarm,
        .set_alarm      = jz4740_rtc_set_alarm,
-       .update_irq_enable = jz4740_rtc_update_irq_enable,
        .alarm_irq_enable = jz4740_rtc_alarm_irq_enable,
 };
 
index 5314b153bfba73f795d11b2099534af19c0098a5..c420064695598edbe9cdc42c5767d35cc21d2811 100644 (file)
@@ -282,12 +282,6 @@ static irqreturn_t mc13xxx_rtc_update_handler(int irq, void *dev)
        return IRQ_HANDLED;
 }
 
-static int mc13xxx_rtc_update_irq_enable(struct device *dev,
-               unsigned int enabled)
-{
-       return mc13xxx_rtc_irq_enable(dev, enabled, MC13XXX_IRQ_1HZ);
-}
-
 static int mc13xxx_rtc_alarm_irq_enable(struct device *dev,
                unsigned int enabled)
 {
@@ -300,7 +294,6 @@ static const struct rtc_class_ops mc13xxx_rtc_ops = {
        .read_alarm = mc13xxx_rtc_read_alarm,
        .set_alarm = mc13xxx_rtc_set_alarm,
        .alarm_irq_enable = mc13xxx_rtc_alarm_irq_enable,
-       .update_irq_enable = mc13xxx_rtc_update_irq_enable,
 };
 
 static irqreturn_t mc13xxx_rtc_reset_handler(int irq, void *dev)
index dfcdf0901d21e74ec4995d7b957fba2675a00537..b40c1ff1ebc8e4b5e003a32312664e5be07ee6d1 100644 (file)
@@ -240,32 +240,12 @@ static int mpc5121_rtc_alarm_irq_enable(struct device *dev,
        return 0;
 }
 
-static int mpc5121_rtc_update_irq_enable(struct device *dev,
-                                        unsigned int enabled)
-{
-       struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
-       struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
-       int val;
-
-       val = in_8(&regs->int_enable);
-
-       if (enabled)
-               val = (val & ~0x8) | 0x1;
-       else
-               val &= ~0x1;
-
-       out_8(&regs->int_enable, val);
-
-       return 0;
-}
-
 static const struct rtc_class_ops mpc5121_rtc_ops = {
        .read_time = mpc5121_rtc_read_time,
        .set_time = mpc5121_rtc_set_time,
        .read_alarm = mpc5121_rtc_read_alarm,
        .set_alarm = mpc5121_rtc_set_alarm,
        .alarm_irq_enable = mpc5121_rtc_alarm_irq_enable,
-       .update_irq_enable = mpc5121_rtc_update_irq_enable,
 };
 
 static int __devinit mpc5121_rtc_probe(struct platform_device *op,
index 8d2cf60271207e3ef135e0cd8e7cc062871a323d..b86bc328463b2ec2731c0028f2b08fae90cc5e04 100644 (file)
@@ -247,25 +247,6 @@ static int mrst_set_alarm(struct device *dev, struct rtc_wkalrm *t)
        return 0;
 }
 
-static int mrst_irq_set_state(struct device *dev, int enabled)
-{
-       struct mrst_rtc *mrst = dev_get_drvdata(dev);
-       unsigned long   flags;
-
-       if (!mrst->irq)
-               return -ENXIO;
-
-       spin_lock_irqsave(&rtc_lock, flags);
-
-       if (enabled)
-               mrst_irq_enable(mrst, RTC_PIE);
-       else
-               mrst_irq_disable(mrst, RTC_PIE);
-
-       spin_unlock_irqrestore(&rtc_lock, flags);
-       return 0;
-}
-
 /* Currently, the vRTC doesn't support UIE ON/OFF */
 static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
@@ -312,7 +293,6 @@ static const struct rtc_class_ops mrst_rtc_ops = {
        .read_alarm     = mrst_read_alarm,
        .set_alarm      = mrst_set_alarm,
        .proc           = mrst_procfs,
-       .irq_set_state  = mrst_irq_set_state,
        .alarm_irq_enable = mrst_rtc_alarm_irq_enable,
 };
 
index 0b06c1e03fd5b764d12d482c27e13cb19a00e14a..826ab64a8fa91d1a4524e7e3e7a2ae2ce565fec8 100644 (file)
@@ -274,12 +274,6 @@ static int mxc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
        return 0;
 }
 
-static int mxc_rtc_update_irq_enable(struct device *dev, unsigned int enabled)
-{
-       mxc_rtc_irq_enable(dev, RTC_1HZ_BIT, enabled);
-       return 0;
-}
-
 /*
  * This function reads the current RTC time into tm in Gregorian date.
  */
@@ -368,7 +362,6 @@ static struct rtc_class_ops mxc_rtc_ops = {
        .read_alarm             = mxc_rtc_read_alarm,
        .set_alarm              = mxc_rtc_set_alarm,
        .alarm_irq_enable       = mxc_rtc_alarm_irq_enable,
-       .update_irq_enable      = mxc_rtc_update_irq_enable,
 };
 
 static int __init mxc_rtc_probe(struct platform_device *pdev)
index ddb0857e15a4af29033c3dcb628c10a116462f76..781068d62f23256b9429899661dc023bb68e555c 100644 (file)
@@ -134,20 +134,6 @@ static void nuc900_rtc_bin2bcd(struct device *dev, struct rtc_time *settm,
        gettm->bcd_hour = bin2bcd(settm->tm_hour) << 16;
 }
 
-static int nuc900_update_irq_enable(struct device *dev, unsigned int enabled)
-{
-       struct nuc900_rtc *rtc = dev_get_drvdata(dev);
-
-       if (enabled)
-               __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)|
-                               (TICKINTENB), rtc->rtc_reg + REG_RTC_RIER);
-       else
-               __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)&
-                               (~TICKINTENB), rtc->rtc_reg + REG_RTC_RIER);
-
-       return 0;
-}
-
 static int nuc900_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        struct nuc900_rtc *rtc = dev_get_drvdata(dev);
@@ -234,7 +220,6 @@ static struct rtc_class_ops nuc900_rtc_ops = {
        .read_alarm = nuc900_rtc_read_alarm,
        .set_alarm = nuc900_rtc_set_alarm,
        .alarm_irq_enable = nuc900_alarm_irq_enable,
-       .update_irq_enable = nuc900_update_irq_enable,
 };
 
 static int __devinit nuc900_rtc_probe(struct platform_device *pdev)
index b4dbf3a319b30d52aff0e939c3d3f6638d7e8f4f..de0dd7b1f146e0e7fff07bd30487b1da9add0ca1 100644 (file)
@@ -135,44 +135,6 @@ static irqreturn_t rtc_irq(int irq, void *rtc)
        return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_RTC_INTF_DEV
-
-static int
-omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
-       u8 reg;
-
-       switch (cmd) {
-       case RTC_UIE_OFF:
-       case RTC_UIE_ON:
-               break;
-       default:
-               return -ENOIOCTLCMD;
-       }
-
-       local_irq_disable();
-       rtc_wait_not_busy();
-       reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
-       switch (cmd) {
-       /* UIE = Update Interrupt Enable (1/second) */
-       case RTC_UIE_OFF:
-               reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER;
-               break;
-       case RTC_UIE_ON:
-               reg |= OMAP_RTC_INTERRUPTS_IT_TIMER;
-               break;
-       }
-       rtc_wait_not_busy();
-       rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
-       local_irq_enable();
-
-       return 0;
-}
-
-#else
-#define        omap_rtc_ioctl  NULL
-#endif
-
 static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        u8 reg;
@@ -313,7 +275,6 @@ static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
 }
 
 static struct rtc_class_ops omap_rtc_ops = {
-       .ioctl          = omap_rtc_ioctl,
        .read_time      = omap_rtc_read_time,
        .set_time       = omap_rtc_set_time,
        .read_alarm     = omap_rtc_read_alarm,
index 25c0b3fd44f1ba901f96b0b8582a6941335640b4..a633abc428966a91384d92f2613113a3a8861eab 100644 (file)
@@ -131,18 +131,12 @@ static int pcap_rtc_alarm_irq_enable(struct device *dev, unsigned int en)
        return pcap_rtc_irq_enable(dev, PCAP_IRQ_TODA, en);
 }
 
-static int pcap_rtc_update_irq_enable(struct device *dev, unsigned int en)
-{
-       return pcap_rtc_irq_enable(dev, PCAP_IRQ_1HZ, en);
-}
-
 static const struct rtc_class_ops pcap_rtc_ops = {
        .read_time = pcap_rtc_read_time,
        .read_alarm = pcap_rtc_read_alarm,
        .set_alarm = pcap_rtc_set_alarm,
        .set_mmss = pcap_rtc_set_mmss,
        .alarm_irq_enable = pcap_rtc_alarm_irq_enable,
-       .update_irq_enable = pcap_rtc_update_irq_enable,
 };
 
 static int __devinit pcap_rtc_probe(struct platform_device *pdev)
index 16edf94ab42f2826f20024ef3a57778f8e15a0c6..f90c574f9d055a8cffb8398c03b51c83327c0d50 100644 (file)
@@ -106,25 +106,6 @@ pcf50633_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
        return 0;
 }
 
-static int
-pcf50633_rtc_update_irq_enable(struct device *dev, unsigned int enabled)
-{
-       struct pcf50633_rtc *rtc = dev_get_drvdata(dev);
-       int err;
-
-       if (enabled)
-               err = pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_SECOND);
-       else
-               err = pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_SECOND);
-
-       if (err < 0)
-               return err;
-
-       rtc->second_enabled = enabled;
-
-       return 0;
-}
-
 static int pcf50633_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
        struct pcf50633_rtc *rtc;
@@ -262,8 +243,7 @@ static struct rtc_class_ops pcf50633_rtc_ops = {
        .set_time               = pcf50633_rtc_set_time,
        .read_alarm             = pcf50633_rtc_read_alarm,
        .set_alarm              = pcf50633_rtc_set_alarm,
-       .alarm_irq_enable       = pcf50633_rtc_alarm_irq_enable,
-       .update_irq_enable      = pcf50633_rtc_update_irq_enable,
+       .alarm_irq_enable       = pcf50633_rtc_alarm_irq_enable,
 };
 
 static void pcf50633_rtc_irq(int irq, void *data)
index bbdb2f02798a13187c91475696fa9f84594f8f47..d554368c9f577dfc8c9a3d1d57f706543b460ad0 100644 (file)
@@ -35,11 +35,6 @@ static irqreturn_t pl030_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int pl030_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
-       return -ENOIOCTLCMD;
-}
-
 static int pl030_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
        struct pl030_rtc *rtc = dev_get_drvdata(dev);
@@ -96,7 +91,6 @@ static int pl030_set_time(struct device *dev, struct rtc_time *tm)
 }
 
 static const struct rtc_class_ops pl030_ops = {
-       .ioctl          = pl030_ioctl,
        .read_time      = pl030_read_time,
        .set_time       = pl030_set_time,
        .read_alarm     = pl030_read_alarm,
index b7a6690e5b35e8744295bf212a8e0d75e0d8dd6f..d829ea63c4fb9c97ba358bf5dac661353934ca6e 100644 (file)
@@ -293,57 +293,6 @@ static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
        return ret;
 }
 
-/* Periodic interrupt is only available in ST variants. */
-static int pl031_irq_set_state(struct device *dev, int enabled)
-{
-       struct pl031_local *ldata = dev_get_drvdata(dev);
-
-       if (enabled == 1) {
-               /* Clear any pending timer interrupt. */
-               writel(RTC_BIT_PI, ldata->base + RTC_ICR);
-
-               writel(readl(ldata->base + RTC_IMSC) | RTC_BIT_PI,
-                       ldata->base + RTC_IMSC);
-
-               /* Now start the timer */
-               writel(readl(ldata->base + RTC_TCR) | RTC_TCR_EN,
-                       ldata->base + RTC_TCR);
-
-       } else {
-               writel(readl(ldata->base + RTC_IMSC) & (~RTC_BIT_PI),
-                       ldata->base + RTC_IMSC);
-
-               /* Also stop the timer */
-               writel(readl(ldata->base + RTC_TCR) & (~RTC_TCR_EN),
-                       ldata->base + RTC_TCR);
-       }
-       /* Wait at least 1 RTC32 clock cycle to ensure next access
-        * to RTC_TCR will succeed.
-        */
-       udelay(40);
-
-       return 0;
-}
-
-static int pl031_irq_set_freq(struct device *dev, int freq)
-{
-       struct pl031_local *ldata = dev_get_drvdata(dev);
-
-       /* Cant set timer if it is already enabled */
-       if (readl(ldata->base + RTC_TCR) & RTC_TCR_EN) {
-               dev_err(dev, "can't change frequency while timer enabled\n");
-               return -EINVAL;
-       }
-
-       /* If self start bit in RTC_TCR is set timer will start here,
-        * but we never set that bit. Instead we start the timer when
-        * set_state is called with enabled == 1.
-        */
-       writel(RTC_TIMER_FREQ / freq, ldata->base + RTC_TLR);
-
-       return 0;
-}
-
 static int pl031_remove(struct amba_device *adev)
 {
        struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
@@ -440,8 +389,6 @@ static struct rtc_class_ops stv1_pl031_ops = {
        .read_alarm = pl031_read_alarm,
        .set_alarm = pl031_set_alarm,
        .alarm_irq_enable = pl031_alarm_irq_enable,
-       .irq_set_state = pl031_irq_set_state,
-       .irq_set_freq = pl031_irq_set_freq,
 };
 
 /* And the second ST derivative */
@@ -451,8 +398,6 @@ static struct rtc_class_ops stv2_pl031_ops = {
        .read_alarm = pl031_stv2_read_alarm,
        .set_alarm = pl031_stv2_set_alarm,
        .alarm_irq_enable = pl031_alarm_irq_enable,
-       .irq_set_state = pl031_irq_set_state,
-       .irq_set_freq = pl031_irq_set_freq,
 };
 
 static struct amba_id pl031_ids[] = {
index 242bbf86c74a7bbfa9f9e5c8748c41c047a6b1ff..0a59fda5c09d176c5fa0af1f90625c04a29232c4 100644 (file)
@@ -69,6 +69,14 @@ static int rtc_proc_show(struct seq_file *seq, void *offset)
                                alrm.enabled ? "yes" : "no");
                seq_printf(seq, "alrm_pending\t: %s\n",
                                alrm.pending ? "yes" : "no");
+               seq_printf(seq, "update IRQ enabled\t: %s\n",
+                       (rtc->uie_rtctimer.enabled) ? "yes" : "no");
+               seq_printf(seq, "periodic IRQ enabled\t: %s\n",
+                       (rtc->pie_enabled) ? "yes" : "no");
+               seq_printf(seq, "periodic IRQ frequency\t: %d\n",
+                       rtc->irq_freq);
+               seq_printf(seq, "max user IRQ frequency\t: %d\n",
+                       rtc->max_user_freq);
        }
 
        seq_printf(seq, "24hr\t\t: yes\n");
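
With these four seq_printf() calls, /proc/driver/rtc also reports the state the core keeps for the emulated IRQs; output along these lines (values illustrative):

update IRQ enabled	: no
periodic IRQ enabled	: no
periodic IRQ frequency	: 1024
max user IRQ frequency	: 64
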
index 29e867a1aaa8ba363637ab899148744f2fc616f9..fc9f4991574be31d2f1f576416889336acc03e0e 100644 (file)
@@ -209,32 +209,6 @@ static void pxa_rtc_release(struct device *dev)
        free_irq(pxa_rtc->irq_1Hz, dev);
 }
 
-static int pxa_periodic_irq_set_freq(struct device *dev, int freq)
-{
-       struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
-       int period_ms;
-
-       if (freq < 1 || freq > MAXFREQ_PERIODIC)
-               return -EINVAL;
-
-       period_ms = 1000 / freq;
-       rtc_writel(pxa_rtc, PIAR, period_ms);
-
-       return 0;
-}
-
-static int pxa_periodic_irq_set_state(struct device *dev, int enabled)
-{
-       struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
-
-       if (enabled)
-               rtsr_set_bits(pxa_rtc, RTSR_PIALE | RTSR_PICE);
-       else
-               rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_PICE);
-
-       return 0;
-}
-
 static int pxa_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
@@ -250,21 +224,6 @@ static int pxa_alarm_irq_enable(struct device *dev, unsigned int enabled)
        return 0;
 }
 
-static int pxa_update_irq_enable(struct device *dev, unsigned int enabled)
-{
-       struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
-
-       spin_lock_irq(&pxa_rtc->lock);
-
-       if (enabled)
-               rtsr_set_bits(pxa_rtc, RTSR_HZE);
-       else
-               rtsr_clear_bits(pxa_rtc, RTSR_HZE);
-
-       spin_unlock_irq(&pxa_rtc->lock);
-       return 0;
-}
-
 static int pxa_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
        struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
@@ -346,10 +305,7 @@ static const struct rtc_class_ops pxa_rtc_ops = {
        .read_alarm = pxa_rtc_read_alarm,
        .set_alarm = pxa_rtc_set_alarm,
        .alarm_irq_enable = pxa_alarm_irq_enable,
-       .update_irq_enable = pxa_update_irq_enable,
        .proc = pxa_rtc_proc,
-       .irq_set_state = pxa_periodic_irq_set_state,
-       .irq_set_freq = pxa_periodic_irq_set_freq,
 };
 
 static int __init pxa_rtc_probe(struct platform_device *pdev)
index 6aaa1550e3b1ebe7094d3d33107fb34bb44779e7..85c1b848dd72383b6253fbef04d2fa870719d076 100644 (file)
@@ -281,57 +281,6 @@ static int rs5c372_rtc_set_time(struct device *dev, struct rtc_time *tm)
        return rs5c372_set_datetime(to_i2c_client(dev), tm);
 }
 
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-
-static int
-rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
-       struct i2c_client       *client = to_i2c_client(dev);
-       struct rs5c372          *rs5c = i2c_get_clientdata(client);
-       unsigned char           buf;
-       int                     status, addr;
-
-       buf = rs5c->regs[RS5C_REG_CTRL1];
-       switch (cmd) {
-       case RTC_UIE_OFF:
-       case RTC_UIE_ON:
-               /* some 327a modes use a different IRQ pin for 1Hz irqs */
-               if (rs5c->type == rtc_rs5c372a
-                               && (buf & RS5C372A_CTRL1_SL1))
-                       return -ENOIOCTLCMD;
-       default:
-               return -ENOIOCTLCMD;
-       }
-
-       status = rs5c_get_regs(rs5c);
-       if (status < 0)
-               return status;
-
-       addr = RS5C_ADDR(RS5C_REG_CTRL1);
-       switch (cmd) {
-       case RTC_UIE_OFF:       /* update off */
-               buf &= ~RS5C_CTRL1_CT_MASK;
-               break;
-       case RTC_UIE_ON:        /* update on */
-               buf &= ~RS5C_CTRL1_CT_MASK;
-               buf |= RS5C_CTRL1_CT4;
-               break;
-       }
-
-       if (i2c_smbus_write_byte_data(client, addr, buf) < 0) {
-               printk(KERN_WARNING "%s: can't update alarm\n",
-                       rs5c->rtc->name);
-               status = -EIO;
-       } else
-               rs5c->regs[RS5C_REG_CTRL1] = buf;
-
-       return status;
-}
-
-#else
-#define        rs5c_rtc_ioctl  NULL
-#endif
-
 
 static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
@@ -480,7 +429,6 @@ static int rs5c372_rtc_proc(struct device *dev, struct seq_file *seq)
 
 static const struct rtc_class_ops rs5c372_rtc_ops = {
        .proc           = rs5c372_rtc_proc,
-       .ioctl          = rs5c_rtc_ioctl,
        .read_time      = rs5c372_rtc_read_time,
        .set_time       = rs5c372_rtc_set_time,
        .read_alarm     = rs5c_read_alarm,
index af32a62e12a82365cf2e796f4d66d961505f42aa..fde172fb2abe68565b3a5ed4c326ad91381a59ce 100644 (file)
@@ -424,37 +424,12 @@ static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
        return 0;
 }
 
-static int rx8025_irq_set_state(struct device *dev, int enabled)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct rx8025_data *rx8025 = i2c_get_clientdata(client);
-       int ctrl1;
-       int err;
-
-       if (client->irq <= 0)
-               return -ENXIO;
-
-       ctrl1 = rx8025->ctrl1 & ~RX8025_BIT_CTRL1_CT;
-       if (enabled)
-               ctrl1 |= RX8025_BIT_CTRL1_CT_1HZ;
-       if (ctrl1 != rx8025->ctrl1) {
-               rx8025->ctrl1 = ctrl1;
-               err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
-                                      rx8025->ctrl1);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
 static struct rtc_class_ops rx8025_rtc_ops = {
        .read_time = rx8025_get_time,
        .set_time = rx8025_set_time,
        .read_alarm = rx8025_read_alarm,
        .set_alarm = rx8025_set_alarm,
        .alarm_irq_enable = rx8025_alarm_irq_enable,
-       .irq_set_state  = rx8025_irq_set_state,
 };
 
 /*
index cf953ecbfca934e8b9c7058f976610ff092016ae..714964913e5eb37d9c9dff9f0f346bbe6d41d5e7 100644 (file)
@@ -77,47 +77,18 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id)
 }
 
 /* Update control registers */
-static void s3c_rtc_setaie(int to)
+static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
 {
        unsigned int tmp;
 
-       pr_debug("%s: aie=%d\n", __func__, to);
+       pr_debug("%s: aie=%d\n", __func__, enabled);
 
        tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
 
-       if (to)
+       if (enabled)
                tmp |= S3C2410_RTCALM_ALMEN;
 
        writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
-}
-
-static int s3c_rtc_setpie(struct device *dev, int enabled)
-{
-       unsigned int tmp;
-
-       pr_debug("%s: pie=%d\n", __func__, enabled);
-
-       spin_lock_irq(&s3c_rtc_pie_lock);
-
-       if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
-               tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
-               tmp &= ~S3C64XX_RTCCON_TICEN;
-
-               if (enabled)
-                       tmp |= S3C64XX_RTCCON_TICEN;
-
-               writew(tmp, s3c_rtc_base + S3C2410_RTCCON);
-       } else {
-               tmp = readb(s3c_rtc_base + S3C2410_TICNT);
-               tmp &= ~S3C2410_TICNT_ENABLE;
-
-               if (enabled)
-                       tmp |= S3C2410_TICNT_ENABLE;
-
-               writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
-       }
-
-       spin_unlock_irq(&s3c_rtc_pie_lock);
 
        return 0;
 }
@@ -308,7 +279,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 
        writeb(alrm_en, base + S3C2410_RTCALM);
 
-       s3c_rtc_setaie(alrm->enabled);
+       s3c_rtc_setaie(dev, alrm->enabled);
 
        return 0;
 }
@@ -377,8 +348,6 @@ static const struct rtc_class_ops s3c_rtcops = {
        .set_time       = s3c_rtc_settime,
        .read_alarm     = s3c_rtc_getalarm,
        .set_alarm      = s3c_rtc_setalarm,
-       .irq_set_freq   = s3c_rtc_setfreq,
-       .irq_set_state  = s3c_rtc_setpie,
        .proc           = s3c_rtc_proc,
        .alarm_irq_enable = s3c_rtc_setaie,
 };
@@ -440,7 +409,6 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
        rtc_device_unregister(rtc);
 
-       s3c_rtc_setpie(&dev->dev, 0);
-       s3c_rtc_setaie(0);
+       s3c_rtc_setaie(&dev->dev, 0);
 
        clk_disable(rtc_clk);
        clk_put(rtc_clk);
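
The s3c_rtc_setaie() conversion mirrors the at91sam9 hunk earlier that corrected a misspelled .alarm_irq_enabled initializer: the core dispatches alarm enable/disable through rtc_class_ops.alarm_irq_enable, which (as of this series; treat the exact prototype as an assumption) takes the device and an unsigned int flag:

/* Sketch: the callback shape the drivers above are converted to. */
static int my_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	/* set or clear the hardware's alarm-interrupt enable bit here */
	return 0;	/* 0 on success, -errno on failure */
}
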
index 5dfe5ffcb0d332700eb98027f91775e3541c58d3..0b40bb88a88400fa8505ac83c7aa73606da9e183 100644 (file)
@@ -43,7 +43,6 @@
 #define RTC_DEF_TRIM           0
 
 static const unsigned long RTC_FREQ = 1024;
-static unsigned long timer_freq;
 static struct rtc_time rtc_alarm;
 static DEFINE_SPINLOCK(sa1100_rtc_lock);
 
@@ -156,114 +155,11 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int sa1100_irq_set_freq(struct device *dev, int freq)
-{
-       if (freq < 1 || freq > timer_freq) {
-               return -EINVAL;
-       } else {
-               struct rtc_device *rtc = (struct rtc_device *)dev;
-
-               rtc->irq_freq = freq;
-
-               return 0;
-       }
-}
-
-static int rtc_timer1_count;
-
-static int sa1100_irq_set_state(struct device *dev, int enabled)
-{
-       spin_lock_irq(&sa1100_rtc_lock);
-       if (enabled) {
-               struct rtc_device *rtc = (struct rtc_device *)dev;
-
-               OSMR1 = timer_freq / rtc->irq_freq + OSCR;
-               OIER |= OIER_E1;
-               rtc_timer1_count = 1;
-       } else {
-               OIER &= ~OIER_E1;
-       }
-       spin_unlock_irq(&sa1100_rtc_lock);
-
-       return 0;
-}
-
-static inline int sa1100_timer1_retrigger(struct rtc_device *rtc)
-{
-       unsigned long diff;
-       unsigned long period = timer_freq / rtc->irq_freq;
-
-       spin_lock_irq(&sa1100_rtc_lock);
-
-       do {
-               OSMR1 += period;
-               diff = OSMR1 - OSCR;
-               /* If OSCR > OSMR1, diff is a very large number (unsigned
-                * math). This means we have a lost interrupt. */
-       } while (diff > period);
-       OIER |= OIER_E1;
-
-       spin_unlock_irq(&sa1100_rtc_lock);
-
-       return 0;
-}
-
-static irqreturn_t timer1_interrupt(int irq, void *dev_id)
-{
-       struct platform_device *pdev = to_platform_device(dev_id);
-       struct rtc_device *rtc = platform_get_drvdata(pdev);
-
-       /*
-        * If we match for the first time, rtc_timer1_count will be 1.
-        * Otherwise, we wrapped around (very unlikely but
-        * still possible) so compute the amount of missed periods.
-        * The match reg is updated only when the data is actually retrieved
-        * to avoid unnecessary interrupts.
-        */
-       OSSR = OSSR_M1; /* clear match on timer1 */
-
-       rtc_update_irq(rtc, rtc_timer1_count, RTC_PF | RTC_IRQF);
-
-       if (rtc_timer1_count == 1)
-               rtc_timer1_count =
-                       (rtc->irq_freq * ((1 << 30) / (timer_freq >> 2)));
-
-       /* retrigger. */
-       sa1100_timer1_retrigger(rtc);
-
-       return IRQ_HANDLED;
-}
-
-static int sa1100_rtc_read_callback(struct device *dev, int data)
-{
-       if (data & RTC_PF) {
-               struct rtc_device *rtc = (struct rtc_device *)dev;
-
-               /* interpolate missed periods and set match for the next */
-               unsigned long period = timer_freq / rtc->irq_freq;
-               unsigned long oscr = OSCR;
-               unsigned long osmr1 = OSMR1;
-               unsigned long missed = (oscr - osmr1)/period;
-               data += missed << 8;
-               OSSR = OSSR_M1; /* clear match on timer 1 */
-               OSMR1 = osmr1 + (missed + 1)*period;
-               /* Ensure we didn't miss another match in the mean time.
-                * Here we compare (match - OSCR) 8 instead of 0 --
-                * see comment in pxa_timer_interrupt() for explanation.
-                */
-               while ((signed long)((osmr1 = OSMR1) - OSCR) <= 8) {
-                       data += 0x100;
-                       OSSR = OSSR_M1; /* clear match on timer 1 */
-                       OSMR1 = osmr1 + period;
-               }
-       }
-       return data;
-}
-
 static int sa1100_rtc_open(struct device *dev)
 {
        int ret;
-       struct rtc_device *rtc = (struct rtc_device *)dev;
+       struct platform_device *plat_dev = to_platform_device(dev);
+       struct rtc_device *rtc = platform_get_drvdata(plat_dev);
 
        ret = request_irq(IRQ_RTC1Hz, sa1100_rtc_interrupt, IRQF_DISABLED,
                "rtc 1Hz", dev);
@@ -277,19 +173,11 @@ static int sa1100_rtc_open(struct device *dev)
                dev_err(dev, "IRQ %d already in use.\n", IRQ_RTCAlrm);
                goto fail_ai;
        }
-       ret = request_irq(IRQ_OST1, timer1_interrupt, IRQF_DISABLED,
-               "rtc timer", dev);
-       if (ret) {
-               dev_err(dev, "IRQ %d already in use.\n", IRQ_OST1);
-               goto fail_pi;
-       }
        rtc->max_user_freq = RTC_FREQ;
-       sa1100_irq_set_freq(dev, RTC_FREQ);
+       rtc_irq_set_freq(rtc, NULL, RTC_FREQ);
 
        return 0;
 
- fail_pi:
-       free_irq(IRQ_RTCAlrm, dev);
  fail_ai:
        free_irq(IRQ_RTC1Hz, dev);
  fail_ui:
@@ -304,30 +192,10 @@ static void sa1100_rtc_release(struct device *dev)
        OSSR = OSSR_M1;
        spin_unlock_irq(&sa1100_rtc_lock);
 
-       free_irq(IRQ_OST1, dev);
        free_irq(IRQ_RTCAlrm, dev);
        free_irq(IRQ_RTC1Hz, dev);
 }
 
-
-static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
-               unsigned long arg)
-{
-       switch (cmd) {
-       case RTC_UIE_OFF:
-               spin_lock_irq(&sa1100_rtc_lock);
-               RTSR &= ~RTSR_HZE;
-               spin_unlock_irq(&sa1100_rtc_lock);
-               return 0;
-       case RTC_UIE_ON:
-               spin_lock_irq(&sa1100_rtc_lock);
-               RTSR |= RTSR_HZE;
-               spin_unlock_irq(&sa1100_rtc_lock);
-               return 0;
-       }
-       return -ENOIOCTLCMD;
-}
-
 static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        spin_lock_irq(&sa1100_rtc_lock);
@@ -386,31 +254,20 @@ static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 
 static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
 {
-       struct rtc_device *rtc = (struct rtc_device *)dev;
-
-       seq_printf(seq, "trim/divider\t: 0x%08x\n", (u32) RTTR);
-       seq_printf(seq, "update_IRQ\t: %s\n",
-                       (RTSR & RTSR_HZE) ? "yes" : "no");
-       seq_printf(seq, "periodic_IRQ\t: %s\n",
-                       (OIER & OIER_E1) ? "yes" : "no");
-       seq_printf(seq, "periodic_freq\t: %d\n", rtc->irq_freq);
-       seq_printf(seq, "RTSR\t\t: 0x%08x\n", (u32)RTSR);
+       seq_printf(seq, "trim/divider\t\t: 0x%08x\n", (u32) RTTR);
+       seq_printf(seq, "RTSR\t\t\t: 0x%08x\n", (u32)RTSR);
 
        return 0;
 }
 
 static const struct rtc_class_ops sa1100_rtc_ops = {
        .open = sa1100_rtc_open,
-       .read_callback = sa1100_rtc_read_callback,
        .release = sa1100_rtc_release,
-       .ioctl = sa1100_rtc_ioctl,
        .read_time = sa1100_rtc_read_time,
        .set_time = sa1100_rtc_set_time,
        .read_alarm = sa1100_rtc_read_alarm,
        .set_alarm = sa1100_rtc_set_alarm,
        .proc = sa1100_rtc_proc,
-       .irq_set_freq = sa1100_irq_set_freq,
-       .irq_set_state = sa1100_irq_set_state,
        .alarm_irq_enable = sa1100_rtc_alarm_irq_enable,
 };
 
@@ -418,8 +275,6 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
 {
        struct rtc_device *rtc;
 
-       timer_freq = get_clock_tick_rate();
-
        /*
         * According to the manual we should be able to let RTTR be zero
         * and then a default diviser for a 32.768KHz clock is used.
@@ -445,11 +300,6 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, rtc);
 
-       /* Set the irq_freq */
-       /*TODO: Find out who is messing with this value after we initialize
-        * it here.*/
-       rtc->irq_freq = RTC_FREQ;
-
        /* Fix for a nasty initialization problem the in SA11xx RTSR register.
         * See also the comments in sa1100_rtc_interrupt().
         *
index 93314a9e7fa9baa8bac299227c8ed6067333ff7e..e55dc1ac83ab5bd229093a786ba08544bef73d5d 100644 (file)
@@ -344,27 +344,6 @@ static inline void sh_rtc_setcie(struct device *dev, unsigned int enable)
        spin_unlock_irq(&rtc->lock);
 }
 
-static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
-       struct sh_rtc *rtc = dev_get_drvdata(dev);
-       unsigned int ret = 0;
-
-       switch (cmd) {
-       case RTC_UIE_OFF:
-               rtc->periodic_freq &= ~PF_OXS;
-               sh_rtc_setcie(dev, 0);
-               break;
-       case RTC_UIE_ON:
-               rtc->periodic_freq |= PF_OXS;
-               sh_rtc_setcie(dev, 1);
-               break;
-       default:
-               ret = -ENOIOCTLCMD;
-       }
-
-       return ret;
-}
-
 static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        sh_rtc_setaie(dev, enabled);
@@ -598,13 +577,10 @@ static int sh_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
 }
 
 static struct rtc_class_ops sh_rtc_ops = {
-       .ioctl          = sh_rtc_ioctl,
        .read_time      = sh_rtc_read_time,
        .set_time       = sh_rtc_set_time,
        .read_alarm     = sh_rtc_read_alarm,
        .set_alarm      = sh_rtc_set_alarm,
-       .irq_set_state  = sh_rtc_irq_set_state,
-       .irq_set_freq   = sh_rtc_irq_set_freq,
        .proc           = sh_rtc_proc,
        .alarm_irq_enable = sh_rtc_alarm_irq_enable,
 };
index 7e7d0c806f2db8eaea1da72cbf3e3052649a7f1b..572e9534b591e78412c6a96cae600bfb762d0889 100644 (file)
@@ -115,19 +115,6 @@ static int stmp3xxx_alarm_irq_enable(struct device *dev, unsigned int enabled)
        return 0;
 }
 
-static int stmp3xxx_update_irq_enable(struct device *dev, unsigned int enabled)
-{
-       struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
-
-       if (enabled)
-               stmp3xxx_setl(BM_RTC_CTRL_ONEMSEC_IRQ_EN,
-                               rtc_data->io + HW_RTC_CTRL);
-       else
-               stmp3xxx_clearl(BM_RTC_CTRL_ONEMSEC_IRQ_EN,
-                               rtc_data->io + HW_RTC_CTRL);
-       return 0;
-}
-
 static int stmp3xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
 {
        struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
@@ -149,8 +136,6 @@ static int stmp3xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
 static struct rtc_class_ops stmp3xxx_rtc_ops = {
        .alarm_irq_enable =
                          stmp3xxx_alarm_irq_enable,
-       .update_irq_enable =
-                         stmp3xxx_update_irq_enable,
        .read_time      = stmp3xxx_rtc_gettime,
        .set_mmss       = stmp3xxx_rtc_set_mmss,
        .read_alarm     = stmp3xxx_rtc_read_alarm,
index a82d6fe970763e5388d787f4212c13228d6ea096..7e96254bd365eae1f73b5fc296f7e705b58e05ee 100644 (file)
@@ -78,11 +78,16 @@ static ssize_t test_irq_store(struct device *dev,
        struct rtc_device *rtc = platform_get_drvdata(plat_dev);
 
        retval = count;
-       if (strncmp(buf, "tick", 4) == 0)
+       if (strncmp(buf, "tick", 4) == 0 && rtc->pie_enabled)
                rtc_update_irq(rtc, 1, RTC_PF | RTC_IRQF);
-       else if (strncmp(buf, "alarm", 5) == 0)
-               rtc_update_irq(rtc, 1, RTC_AF | RTC_IRQF);
-       else if (strncmp(buf, "update", 6) == 0)
+       else if (strncmp(buf, "alarm", 5) == 0) {
+               struct rtc_wkalrm alrm;
+               int err = rtc_read_alarm(rtc, &alrm);
+
+               if (!err && alrm.enabled)
+                       rtc_update_irq(rtc, 1, RTC_AF | RTC_IRQF);
+
+       } else if (strncmp(buf, "update", 6) == 0 && rtc->uie_rtctimer.enabled)
                rtc_update_irq(rtc, 1, RTC_UF | RTC_IRQF);
        else
                retval = -EINVAL;
index ed1b86828124e217ff3bfbf6483d0ab1afa7d216..f9a2799c44d6e177f154f23f938acb8de47db208 100644 (file)
@@ -213,18 +213,6 @@ static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
        return ret;
 }
 
-static int twl_rtc_update_irq_enable(struct device *dev, unsigned enabled)
-{
-       int ret;
-
-       if (enabled)
-               ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
-       else
-               ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
-
-       return ret;
-}
-
 /*
  * Gets current TWL RTC time and date parameters.
  *
@@ -433,7 +421,6 @@ static struct rtc_class_ops twl_rtc_ops = {
        .read_alarm     = twl_rtc_read_alarm,
        .set_alarm      = twl_rtc_set_alarm,
        .alarm_irq_enable = twl_rtc_alarm_irq_enable,
-       .update_irq_enable = twl_rtc_update_irq_enable,
 };
 
 /*----------------------------------------------------------------------*/
index 769190ac6d11ec8e9401dba1735de6d8cc9c4196..c5698cda366a910a6f16008248c3c78aac62823f 100644 (file)
@@ -207,36 +207,6 @@ static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
        return 0;
 }
 
-static int vr41xx_rtc_irq_set_freq(struct device *dev, int freq)
-{
-       u64 count;
-
-       if (!is_power_of_2(freq))
-               return -EINVAL;
-       count = RTC_FREQUENCY;
-       do_div(count, freq);
-
-       spin_lock_irq(&rtc_lock);
-
-       periodic_count = count;
-       rtc1_write(RTCL1LREG, periodic_count);
-       rtc1_write(RTCL1HREG, periodic_count >> 16);
-
-       spin_unlock_irq(&rtc_lock);
-
-       return 0;
-}
-
-static int vr41xx_rtc_irq_set_state(struct device *dev, int enabled)
-{
-       if (enabled)
-               enable_irq(pie_irq);
-       else
-               disable_irq(pie_irq);
-
-       return 0;
-}
-
 static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 {
        switch (cmd) {
@@ -308,8 +278,6 @@ static const struct rtc_class_ops vr41xx_rtc_ops = {
        .set_time       = vr41xx_rtc_set_time,
        .read_alarm     = vr41xx_rtc_read_alarm,
        .set_alarm      = vr41xx_rtc_set_alarm,
-       .irq_set_freq   = vr41xx_rtc_irq_set_freq,
-       .irq_set_state  = vr41xx_rtc_irq_set_state,
 };
 
 static int __devinit rtc_probe(struct platform_device *pdev)
index 82931dc65c0bbc505460f4c3c1710accd3453723..bdc909bd56da0f4c7c615349df6f43023bd52517 100644 (file)
@@ -315,21 +315,6 @@ static int wm831x_rtc_alarm_irq_enable(struct device *dev,
                return wm831x_rtc_stop_alarm(wm831x_rtc);
 }
 
-static int wm831x_rtc_update_irq_enable(struct device *dev,
-                                       unsigned int enabled)
-{
-       struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
-       int val;
-
-       if (enabled)
-               val = 1 << WM831X_RTC_PINT_FREQ_SHIFT;
-       else
-               val = 0;
-
-       return wm831x_set_bits(wm831x_rtc->wm831x, WM831X_RTC_CONTROL,
-                              WM831X_RTC_PINT_FREQ_MASK, val);
-}
-
 static irqreturn_t wm831x_alm_irq(int irq, void *data)
 {
        struct wm831x_rtc *wm831x_rtc = data;
@@ -354,7 +339,6 @@ static const struct rtc_class_ops wm831x_rtc_ops = {
        .read_alarm = wm831x_rtc_readalarm,
        .set_alarm = wm831x_rtc_setalarm,
        .alarm_irq_enable = wm831x_rtc_alarm_irq_enable,
-       .update_irq_enable = wm831x_rtc_update_irq_enable,
 };
 
 #ifdef CONFIG_PM
index 3d0dc76b38af24ef47e35b209da51a04e70d2087..66421426e404521c5ecafda56c49e0e34df594f9 100644 (file)
@@ -302,26 +302,6 @@ static int wm8350_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
        return ret;
 }
 
-static int wm8350_rtc_update_irq_enable(struct device *dev,
-                                       unsigned int enabled)
-{
-       struct wm8350 *wm8350 = dev_get_drvdata(dev);
-
-       /* Suppress duplicate changes since genirq nests enable and
-        * disable calls. */
-       if (enabled == wm8350->rtc.update_enabled)
-               return 0;
-
-       if (enabled)
-               wm8350_unmask_irq(wm8350, WM8350_IRQ_RTC_SEC);
-       else
-               wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
-
-       wm8350->rtc.update_enabled = enabled;
-
-       return 0;
-}
-
 static irqreturn_t wm8350_rtc_alarm_handler(int irq, void *data)
 {
        struct wm8350 *wm8350 = data;
@@ -357,7 +337,6 @@ static const struct rtc_class_ops wm8350_rtc_ops = {
        .read_alarm = wm8350_rtc_readalarm,
        .set_alarm = wm8350_rtc_setalarm,
        .alarm_irq_enable = wm8350_rtc_alarm_irq_enable,
-       .update_irq_enable = wm8350_rtc_update_irq_enable,
 };
 
 #ifdef CONFIG_PM
index c881a14fa5dd7796c2f07a08120e3891254a66df..1f6a4d894e73cf6c6604551f10f862510b0444b9 100644 (file)
@@ -62,8 +62,8 @@ static int xpram_devs;
 /*
  * Parameter parsing functions.
  */
-static int __initdata devs = XPRAM_DEVS;
-static char __initdata *sizes[XPRAM_MAX_DEVS];
+static int devs = XPRAM_DEVS;
+static char *sizes[XPRAM_MAX_DEVS];
 
 module_param(devs, int, 0);
 module_param_array(sizes, charp, NULL, 0);
index 8cd58e412b5eae2cfe3188a03f616190146f8db0..5ad44daef73bdf5ea54eac6e5885c068042d7dd1 100644 (file)
@@ -460,7 +460,8 @@ kbd_ioctl(struct kbd_data *kbd, struct file *file,
          unsigned int cmd, unsigned long arg)
 {
        void __user *argp;
-       int ct, perm;
+       unsigned int ct;
+       int perm;
 
        argp = (void __user *)arg;
 
index 7a242f07363219a37f1dd4708bd1f98b5f7b4396..267b54e8ff5ae1b247c5825f2d421a3d6af08c72 100644 (file)
@@ -280,6 +280,14 @@ tape_do_io_free(struct tape_device *device, struct tape_request *request)
        return rc;
 }
 
+static inline void
+tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
+{
+       request->callback = (void *) tape_free_request;
+       request->callback_data = NULL;
+       tape_do_io_async(device, request);
+}
+
 extern int tape_oper_handler(int irq, int status);
 extern void tape_noper_handler(int irq, int status);
 extern int tape_open(struct tape_device *);
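The new inline helper makes a request self-freeing: its completion callback is pointed at tape_free_request, so the submitter neither waits nor cleans up. The (void *) cast bridges the callback's two-argument signature to tape_free_request's single argument; a typed adapter expresses the same thing without the cast. A sketch with assumed types, not the real s390 definitions:

#include <stdlib.h>

/* Assumed shapes, for illustration only. */
struct tape_request {
        int op;
        void (*callback)(struct tape_request *, void *);
        void *callback_data;
};

static void tape_free_request(struct tape_request *request)
{
        free(request);
}

/* Typed adapter: same effect as 'callback = (void *) tape_free_request',
 * without calling through a mismatched function-pointer type. */
static void tape_free_request_cb(struct tape_request *request, void *data)
{
        (void)data;
        tape_free_request(request);
}

int main(void)
{
        struct tape_request *request = malloc(sizeof(*request));

        if (!request)
                return 1;
        request->callback = tape_free_request_cb;
        request->callback_data = NULL;
        /* ...submission would happen here; the completion path then does: */
        request->callback(request, request->callback_data);
        return 0;
}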
index c17f35b6136ace01d56cd643ed0bffdaae8e7b86..c26511171ffead57d7cce9d81612d09d84027185 100644 (file)
@@ -53,23 +53,11 @@ static void tape_34xx_delete_sbid_from(struct tape_device *, int);
  * Medium sense for 34xx tapes. There is no 'real' medium sense call.
  * So we just do a normal sense.
  */
-static int
-tape_34xx_medium_sense(struct tape_device *device)
+static void __tape_34xx_medium_sense(struct tape_request *request)
 {
-       struct tape_request *request;
-       unsigned char       *sense;
-       int                  rc;
-
-       request = tape_alloc_request(1, 32);
-       if (IS_ERR(request)) {
-               DBF_EXCEPTION(6, "MSEN fail\n");
-               return PTR_ERR(request);
-       }
-
-       request->op = TO_MSEN;
-       tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+       struct tape_device *device = request->device;
+       unsigned char *sense;
 
-       rc = tape_do_io_interruptible(device, request);
        if (request->rc == 0) {
                sense = request->cpdata;
 
@@ -88,15 +76,47 @@ tape_34xx_medium_sense(struct tape_device *device)
                        device->tape_generic_status |= GMT_WR_PROT(~0);
                else
                        device->tape_generic_status &= ~GMT_WR_PROT(~0);
-       } else {
+       } else
                DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
                        request->rc);
-       }
        tape_free_request(request);
+}
+
+static int tape_34xx_medium_sense(struct tape_device *device)
+{
+       struct tape_request *request;
+       int rc;
+
+       request = tape_alloc_request(1, 32);
+       if (IS_ERR(request)) {
+               DBF_EXCEPTION(6, "MSEN fail\n");
+               return PTR_ERR(request);
+       }
 
+       request->op = TO_MSEN;
+       tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+       rc = tape_do_io_interruptible(device, request);
+       __tape_34xx_medium_sense(request);
        return rc;
 }
 
+static void tape_34xx_medium_sense_async(struct tape_device *device)
+{
+       struct tape_request *request;
+
+       request = tape_alloc_request(1, 32);
+       if (IS_ERR(request)) {
+               DBF_EXCEPTION(6, "MSEN fail\n");
+               return;
+       }
+
+       request->op = TO_MSEN;
+       tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+       request->callback = (void *) __tape_34xx_medium_sense;
+       request->callback_data = NULL;
+       tape_do_io_async(device, request);
+}
+
 struct tape_34xx_work {
        struct tape_device      *device;
        enum tape_op             op;
@@ -109,6 +129,9 @@ struct tape_34xx_work {
  * is inserted but cannot call tape_do_io* from an interrupt context.
  * Maybe that's useful for other actions we want to start from the
  * interrupt handler.
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
  */
 static void
 tape_34xx_work_handler(struct work_struct *work)
@@ -119,7 +142,7 @@ tape_34xx_work_handler(struct work_struct *work)
 
        switch(p->op) {
                case TO_MSEN:
-                       tape_34xx_medium_sense(device);
+                       tape_34xx_medium_sense_async(device);
                        break;
                default:
                        DBF_EVENT(3, "T34XX: internal error: unknown work\n");
index fbe361fcd2c09437e49bc8be0b82c56f08739769..de2e99e0a71b56747df232bef1110815979eec04 100644 (file)
@@ -329,17 +329,17 @@ out:
 /*
  * Enable encryption
  */
-static int tape_3592_enable_crypt(struct tape_device *device)
+static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device)
 {
        struct tape_request *request;
        char *data;
 
        DBF_EVENT(6, "tape_3592_enable_crypt\n");
        if (!crypt_supported(device))
-               return -ENOSYS;
+               return ERR_PTR(-ENOSYS);
        request = tape_alloc_request(2, 72);
        if (IS_ERR(request))
-               return PTR_ERR(request);
+               return request;
        data = request->cpdata;
        memset(data,0,72);
 
@@ -354,23 +354,42 @@ static int tape_3592_enable_crypt(struct tape_device *device)
        request->op = TO_CRYPT_ON;
        tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
        tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+       return request;
+}
+
+static int tape_3592_enable_crypt(struct tape_device *device)
+{
+       struct tape_request *request;
+
+       request = __tape_3592_enable_crypt(device);
+       if (IS_ERR(request))
+               return PTR_ERR(request);
        return tape_do_io_free(device, request);
 }
 
+static void tape_3592_enable_crypt_async(struct tape_device *device)
+{
+       struct tape_request *request;
+
+       request = __tape_3592_enable_crypt(device);
+       if (!IS_ERR(request))
+               tape_do_io_async_free(device, request);
+}
+
 /*
  * Disable encryption
  */
-static int tape_3592_disable_crypt(struct tape_device *device)
+static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device)
 {
        struct tape_request *request;
        char *data;
 
        DBF_EVENT(6, "tape_3592_disable_crypt\n");
        if (!crypt_supported(device))
-               return -ENOSYS;
+               return ERR_PTR(-ENOSYS);
        request = tape_alloc_request(2, 72);
        if (IS_ERR(request))
-               return PTR_ERR(request);
+               return request;
        data = request->cpdata;
        memset(data,0,72);
 
@@ -383,9 +402,28 @@ static int tape_3592_disable_crypt(struct tape_device *device)
        tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
        tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
 
+       return request;
+}
+
+static int tape_3592_disable_crypt(struct tape_device *device)
+{
+       struct tape_request *request;
+
+       request = __tape_3592_disable_crypt(device);
+       if (IS_ERR(request))
+               return PTR_ERR(request);
        return tape_do_io_free(device, request);
 }
 
+static void tape_3592_disable_crypt_async(struct tape_device *device)
+{
+       struct tape_request *request;
+
+       request = __tape_3592_disable_crypt(device);
+       if (!IS_ERR(request))
+               tape_do_io_async_free(device, request);
+}
+
 /*
  * IOCTL: Set encryption status
  */
@@ -457,8 +495,7 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
 /*
  * SENSE Medium: Get Sense data about medium state
  */
-static int
-tape_3590_sense_medium(struct tape_device *device)
+static int tape_3590_sense_medium(struct tape_device *device)
 {
        struct tape_request *request;
 
@@ -470,6 +507,18 @@ tape_3590_sense_medium(struct tape_device *device)
        return tape_do_io_free(device, request);
 }
 
+static void tape_3590_sense_medium_async(struct tape_device *device)
+{
+       struct tape_request *request;
+
+       request = tape_alloc_request(1, 128);
+       if (IS_ERR(request))
+               return;
+       request->op = TO_MSEN;
+       tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
+       tape_do_io_async_free(device, request);
+}
+
 /*
  * MTTELL: Tell block. Return the number of block relative to current file.
  */
@@ -546,15 +595,14 @@ tape_3590_read_opposite(struct tape_device *device,
  * 2. The attention msg is written to the "read subsystem data" buffer.
  * In this case we probably should print it to the console.
  */
-static int
-tape_3590_read_attmsg(struct tape_device *device)
+static void tape_3590_read_attmsg_async(struct tape_device *device)
 {
        struct tape_request *request;
        char *buf;
 
        request = tape_alloc_request(3, 4096);
        if (IS_ERR(request))
-               return PTR_ERR(request);
+               return;
        request->op = TO_READ_ATTMSG;
        buf = request->cpdata;
        buf[0] = PREP_RD_SS_DATA;
@@ -562,12 +610,15 @@ tape_3590_read_attmsg(struct tape_device *device)
        tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
        tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
        tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
-       return tape_do_io_free(device, request);
+       tape_do_io_async_free(device, request);
 }
 
 /*
  * These functions are used to schedule follow-up actions from within an
  * interrupt context (like unsolicited interrupts).
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
  */
 struct work_handler_data {
        struct tape_device *device;
@@ -583,16 +634,16 @@ tape_3590_work_handler(struct work_struct *work)
 
        switch (p->op) {
        case TO_MSEN:
-               tape_3590_sense_medium(p->device);
+               tape_3590_sense_medium_async(p->device);
                break;
        case TO_READ_ATTMSG:
-               tape_3590_read_attmsg(p->device);
+               tape_3590_read_attmsg_async(p->device);
                break;
        case TO_CRYPT_ON:
-               tape_3592_enable_crypt(p->device);
+               tape_3592_enable_crypt_async(p->device);
                break;
        case TO_CRYPT_OFF:
-               tape_3592_disable_crypt(p->device);
+               tape_3592_disable_crypt_async(p->device);
                break;
        default:
                DBF_EVENT(3, "T3590: work handler undefined for "
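The pattern in this file and in tape_34xx.c above is the same throughout: each command gets a shared builder plus two submitters, a blocking one for process context and a fire-and-forget one for the work handler, because the handler runs on the system workqueue and must not sleep waiting for completion (the deferred cc=1 deadlock the comments mention). The shape, distilled into a standalone program with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct req {
        int op;
};

static struct req *build_msen_req(void)        /* shared builder */
{
        struct req *r = malloc(sizeof(*r));

        if (r)
                r->op = 1;                      /* TO_MSEN stand-in */
        return r;
}

static int do_io_sync(struct req *r)           /* may sleep until done */
{
        printf("sync op %d completed\n", r->op);
        free(r);
        return 0;
}

static void do_io_async(struct req *r)         /* returns immediately */
{
        printf("async op %d queued\n", r->op);
        free(r);                                /* completion path frees it */
}

static int medium_sense(void)                  /* ioctl/process context */
{
        struct req *r = build_msen_req();

        return r ? do_io_sync(r) : -1;
}

static void medium_sense_async(void)           /* work handler: no waiting */
{
        struct req *r = build_msen_req();

        if (r)
                do_io_async(r);
}

int main(void)
{
        medium_sense();
        medium_sense_async();
        return 0;
}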
index 9045c52abd25798994caf0e63f7fb4c8700446a8..fb2bb35c62cbfc0a56260d0e2a2ee3b135ff461e 100644 (file)
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
                                        &sdev->request_queue->queue_flags);
                if (flagset)
                        queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-               __blk_run_queue(sdev->request_queue);
+               __blk_run_queue(sdev->request_queue, false);
                if (flagset)
                        queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
                spin_unlock(sdev->request_queue->queue_lock);
index 998c01be3234f7dfe7ff87ffe3d73bcb4ae63b67..5c3ccfc6b6220d99e08f21d24458e23866876392 100644 (file)
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
                  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
        if (flagset)
                queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-       __blk_run_queue(rport->rqst_q);
+       __blk_run_queue(rport->rqst_q, false);
        if (flagset)
                queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
        spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
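Both SCSI hunks here track a block-layer interface change: __blk_run_queue() grew a second argument, passed as false at these call sites. Judging from the call sites alone, the shape is roughly the following; the parameter name is a guess:

#include <stdbool.h>

struct request_queue;

/* Assumed prototype matching the calls above: false runs the queue
 * inline as before, true would defer the run to the kblockd workqueue. */
void __blk_run_queue(struct request_queue *q, bool force_kblockd);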
index 158cecbec7183c0fb60c828cd2fb8fd2bfc19691..4a109835e4203995bf8c4ea0784639321d7013f5 100644 (file)
@@ -282,6 +282,9 @@ int core_tmr_lun_reset(
 
                        atomic_set(&task->task_active, 0);
                        atomic_set(&task->task_stop, 0);
+               } else {
+                       if (atomic_read(&task->task_execute_queue) != 0)
+                               transport_remove_task_from_execute_queue(task, dev);
                }
                __transport_stop_task_timer(task, &flags);
 
@@ -301,6 +304,7 @@ int core_tmr_lun_reset(
                        DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
                                " task: %p, t_fe_count: %d dev: %p\n", task,
                                fe_count, dev);
+                       atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
                        spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
                                                flags);
                        core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
@@ -310,6 +314,7 @@ int core_tmr_lun_reset(
                }
                DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
                        " t_fe_count: %d dev: %p\n", task, fe_count, dev);
+               atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
                spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
                core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 
index 236e22d8cfae3e7f3ae66424e320abb7a312b17e..4bbf6c147f896dab5b2ed9319264c900dc0a0c24 100644 (file)
@@ -1207,7 +1207,7 @@ transport_get_task_from_execute_queue(struct se_device *dev)
  *
  *
  */
-static void transport_remove_task_from_execute_queue(
+void transport_remove_task_from_execute_queue(
        struct se_task *task,
        struct se_device *dev)
 {
@@ -5549,7 +5549,8 @@ static void transport_generic_wait_for_tasks(
 
                atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
        }
-       if (!atomic_read(&T_TASK(cmd)->t_transport_active))
+       if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
+            atomic_read(&T_TASK(cmd)->t_transport_aborted))
                goto remove;
 
        atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
@@ -5956,6 +5957,9 @@ static void transport_processing_shutdown(struct se_device *dev)
 
                        atomic_set(&task->task_active, 0);
                        atomic_set(&task->task_stop, 0);
+               } else {
+                       if (atomic_read(&task->task_execute_queue) != 0)
+                               transport_remove_task_from_execute_queue(task, dev);
                }
                __transport_stop_task_timer(task, &flags);
 
index f7a5dba3ca23a9f155a33edfd04ad62063aaa456..bf7c687519ef4026ea2a4a7b6506287a47843d6e 100644 (file)
@@ -4,7 +4,6 @@
 
 menuconfig THERMAL
        tristate "Generic Thermal sysfs driver"
-       depends on NET
        help
          Generic Thermal Sysfs driver offers a generic mechanism for
          thermal management. Usually it's made up of one or more thermal
index 7d0e63c79280c37c6a88e9be1e8834eea11eba31..713b7ea4a60709e89e164fd4765e75e5397ed57f 100644 (file)
@@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock);
 
 static unsigned int thermal_event_seqnum;
 
-static struct genl_family thermal_event_genl_family = {
-       .id = GENL_ID_GENERATE,
-       .name = THERMAL_GENL_FAMILY_NAME,
-       .version = THERMAL_GENL_VERSION,
-       .maxattr = THERMAL_GENL_ATTR_MAX,
-};
-
-static struct genl_multicast_group thermal_event_mcgrp = {
-       .name = THERMAL_GENL_MCAST_GROUP_NAME,
-};
-
-static int genetlink_init(void);
-static void genetlink_exit(void);
-
 static int get_idr(struct idr *idr, struct mutex *lock, int *id)
 {
        int err;
@@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 
 EXPORT_SYMBOL(thermal_zone_device_unregister);
 
+#ifdef CONFIG_NET
+static struct genl_family thermal_event_genl_family = {
+       .id = GENL_ID_GENERATE,
+       .name = THERMAL_GENL_FAMILY_NAME,
+       .version = THERMAL_GENL_VERSION,
+       .maxattr = THERMAL_GENL_ATTR_MAX,
+};
+
+static struct genl_multicast_group thermal_event_mcgrp = {
+       .name = THERMAL_GENL_MCAST_GROUP_NAME,
+};
+
 int generate_netlink_event(u32 orig, enum events event)
 {
        struct sk_buff *skb;
@@ -1301,6 +1299,15 @@ static int genetlink_init(void)
        return result;
 }
 
+static void genetlink_exit(void)
+{
+       genl_unregister_family(&thermal_event_genl_family);
+}
+#else /* !CONFIG_NET */
+static inline int genetlink_init(void) { return 0; }
+static inline void genetlink_exit(void) {}
+#endif /* !CONFIG_NET */
+
 static int __init thermal_init(void)
 {
        int result = 0;
@@ -1316,11 +1323,6 @@ static int __init thermal_init(void)
        return result;
 }
 
-static void genetlink_exit(void)
-{
-       genl_unregister_family(&thermal_event_genl_family);
-}
-
 static void __exit thermal_exit(void)
 {
        class_unregister(&thermal_class);
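Net effect of the thermal hunks: the subsystem no longer hard-depends on NET. The generic-netlink pieces move inside #ifdef CONFIG_NET, with static inline no-ops supplied otherwise, so thermal_init() and thermal_exit() can call genetlink_init()/genetlink_exit() unconditionally. The idiom, distilled into a standalone program where CONFIG_NET is a plain macro standing in for the Kconfig symbol:

#include <stdio.h>

#define CONFIG_NET 1    /* set to 0 to compile the no-op stubs instead */

#if CONFIG_NET
static int genetlink_init(void)  { puts("netlink registered"); return 0; }
static void genetlink_exit(void) { puts("netlink unregistered"); }
#else
static inline int genetlink_init(void) { return 0; }
static inline void genetlink_exit(void) {}
#endif

int main(void)
{
        if (genetlink_init() == 0)      /* caller needs no #ifdef */
                genetlink_exit();
        return 0;
}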
index 93760b2ea1727a908ca862dfc9922e4b13a3db63..1ef4df9bf7e4f785bc609778a0819958ab9d1685 100644 (file)
@@ -712,6 +712,7 @@ static struct pcmcia_device_id serial_ids[] = {
        PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
        PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
        PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
+       PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05),
        PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
        PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
        PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
index d041c6826e432fcb35ac1ca81ed31899d714ed1e..0f299b7aad609bc232e7d10c0c142f47f180534b 100644 (file)
@@ -2681,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
 
        mutex_lock(&usb_address0_mutex);
 
-       if (!udev->config && oldspeed == USB_SPEED_SUPER) {
-               /* Don't reset USB 3.0 devices during an initial setup */
-               usb_set_device_state(udev, USB_STATE_DEFAULT);
-       } else {
-               /* Reset the device; full speed may morph to high speed */
-               /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
-               retval = hub_port_reset(hub, port1, udev, delay);
-               if (retval < 0)         /* error or disconnect */
-                       goto fail;
-               /* success, speed is known */
-       }
+       /* Reset the device; full speed may morph to high speed */
+       /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+       retval = hub_port_reset(hub, port1, udev, delay);
+       if (retval < 0)         /* error or disconnect */
+               goto fail;
+       /* success, speed is known */
+
        retval = -ENODEV;
 
        if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
index 44c595432d6fec7eff59fadccd0da497244bb17e..81ce6a8e1d94a4b6c7a86c6dbb17a64c1b6e69bf 100644 (file)
@@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x04b4, 0x0526), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
 
+       /* Samsung Android phone modem - ID conflict with SPH-I500 */
+       { USB_DEVICE(0x04e8, 0x6601), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* Roland SC-8820 */
        { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* M-Systems Flash Disk Pioneers */
        { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Keytouch QWERTY Panel keyboard */
+       { USB_DEVICE(0x0926, 0x3333), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
        { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
 
index 3c6e1a058745c739e40cdd825882993a5671661f..5e1495097ec3bf8ad892c0b653e869c2faed5ab9 100644 (file)
@@ -346,14 +346,19 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
 
                if (unlikely(!skb))
                        break;
-               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
-                               req->actual);
-               page = NULL;
 
-               if (req->actual < req->length) { /* Last fragment */
+               if (skb->len == 0) { /* First fragment */
                        skb->protocol = htons(ETH_P_PHONET);
                        skb_reset_mac_header(skb);
-                       pskb_pull(skb, 1);
+                       /* Can't use pskb_pull() on page in IRQ */
+                       memcpy(skb_put(skb, 1), page_address(page), 1);
+               }
+
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                               skb->len == 0, req->actual);
+               page = NULL;
+
+               if (req->actual < req->length) { /* Last fragment */
                        skb->dev = dev;
                        dev->stats.rx_packets++;
                        dev->stats.rx_bytes += skb->len;
index e8f4f36fdf0b457ad917481fe89f269c171d37a2..a6f21b891f6869ec49b7e87ea502a65482ba96d6 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/of_address.h>
 
 /**
  * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
index fcbf4abbf3815db1acc837fecb3dcec82824e3d7..0231814a97a50ab9407cb9af25a06ca1e1f36a55 100644 (file)
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
        }
 }
 
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
 {
-       void *addr;
+       struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
+       void __iomem *addr;
        u32 temp;
        u64 temp_64;
 
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
        }
 }
 
-void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 {
        /* Fields are 32 bits wide, DMA addresses are in bytes */
        int field_size = 32 / 8;
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
                dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
 }
 
-void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
                     struct xhci_container_ctx *ctx,
                     unsigned int last_ep)
 {
index 1d0f45f0e7a62fd191d4c856d0de87715456bfbf..a9534396e85bac027e8f8861ae0aeb330cbb8c99 100644 (file)
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
 
 /***************** Streams structures manipulation *************************/
 
-void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
                unsigned int num_stream_ctxs,
                struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
 {
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
  * The stream context array must be a power of 2, and can be as small as
  * 64 bytes or as large as 1MB.
  */
-struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
                unsigned int num_stream_ctxs, dma_addr_t *dma,
                gfp_t mem_flags)
 {
@@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        val &= DBOFF_MASK;
        xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
                        " from cap regs base addr\n", val);
-       xhci->dba = (void *) xhci->cap_regs + val;
+       xhci->dba = (void __iomem *) xhci->cap_regs + val;
        xhci_dbg_regs(xhci);
        xhci_print_run_regs(xhci);
        /* Set ir_set to interrupt register set 0 */
-       xhci->ir_set = (void *) xhci->run_regs->ir_set;
+       xhci->ir_set = &xhci->run_regs->ir_set[0];
 
        /*
         * Event ring setup: Allocate a normal ring, but also setup
@@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        /* Set the event ring dequeue address */
        xhci_set_hc_event_deq(xhci);
        xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
-       xhci_print_ir_set(xhci, xhci->ir_set, 0);
+       xhci_print_ir_set(xhci, 0);
 
        /*
         * XXX: Might need to set the Interrupter Moderation Register to
index 3e8211c1ce5adaee7ebfd4ecb59a4ee6fbfa160f..3289bf4832c9acdca8d58a6edaa3d6dd7b2d43e1 100644 (file)
@@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        state->new_deq_seg = find_trb_seg(cur_td->start_seg,
                        dev->eps[ep_index].stopped_trb,
                        &state->new_cycle_state);
-       if (!state->new_deq_seg)
-               BUG();
+       if (!state->new_deq_seg) {
+               WARN_ON(1);
+               return;
+       }
+
        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
        xhci_dbg(xhci, "Finding endpoint context\n");
        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        state->new_deq_seg = find_trb_seg(state->new_deq_seg,
                        state->new_deq_ptr,
                        &state->new_cycle_state);
-       if (!state->new_deq_seg)
-               BUG();
+       if (!state->new_deq_seg) {
+               WARN_ON(1);
+               return;
+       }
 
        trb = &state->new_deq_ptr->generic;
        if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 
                /* Scatter gather list entries may cross 64KB boundaries */
                running_total = TRB_MAX_BUFF_SIZE -
-                       (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+                       (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+               running_total &= TRB_MAX_BUFF_SIZE - 1;
                if (running_total != 0)
                        num_trbs++;
 
                /* How many more 64KB chunks to transfer, how many more TRBs? */
-               while (running_total < sg_dma_len(sg)) {
+               while (running_total < sg_dma_len(sg) && running_total < temp) {
                        num_trbs++;
                        running_total += TRB_MAX_BUFF_SIZE;
                }
@@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 {
        if (num_trbs != 0)
-               dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+               dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
                                "TRBs, %d left\n", __func__,
                                urb->ep->desc.bEndpointAddress, num_trbs);
        if (running_total != urb->transfer_buffer_length)
-               dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+               dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
                                "queued %#x (%d), asked for %#x (%d)\n",
                                __func__,
                                urb->ep->desc.bEndpointAddress,
@@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        sg = urb->sg;
        addr = (u64) sg_dma_address(sg);
        this_sg_len = sg_dma_len(sg);
-       trb_buff_len = TRB_MAX_BUFF_SIZE -
-               (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+       trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
        trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
        if (trb_buff_len > urb->transfer_buffer_length)
                trb_buff_len = urb->transfer_buffer_length;
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
                                (unsigned int) addr + trb_buff_len);
                if (TRB_MAX_BUFF_SIZE -
-                               (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+                               (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
                        xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
                        xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
                                        (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                }
 
                trb_buff_len = TRB_MAX_BUFF_SIZE -
-                       (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+                       (addr & (TRB_MAX_BUFF_SIZE - 1));
                trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
                if (running_total + trb_buff_len > urb->transfer_buffer_length)
                        trb_buff_len =
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        num_trbs = 0;
        /* How much data is (potentially) left before the 64KB boundary? */
        running_total = TRB_MAX_BUFF_SIZE -
-               (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+               (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+       running_total &= TRB_MAX_BUFF_SIZE - 1;
 
        /* If there's some data on this 64KB chunk, or we have to send a
         * zero-length transfer, we need at least one TRB
@@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        /* How much data is in the first TRB? */
        addr = (u64) urb->transfer_dma;
        trb_buff_len = TRB_MAX_BUFF_SIZE -
-               (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
-       if (urb->transfer_buffer_length < trb_buff_len)
+               (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+       if (trb_buff_len > urb->transfer_buffer_length)
                trb_buff_len = urb->transfer_buffer_length;
 
        first_trb = true;
@@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
        addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
        td_len = urb->iso_frame_desc[i].length;
 
-       running_total = TRB_MAX_BUFF_SIZE -
-                       (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+       running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+       running_total &= TRB_MAX_BUFF_SIZE - 1;
        if (running_total != 0)
                num_trbs++;
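The recurring edit in this file replaces the (1 << TRB_MAX_BUFF_SHIFT) spelling with TRB_MAX_BUFF_SIZE and, more importantly, masks the computed "bytes before the next 64 KB boundary". Unmasked, a DMA address that is exactly 64 KB aligned produces TRB_MAX_BUFF_SIZE rather than 0, i.e. a phantom partial first chunk. The arithmetic, checked standalone:

#include <stdio.h>

#define TRB_MAX_BUFF_SIZE (1 << 16)     /* 64 KB, as in the xHCI ring code */

int main(void)
{
        unsigned long long addr = 0x20000;      /* exactly 64 KB aligned */
        unsigned int left = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));

        printf("unmasked: %u\n", left);         /* 65536: phantom partial chunk */
        left &= TRB_MAX_BUFF_SIZE - 1;
        printf("masked:   %u\n", left);         /* 0: correct for aligned start */
        return 0;
}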
 
index 34cf4e1658773d870fc857c1338e4fe01df3af02..2083fc2179b2a52862b89543a6b08b5d82bfdc7e 100644 (file)
@@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci)
 /*
  * Set the run bit and wait for the host to be running.
  */
-int xhci_start(struct xhci_hcd *xhci)
+static int xhci_start(struct xhci_hcd *xhci)
 {
        u32 temp;
        int ret;
@@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd)
 
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void xhci_event_ring_work(unsigned long arg)
+static void xhci_event_ring_work(unsigned long arg)
 {
        unsigned long flags;
        int temp;
@@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd)
                        xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
        xhci_writel(xhci, ER_IRQ_ENABLE(temp),
                        &xhci->ir_set->irq_pending);
-       xhci_print_ir_set(xhci, xhci->ir_set, 0);
+       xhci_print_ir_set(xhci, 0);
 
        if (NUM_TEST_NOOPS > 0)
                doorbell = xhci_setup_one_noop(xhci);
@@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd)
        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                        &xhci->ir_set->irq_pending);
-       xhci_print_ir_set(xhci, xhci->ir_set, 0);
+       xhci_print_ir_set(xhci, 0);
 
        xhci_dbg(xhci, "cleaning up memory\n");
        xhci_mem_cleanup(xhci);
@@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
                xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                                &xhci->ir_set->irq_pending);
-               xhci_print_ir_set(xhci, xhci->ir_set, 0);
+               xhci_print_ir_set(xhci, 0);
 
                xhci_dbg(xhci, "cleaning up memory\n");
                xhci_mem_cleanup(xhci);
@@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
 /* Returns 1 if the arguments are OK;
  * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
  */
-int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
                const char *func) {
        struct xhci_hcd *xhci;
@@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
        xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
 }
 
-void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                struct xhci_dequeue_state *deq_state)
 {
index 7f236fd220151ca5fa4f43cee6733e97867f2d5f..7f127df6dd55329c71ea2bd3986717bd3b2d0c23 100644 (file)
@@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
 }
 
 /* xHCI debugging */
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
 void xhci_print_registers(struct xhci_hcd *xhci);
 void xhci_dbg_regs(struct xhci_hcd *xhci);
 void xhci_print_run_regs(struct xhci_hcd *xhci);
index 54a8bd1047d611a1c4b533ef3008c4d5fe7e602b..c292d5c499e7dc8981a7c6d9fd171f57cc3c7aa0 100644 (file)
@@ -1864,6 +1864,7 @@ allocate_instance(struct device *dev,
        INIT_LIST_HEAD(&musb->out_bulk);
 
        hcd->uses_new_polling = 1;
+       hcd->has_tt = 1;
 
        musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
        musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
index d74a8113ae74173461d7670c056fc3b3df91b4bd..e6400be8a0f81cb70a4c54e0bc349f91fdaf008b 100644 (file)
@@ -488,6 +488,15 @@ struct musb {
        unsigned                set_address:1;
        unsigned                test_mode:1;
        unsigned                softconnect:1;
+
+       u8                      address;
+       u8                      test_mode_nr;
+       u16                     ackpend;                /* ep0 */
+       enum musb_g_ep0_state   ep0_state;
+       struct usb_gadget       g;                      /* the gadget */
+       struct usb_gadget_driver *gadget_driver;        /* its driver */
+#endif
+
        /*
         * FIXME: Remove this flag.
         *
@@ -501,14 +510,6 @@ struct musb {
         */
        unsigned                double_buffer_not_ok:1 __deprecated;
 
-       u8                      address;
-       u8                      test_mode_nr;
-       u16                     ackpend;                /* ep0 */
-       enum musb_g_ep0_state   ep0_state;
-       struct usb_gadget       g;                      /* the gadget */
-       struct usb_gadget_driver *gadget_driver;        /* its driver */
-#endif
-
        struct musb_hdrc_config *config;
 
 #ifdef MUSB_CONFIG_PROC_FS
index a3f12333fc4146f55fbd30864f6909bf2579a71f..bc8badd16897f0fc2a1825572d53ba68a12f4e32 100644 (file)
@@ -362,6 +362,7 @@ static int omap2430_musb_init(struct musb *musb)
 
 static int omap2430_musb_exit(struct musb *musb)
 {
+       del_timer_sync(&musb_idle_timer);
 
        omap2430_low_level_exit(musb);
        otg_put_transceiver(musb->xceiv);
index 7481ff8a49e4aaf3f025728e40201c2c4a332ff2..0457813eebeedbdb77063fb911119d14ecf7057e 100644 (file)
@@ -301,6 +301,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1199, 0x68A3),   /* Sierra Wireless Direct IP modems */
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
+       { USB_DEVICE(0x0f3d, 0x68A3),   /* Airprime/Sierra Wireless Direct IP modems */
+         .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+       },
        { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
 
        { }
index b004b2a485c38765570193d5e162f8172c9e00a1..9c014e2ecd68ef6fcdf0c6c9f5e0d15a8e046c41 100644 (file)
@@ -295,12 +295,15 @@ static void usb_wwan_indat_callback(struct urb *urb)
                    __func__, status, endpoint);
        } else {
                tty = tty_port_tty_get(&port->port);
-               if (urb->actual_length) {
-                       tty_insert_flip_string(tty, data, urb->actual_length);
-                       tty_flip_buffer_push(tty);
-               } else
-                       dbg("%s: empty read urb received", __func__);
-               tty_kref_put(tty);
+               if (tty) {
+                       if (urb->actual_length) {
+                               tty_insert_flip_string(tty, data,
+                                               urb->actual_length);
+                               tty_flip_buffer_push(tty);
+                       } else
+                               dbg("%s: empty read urb received", __func__);
+                       tty_kref_put(tty);
+               }
 
                /* Resubmit urb so we continue receiving */
                if (status != -ESHUTDOWN) {
index 15a5d89b7f397596e45475afc83b83a8ea1d2ffa..1c11959a7d588f8b25e03013b83a4c3008b452be 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/uaccess.h>
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
+#include <linux/usb/cdc.h>
 #include "visor.h"
 
 /*
@@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial,
 
        dbg("%s", __func__);
 
+       /*
+        * as SPH-I500, but they are ACM devices, so don't bind to them
+        * as SPH-I500, but they are ACM devices, so dont bind to them
+        */
+       if (id->idVendor == SAMSUNG_VENDOR_ID &&
+               id->idProduct == SAMSUNG_SPH_I500_ID &&
+               serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
+               serial->dev->descriptor.bDeviceSubClass ==
+                       USB_CDC_SUBCLASS_ACM)
+               return -ENODEV;
+
        if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
                dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
                        serial->dev->actconfig->desc.bConfigurationValue);
index 8010aaeb5adb4437620bd51c63604897198bd0e1..dd0e84a9bd2fd2cb0abcfe995bf2106f8f72a6a0 100644 (file)
@@ -239,11 +239,15 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
        lcd->spi = spi;
        lcd->power = FB_BLANK_POWERDOWN;
        lcd->buffer = kzalloc(8, GFP_KERNEL);
+       if (!lcd->buffer) {
+               ret = -ENOMEM;
+               goto out_free_lcd;
+       }
 
        ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
        if (IS_ERR(ld)) {
                ret = PTR_ERR(ld);
-               goto out_free_lcd;
+               goto out_free_buffer;
        }
        lcd->ld = ld;
 
@@ -257,6 +261,8 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
 
 out_unregister:
        lcd_device_unregister(ld);
+out_free_buffer:
+       kfree(lcd->buffer);
 out_free_lcd:
        kfree(lcd);
        return ret;
@@ -268,6 +274,7 @@ static int __devexit ltv350qv_remove(struct spi_device *spi)
 
        ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
        lcd_device_unregister(lcd->ld);
+       kfree(lcd->buffer);
        kfree(lcd);
 
        return 0;
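Two bugs are fixed here at once: the unchecked kzalloc() of lcd->buffer, and the resulting unwind order on failure (plus the missing kfree in the remove path). The error-path idiom, reduced to a standalone program with illustrative names:

#include <stdio.h>
#include <stdlib.h>

/* Distilled form of the unwind ordering the fix establishes: cleanup
 * labels run in reverse order of setup, so each failure point releases
 * exactly what already exists.  register_fails stands in for a failing
 * lcd_device_register(). */
static int probe_sketch(int register_fails)
{
        int ret = 0;
        char *lcd, *buffer;

        lcd = malloc(16);
        if (!lcd)
                return -1;

        buffer = malloc(8);             /* the allocation the fix now checks */
        if (!buffer) {
                ret = -1;
                goto out_free_lcd;
        }

        if (register_fails) {
                ret = -1;
                goto out_free_buffer;   /* frees buffer, then lcd */
        }

        printf("probe ok (objects stay allocated, as in a real driver)\n");
        return 0;

out_free_buffer:
        free(buffer);
out_free_lcd:
        free(lcd);
        return ret;
}

int main(void)
{
        probe_sketch(1);                /* exercise the error path */
        return 0;
}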
index eca855a55c0d6abf96096e6b41847c93b06675db..3de4ba0260a50f482a12d0869b35287592cd4ff0 100644 (file)
@@ -646,7 +646,7 @@ static int __devexit cpwd_remove(struct platform_device *op)
        struct cpwd *p = dev_get_drvdata(&op->dev);
        int i;
 
-       for (i = 0; i < 4; i++) {
+       for (i = 0; i < WD_NUMDEVS; i++) {
                misc_deregister(&p->devs[i].misc);
 
                if (!p->enabled) {
index 24b966d5061a0a75dad00477c326257a7864b235..204a5603c4ae34e44d1df071a9af24c832a83a81 100644 (file)
@@ -710,7 +710,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
        return 0;
 }
 
-static void __devexit hpwdt_exit_nmi_decoding(void)
+static void hpwdt_exit_nmi_decoding(void)
 {
        unregister_die_notifier(&die_notifier);
        if (cru_rom_addr)
@@ -726,7 +726,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
        return 0;
 }
 
-static void __devexit hpwdt_exit_nmi_decoding(void)
+static void hpwdt_exit_nmi_decoding(void)
 {
 }
 #endif /* CONFIG_HPWDT_NMI_DECODING */
index c7d67e9a74659bc004abd8ba64b39426003cf788..79906255eeb6bde9b6bd6ebb5653280833819b50 100644 (file)
@@ -201,11 +201,14 @@ static struct miscdevice fitpc2_wdt_miscdev = {
 static int __init fitpc2_wdt_init(void)
 {
        int err;
+       const char *brd_name;
 
-       if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2"))
+       brd_name = dmi_get_system_info(DMI_BOARD_NAME);
+
+       if (!brd_name || !strstr(brd_name, "SBC-FITPC2"))
                return -ENODEV;
 
-       pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME));
+       pr_info("%s found\n", brd_name);
 
        if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) {
                pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT);
index 0461858e07d004a82c4cc5ce21c036d0fc98c7e2..b61ab1c54293552af1005e508a9311053efee1a6 100644 (file)
@@ -508,7 +508,7 @@ static int __init sch311x_detect(int sio_config_port, unsigned short *addr)
        sch311x_sio_outb(sio_config_port, 0x07, 0x0a);
 
        /* Check if Logical Device Register is currently active */
-       if (sch311x_sio_inb(sio_config_port, 0x30) && 0x01 == 0)
+       if ((sch311x_sio_inb(sio_config_port, 0x30) & 0x01) == 0)
                printk(KERN_INFO PFX "Seems that LDN 0x0a is not active...\n");
 
        /* Get the base address of the runtime registers */
index a6c12dec91a1434c57709b8570f3a3666ae04bc7..df2a64dc9672dee584995e4b518ea5f8b0494df7 100644 (file)
@@ -109,7 +109,7 @@ static int w83697ug_select_wd_register(void)
        outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */
        outb_p(0x30, WDT_EFER); /* select CR30 */
        c = inb_p(WDT_EFDR);
-       outb_p(c || 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */
+       outb_p(c | 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */
 
        return 0;
 }
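The previous two hunks are one-character fixes for classic C operator mix-ups: in sch311x, 'x && 0x01 == 0' parses as 'x && (0x01 == 0)' and is therefore always false, while in w83697ug 'c || 0x01' always yields 1 instead of setting bit 0. A standalone demonstration:

#include <stdio.h>

int main(void)
{
        unsigned char reg = 0x30;       /* example register value */

        /* sch311x: '==' binds tighter than '&&', so the old test was
         * 'reg && (0x01 == 0)', i.e. always false. */
        printf("%d\n", reg && 0x01 == 0);       /* 0 for any reg */
        printf("%d\n", (reg & 0x01) == 0);      /* 1: bit 0 really tested */

        /* w83697ug: '||' yields 0 or 1; it does not set bit 0. */
        printf("0x%02x\n", (unsigned)(reg || 0x01));    /* 0x01, data lost */
        printf("0x%02x\n", (unsigned)(reg | 0x01));     /* 0x31, bit 0 set */
        return 0;
}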
index 43f9f02c7db0671668343fc19bed043fbbe61fad..718050ace08f134d0d476bf269b0a1125f6ea7ec 100644 (file)
@@ -232,7 +232,7 @@ static int increase_reservation(unsigned long nr_pages)
                set_phys_to_machine(pfn, frame_list[i]);
 
                /* Link back into the page tables if not highmem. */
-               if (pfn < max_low_pfn) {
+               if (!xen_hvm_domain() && pfn < max_low_pfn) {
                        int ret;
                        ret = HYPERVISOR_update_va_mapping(
                                (unsigned long)__va(pfn << PAGE_SHIFT),
@@ -280,7 +280,7 @@ static int decrease_reservation(unsigned long nr_pages)
 
                scrub_page(page);
 
-               if (!PageHighMem(page)) {
+               if (!xen_hvm_domain() && !PageHighMem(page)) {
                        ret = HYPERVISOR_update_va_mapping(
                                (unsigned long)__va(pfn << PAGE_SHIFT),
                                __pte_ma(0), 0);
@@ -296,7 +296,7 @@ static int decrease_reservation(unsigned long nr_pages)
        /* No more mappings: invalidate P2M and add to balloon. */
        for (i = 0; i < nr_pages; i++) {
                pfn = mfn_to_pfn(frame_list[i]);
-               set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+               __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
                balloon_append(pfn_to_page(pfn));
        }
 
@@ -392,15 +392,19 @@ static struct notifier_block xenstore_notifier;
 
 static int __init balloon_init(void)
 {
-       unsigned long pfn, extra_pfn_end;
+       unsigned long pfn, nr_pages, extra_pfn_end;
        struct page *page;
 
-       if (!xen_pv_domain())
+       if (!xen_domain())
                return -ENODEV;
 
        pr_info("xen_balloon: Initialising balloon driver.\n");
 
-       balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
+       if (xen_pv_domain())
+               nr_pages = xen_start_info->nr_pages;
+       else
+               nr_pages = max_pfn;
+       balloon_stats.current_pages = min(nr_pages, max_pfn);
        balloon_stats.target_pages  = balloon_stats.current_pages;
        balloon_stats.balloon_low   = 0;
        balloon_stats.balloon_high  = 0;
index 74681478100ae2c20442df9b4d9d7eb8df9f12c1..0ad1699a1b3e3f70f94d8b6579b624c84cc0986f 100644 (file)
@@ -114,7 +114,7 @@ struct cpu_evtchn_s {
 static __initdata struct cpu_evtchn_s init_evtchn_mask = {
        .bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
 };
-static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;
+static struct cpu_evtchn_s __refdata *cpu_evtchn_mask_p = &init_evtchn_mask;
 
 static inline unsigned long *cpu_evtchn_mask(int cpu)
 {
@@ -277,7 +277,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 
        BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-       cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
+       cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
 #endif
 
        clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
@@ -294,7 +294,7 @@ static void init_evtchn_cpu_bindings(void)
 
        /* By default all event channels notify CPU#0. */
        for_each_irq_desc(i, desc) {
-               cpumask_copy(desc->affinity, cpumask_of(0));
+               cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
        }
 #endif
 
@@ -376,81 +376,69 @@ static void unmask_evtchn(int port)
        put_cpu();
 }
 
-static int get_nr_hw_irqs(void)
+static int xen_allocate_irq_dynamic(void)
 {
-       int ret = 1;
+       int first = 0;
+       int irq;
 
 #ifdef CONFIG_X86_IO_APIC
-       ret = get_nr_irqs_gsi();
+       /*
+        * For an HVM guest or domain 0, which sees "real" (emulated or
+        * actual, respectively) GSIs, we allocate dynamic IRQs
+        * e.g. those corresponding to event channels or MSIs
+        * etc. from the range above those "real" GSIs to avoid
+        * collisions.
+        */
+       if (xen_initial_domain() || xen_hvm_domain())
+               first = get_nr_irqs_gsi();
 #endif
 
-       return ret;
-}
+retry:
+       irq = irq_alloc_desc_from(first, -1);
 
-static int find_unbound_pirq(int type)
-{
-       int rc, i;
-       struct physdev_get_free_pirq op_get_free_pirq;
-       op_get_free_pirq.type = type;
+       if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
+               printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
+               first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
+               goto retry;
+       }
 
-       rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
-       if (!rc)
-               return op_get_free_pirq.pirq;
+       if (irq < 0)
+               panic("No available IRQ to bind to: increase nr_irqs!\n");
 
-       for (i = 0; i < nr_irqs; i++) {
-               if (pirq_to_irq[i] < 0)
-                       return i;
-       }
-       return -1;
+       return irq;
 }
 
-static int find_unbound_irq(void)
+static int xen_allocate_irq_gsi(unsigned gsi)
 {
-       struct irq_data *data;
-       int irq, res;
-       int bottom = get_nr_hw_irqs();
-       int top = nr_irqs-1;
-
-       if (bottom == nr_irqs)
-               goto no_irqs;
+       int irq;
 
-       /* This loop starts from the top of IRQ space and goes down.
-        * We need this b/c if we have a PCI device in a Xen PV guest
-        * we do not have an IO-APIC (though the backend might have them)
-        * mapped in. To not have a collision of physical IRQs with the Xen
-        * event channels start at the top of the IRQ space for virtual IRQs.
+       /*
+        * A PV guest has no concept of a GSI (since it has no ACPI
+        * nor access to/knowledge of the physical APICs). Therefore
+        * all IRQs are dynamically allocated from the entire IRQ
+        * space.
         */
-       for (irq = top; irq > bottom; irq--) {
-               data = irq_get_irq_data(irq);
-               /* only 15->0 have init'd desc; handle irq > 16 */
-               if (!data)
-                       break;
-               if (data->chip == &no_irq_chip)
-                       break;
-               if (data->chip != &xen_dynamic_chip)
-                       continue;
-               if (irq_info[irq].type == IRQT_UNBOUND)
-                       return irq;
-       }
-
-       if (irq == bottom)
-               goto no_irqs;
+       if (xen_pv_domain() && !xen_initial_domain())
+               return xen_allocate_irq_dynamic();
 
-       res = irq_alloc_desc_at(irq, -1);
+       /* Legacy IRQ descriptors are already allocated by the arch. */
+       if (gsi < NR_IRQS_LEGACY)
+               return gsi;
 
-       if (WARN_ON(res != irq))
-               return -1;
+       irq = irq_alloc_desc_at(gsi, -1);
+       if (irq < 0)
+               panic("Unable to allocate IRQ%d (%d)\n", gsi, irq);
 
        return irq;
-
-no_irqs:
-       panic("No available IRQ to bind to: increase nr_irqs!\n");
 }
 
-static bool identity_mapped_irq(unsigned irq)
+static void xen_free_irq(unsigned irq)
 {
-       /* identity map all the hardware irqs */
-       return irq < get_nr_hw_irqs();
+       /* Legacy IRQ descriptors are managed by the arch. */
+       if (irq < NR_IRQS_LEGACY)
+               return;
+
+       irq_free_desc(irq);
 }
 
 static void pirq_unmask_notify(int irq)
@@ -486,7 +474,7 @@ static bool probing_irq(int irq)
        return desc && desc->action == NULL;
 }
 
-static unsigned int startup_pirq(unsigned int irq)
+static unsigned int __startup_pirq(unsigned int irq)
 {
        struct evtchn_bind_pirq bind_pirq;
        struct irq_info *info = info_for_irq(irq);
@@ -524,9 +512,15 @@ out:
        return 0;
 }
 
-static void shutdown_pirq(unsigned int irq)
+static unsigned int startup_pirq(struct irq_data *data)
+{
+       return __startup_pirq(data->irq);
+}
+
+static void shutdown_pirq(struct irq_data *data)
 {
        struct evtchn_close close;
+       unsigned int irq = data->irq;
        struct irq_info *info = info_for_irq(irq);
        int evtchn = evtchn_from_irq(irq);
 
@@ -546,20 +540,20 @@ static void shutdown_pirq(unsigned int irq)
        info->evtchn = 0;
 }
 
-static void enable_pirq(unsigned int irq)
+static void enable_pirq(struct irq_data *data)
 {
-       startup_pirq(irq);
+       startup_pirq(data);
 }
 
-static void disable_pirq(unsigned int irq)
+static void disable_pirq(struct irq_data *data)
 {
 }
 
-static void ack_pirq(unsigned int irq)
+static void ack_pirq(struct irq_data *data)
 {
-       int evtchn = evtchn_from_irq(irq);
+       int evtchn = evtchn_from_irq(data->irq);
 
-       move_native_irq(irq);
+       move_native_irq(data->irq);
 
        if (VALID_EVTCHN(evtchn)) {
                mask_evtchn(evtchn);
@@ -567,23 +561,6 @@ static void ack_pirq(unsigned int irq)
        }
 }
 
-static void end_pirq(unsigned int irq)
-{
-       int evtchn = evtchn_from_irq(irq);
-       struct irq_desc *desc = irq_to_desc(irq);
-
-       if (WARN_ON(!desc))
-               return;
-
-       if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
-           (IRQ_DISABLED|IRQ_PENDING)) {
-               shutdown_pirq(irq);
-       } else if (VALID_EVTCHN(evtchn)) {
-               unmask_evtchn(evtchn);
-               pirq_unmask_notify(irq);
-       }
-}
-
 static int find_irq_by_gsi(unsigned gsi)
 {
        int irq;
@@ -638,14 +615,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
                goto out;       /* XXX need refcount? */
        }
 
-       /* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore
-        * we are using the !xen_initial_domain() to drop in the function.*/
-       if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
-                               xen_pv_domain())) {
-               irq = gsi;
-               irq_alloc_desc_at(irq, -1);
-       } else
-               irq = find_unbound_irq();
+       irq = xen_allocate_irq_gsi(gsi);
 
        set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
                                      handle_level_irq, name);
@@ -658,7 +628,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
         * this in the priv domain. */
        if (xen_initial_domain() &&
            HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
-               irq_free_desc(irq);
+               xen_free_irq(irq);
                irq = -ENOSPC;
                goto out;
        }
@@ -674,87 +644,46 @@ out:
 }
 
 #ifdef CONFIG_PCI_MSI
-#include <linux/msi.h>
-#include "../pci/msi.h"
-
-void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
+int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
 {
-       spin_lock(&irq_mapping_update_lock);
-
-       if (alloc & XEN_ALLOC_IRQ) {
-               *irq = find_unbound_irq();
-               if (*irq == -1)
-                       goto out;
-       }
-
-       if (alloc & XEN_ALLOC_PIRQ) {
-               *pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI);
-               if (*pirq == -1)
-                       goto out;
-       }
+       int rc;
+       struct physdev_get_free_pirq op_get_free_pirq;
 
-       set_irq_chip_and_handler_name(*irq, &xen_pirq_chip,
-                                     handle_level_irq, name);
+       op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
+       rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
 
-       irq_info[*irq] = mk_pirq_info(0, *pirq, 0, 0);
-       pirq_to_irq[*pirq] = *irq;
+       WARN_ONCE(rc == -ENOSYS,
+                 "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
 
-out:
-       spin_unlock(&irq_mapping_update_lock);
+       return rc ? -1 : op_get_free_pirq.pirq;
 }
 
-int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
+int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+                            int pirq, int vector, const char *name)
 {
-       int irq = -1;
-       struct physdev_map_pirq map_irq;
-       int rc;
-       int pos;
-       u32 table_offset, bir;
-
-       memset(&map_irq, 0, sizeof(map_irq));
-       map_irq.domid = DOMID_SELF;
-       map_irq.type = MAP_PIRQ_TYPE_MSI;
-       map_irq.index = -1;
-       map_irq.pirq = -1;
-       map_irq.bus = dev->bus->number;
-       map_irq.devfn = dev->devfn;
-
-       if (type == PCI_CAP_ID_MSIX) {
-               pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
-
-               pci_read_config_dword(dev, msix_table_offset_reg(pos),
-                                       &table_offset);
-               bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
-
-               map_irq.table_base = pci_resource_start(dev, bir);
-               map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
-       }
+       int irq, ret;
 
        spin_lock(&irq_mapping_update_lock);
 
-       irq = find_unbound_irq();
-
+       irq = xen_allocate_irq_dynamic();
        if (irq == -1)
                goto out;
 
-       rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
-       if (rc) {
-               printk(KERN_WARNING "xen map irq failed %d\n", rc);
-
-               irq_free_desc(irq);
-
-               irq = -1;
-               goto out;
-       }
-       irq_info[irq] = mk_pirq_info(0, map_irq.pirq, 0, map_irq.index);
-
        set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
-                       handle_level_irq,
-                       (type == PCI_CAP_ID_MSIX) ? "msi-x":"msi");
+                                     handle_level_irq, name);
 
+       irq_info[irq] = mk_pirq_info(0, pirq, 0, vector);
+       pirq_to_irq[pirq] = irq;
+       ret = irq_set_msi_desc(irq, msidesc);
+       if (ret < 0)
+               goto error_irq;
 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
+error_irq:
+       spin_unlock(&irq_mapping_update_lock);
+       xen_free_irq(irq);
+       return -1;
 }
 #endif
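
The MSI path is now two explicit steps: xen_allocate_pirq_msi() asks the hypervisor for a free pirq via PHYSDEVOP_get_free_pirq, and xen_bind_pirq_msi_to_irq() binds it to a freshly allocated IRQ, unwinding through error_irq on failure. Note that the error path drops irq_mapping_update_lock before xen_free_irq() runs. A hedged pthread sketch of that unlock-then-free ordering, with demo names in place of the Xen calls:

/* Unwind pattern: a resource taken under the mapping lock is released
 * only after the lock is dropped, since the free routine may itself
 * sleep or take other locks. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

static int bind_pirq(int irq, int fail)
{
        int ret;

        pthread_mutex_lock(&map_lock);
        ret = fail ? -1 : 0;                    /* stands in for irq_set_msi_desc() */
        if (ret < 0)
                goto error_irq;
        pthread_mutex_unlock(&map_lock);
        return irq;

error_irq:
        pthread_mutex_unlock(&map_lock);        /* unlock first ... */
        printf("freeing irq %d outside the lock\n", irq);      /* ... then free */
        return -1;
}

int main(void)
{
        printf("ok path  -> %d\n", bind_pirq(5, 0));
        printf("err path -> %d\n", bind_pirq(6, 1));
        return 0;
}
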
 
@@ -779,11 +708,12 @@ int xen_destroy_irq(int irq)
                        printk(KERN_WARNING "unmap irq failed %d\n", rc);
                        goto out;
                }
-               pirq_to_irq[info->u.pirq.pirq] = -1;
        }
+       pirq_to_irq[info->u.pirq.pirq] = -1;
+
        irq_info[irq] = mk_unbound_info();
 
-       irq_free_desc(irq);
+       xen_free_irq(irq);
 
 out:
        spin_unlock(&irq_mapping_update_lock);
@@ -814,7 +744,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
        irq = evtchn_to_irq[evtchn];
 
        if (irq == -1) {
-               irq = find_unbound_irq();
+               irq = xen_allocate_irq_dynamic();
 
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_fasteoi_irq, "event");
@@ -839,7 +769,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
        irq = per_cpu(ipi_to_irq, cpu)[ipi];
 
        if (irq == -1) {
-               irq = find_unbound_irq();
+               irq = xen_allocate_irq_dynamic();
                if (irq < 0)
                        goto out;
 
@@ -875,7 +805,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
        irq = per_cpu(virq_to_irq, cpu)[virq];
 
        if (irq == -1) {
-               irq = find_unbound_irq();
+               irq = xen_allocate_irq_dynamic();
 
                set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "virq");
@@ -934,7 +864,7 @@ static void unbind_from_irq(unsigned int irq)
        if (irq_info[irq].type != IRQT_UNBOUND) {
                irq_info[irq] = mk_unbound_info();
 
-               irq_free_desc(irq);
+               xen_free_irq(irq);
        }
 
        spin_unlock(&irq_mapping_update_lock);
@@ -990,7 +920,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
        if (irq < 0)
                return irq;
 
-       irqflags |= IRQF_NO_SUSPEND;
+       irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
@@ -1234,11 +1164,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
        return 0;
 }
 
-static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
+static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
+                           bool force)
 {
        unsigned tcpu = cpumask_first(dest);
 
-       return rebind_irq_to_cpu(irq, tcpu);
+       return rebind_irq_to_cpu(data->irq, tcpu);
 }
 
 int resend_irq_on_evtchn(unsigned int irq)
@@ -1257,35 +1188,35 @@ int resend_irq_on_evtchn(unsigned int irq)
        return 1;
 }
 
-static void enable_dynirq(unsigned int irq)
+static void enable_dynirq(struct irq_data *data)
 {
-       int evtchn = evtchn_from_irq(irq);
+       int evtchn = evtchn_from_irq(data->irq);
 
        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
 }
 
-static void disable_dynirq(unsigned int irq)
+static void disable_dynirq(struct irq_data *data)
 {
-       int evtchn = evtchn_from_irq(irq);
+       int evtchn = evtchn_from_irq(data->irq);
 
        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
 }
 
-static void ack_dynirq(unsigned int irq)
+static void ack_dynirq(struct irq_data *data)
 {
-       int evtchn = evtchn_from_irq(irq);
+       int evtchn = evtchn_from_irq(data->irq);
 
-       move_masked_irq(irq);
+       move_masked_irq(data->irq);
 
        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
 }
 
-static int retrigger_dynirq(unsigned int irq)
+static int retrigger_dynirq(struct irq_data *data)
 {
-       int evtchn = evtchn_from_irq(irq);
+       int evtchn = evtchn_from_irq(data->irq);
        struct shared_info *sh = HYPERVISOR_shared_info;
        int ret = 0;
 
@@ -1334,7 +1265,7 @@ static void restore_cpu_pirqs(void)
 
                printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
 
-               startup_pirq(irq);
+               __startup_pirq(irq);
        }
 }
 
@@ -1445,7 +1376,6 @@ void xen_poll_irq(int irq)
 void xen_irq_resume(void)
 {
        unsigned int cpu, irq, evtchn;
-       struct irq_desc *desc;
 
        init_evtchn_cpu_bindings();
 
@@ -1465,66 +1395,48 @@ void xen_irq_resume(void)
                restore_cpu_ipis(cpu);
        }
 
-       /*
-        * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
-        * are not handled by the IRQ core.
-        */
-       for_each_irq_desc(irq, desc) {
-               if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
-                       continue;
-               if (desc->status & IRQ_DISABLED)
-                       continue;
-
-               evtchn = evtchn_from_irq(irq);
-               if (evtchn == -1)
-                       continue;
-
-               unmask_evtchn(evtchn);
-       }
-
        restore_cpu_pirqs();
 }
 
 static struct irq_chip xen_dynamic_chip __read_mostly = {
-       .name           = "xen-dyn",
+       .name                   = "xen-dyn",
 
-       .disable        = disable_dynirq,
-       .mask           = disable_dynirq,
-       .unmask         = enable_dynirq,
+       .irq_disable            = disable_dynirq,
+       .irq_mask               = disable_dynirq,
+       .irq_unmask             = enable_dynirq,
 
-       .eoi            = ack_dynirq,
-       .set_affinity   = set_affinity_irq,
-       .retrigger      = retrigger_dynirq,
+       .irq_eoi                = ack_dynirq,
+       .irq_set_affinity       = set_affinity_irq,
+       .irq_retrigger          = retrigger_dynirq,
 };
 
 static struct irq_chip xen_pirq_chip __read_mostly = {
-       .name           = "xen-pirq",
+       .name                   = "xen-pirq",
 
-       .startup        = startup_pirq,
-       .shutdown       = shutdown_pirq,
+       .irq_startup            = startup_pirq,
+       .irq_shutdown           = shutdown_pirq,
 
-       .enable         = enable_pirq,
-       .unmask         = enable_pirq,
+       .irq_enable             = enable_pirq,
+       .irq_unmask             = enable_pirq,
 
-       .disable        = disable_pirq,
-       .mask           = disable_pirq,
+       .irq_disable            = disable_pirq,
+       .irq_mask               = disable_pirq,
 
-       .ack            = ack_pirq,
-       .end            = end_pirq,
+       .irq_ack                = ack_pirq,
 
-       .set_affinity   = set_affinity_irq,
+       .irq_set_affinity       = set_affinity_irq,
 
-       .retrigger      = retrigger_dynirq,
+       .irq_retrigger          = retrigger_dynirq,
 };
 
 static struct irq_chip xen_percpu_chip __read_mostly = {
-       .name           = "xen-percpu",
+       .name                   = "xen-percpu",
 
-       .disable        = disable_dynirq,
-       .mask           = disable_dynirq,
-       .unmask         = enable_dynirq,
+       .irq_disable            = disable_dynirq,
+       .irq_mask               = disable_dynirq,
+       .irq_unmask             = enable_dynirq,
 
-       .ack            = ack_dynirq,
+       .irq_ack                = ack_dynirq,
 };
 
 int xen_set_callback_via(uint64_t via)
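
All three chips above migrate from the legacy .mask/.unmask/.ack hooks to the irq_data-based .irq_* callbacks, so each method receives a struct irq_data rather than a bare number; together with IRQF_FORCE_RESUME on the IPI path, this lets the genirq core repeat the unmask after resume, which is why the hand-rolled loop could be dropped from xen_irq_resume(). A toy model of the new callback shape; the structs here are illustrative, not the kernel's:

/* Old-style chip ops took an unsigned int irq; new-style ops take a
 * per-IRQ data cookie, sparing every callback a descriptor lookup. */
#include <stdio.h>

struct irq_data {
        unsigned int irq;
        void *chip_data;                /* per-IRQ state, if any */
};

struct irq_chip {
        const char *name;
        void (*irq_mask)(struct irq_data *d);
        void (*irq_unmask)(struct irq_data *d);
};

static void demo_mask(struct irq_data *d)   { printf("mask %u\n", d->irq); }
static void demo_unmask(struct irq_data *d) { printf("unmask %u\n", d->irq); }

static const struct irq_chip demo_chip = {
        .name       = "demo",
        .irq_mask   = demo_mask,
        .irq_unmask = demo_unmask,
};

int main(void)
{
        struct irq_data d = { .irq = 7, .chip_data = NULL };

        demo_chip.irq_mask(&d);
        demo_chip.irq_unmask(&d);
        return 0;
}
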
index 24177272bcb84aed3ec51cb82f91535f0c034fc0..ebb292859b59cdfcfcd915e5e774895231aa9f32 100644
@@ -34,42 +34,38 @@ enum shutdown_state {
 /* Ignore multiple shutdown requests. */
 static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
 
-#ifdef CONFIG_PM_SLEEP
-static int xen_hvm_suspend(void *data)
-{
-       int err;
-       struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
-       int *cancelled = data;
-
-       BUG_ON(!irqs_disabled());
-
-       err = sysdev_suspend(PMSG_SUSPEND);
-       if (err) {
-               printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n",
-                      err);
-               return err;
-       }
-
-       *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+struct suspend_info {
+       int cancelled;
+       unsigned long arg; /* extra hypercall argument */
+       void (*pre)(void);
+       void (*post)(int cancelled);
+};
 
-       xen_hvm_post_suspend(*cancelled);
+static void xen_hvm_post_suspend(int cancelled)
+{
+       xen_arch_hvm_post_suspend(cancelled);
        gnttab_resume();
+}
 
-       if (!*cancelled) {
-               xen_irq_resume();
-               xen_console_resume();
-               xen_timer_resume();
-       }
-
-       sysdev_resume();
+static void xen_pre_suspend(void)
+{
+       xen_mm_pin_all();
+       gnttab_suspend();
+       xen_arch_pre_suspend();
+}
 
-       return 0;
+static void xen_post_suspend(int cancelled)
+{
+       xen_arch_post_suspend(cancelled);
+       gnttab_resume();
+       xen_mm_unpin_all();
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int xen_suspend(void *data)
 {
+       struct suspend_info *si = data;
        int err;
-       int *cancelled = data;
 
        BUG_ON(!irqs_disabled());
 
@@ -80,22 +76,20 @@ static int xen_suspend(void *data)
                return err;
        }
 
-       xen_mm_pin_all();
-       gnttab_suspend();
-       xen_pre_suspend();
+       if (si->pre)
+               si->pre();
 
        /*
         * This hypercall returns 1 if suspend was cancelled
         * or the domain was merely checkpointed, and 0 if it
         * is resuming in a new domain.
         */
-       *cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
+       si->cancelled = HYPERVISOR_suspend(si->arg);
 
-       xen_post_suspend(*cancelled);
-       gnttab_resume();
-       xen_mm_unpin_all();
+       if (si->post)
+               si->post(si->cancelled);
 
-       if (!*cancelled) {
+       if (!si->cancelled) {
                xen_irq_resume();
                xen_console_resume();
                xen_timer_resume();
@@ -109,7 +103,7 @@ static int xen_suspend(void *data)
 static void do_suspend(void)
 {
        int err;
-       int cancelled = 1;
+       struct suspend_info si;
 
        shutting_down = SHUTDOWN_SUSPEND;
 
@@ -139,20 +133,29 @@ static void do_suspend(void)
                goto out_resume;
        }
 
-       if (xen_hvm_domain())
-               err = stop_machine(xen_hvm_suspend, &cancelled, cpumask_of(0));
-       else
-               err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
+       si.cancelled = 1;
+
+       if (xen_hvm_domain()) {
+               si.arg = 0UL;
+               si.pre = NULL;
+               si.post = &xen_hvm_post_suspend;
+       } else {
+               si.arg = virt_to_mfn(xen_start_info);
+               si.pre = &xen_pre_suspend;
+               si.post = &xen_post_suspend;
+       }
+
+       err = stop_machine(xen_suspend, &si, cpumask_of(0));
 
        dpm_resume_noirq(PMSG_RESUME);
 
        if (err) {
                printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
-               cancelled = 1;
+               si.cancelled = 1;
        }
 
 out_resume:
-       if (!cancelled) {
+       if (!si.cancelled) {
                xen_arch_resume();
                xs_resume();
        } else
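
With struct suspend_info there is a single stop_machine() worker; the HVM/PV differences collapse into the hypercall argument plus optional pre/post hooks that are skipped when NULL. A compilable miniature of that shape, with print statements standing in for the pinning, grant-table, and hypercall work:

#include <stddef.h>
#include <stdio.h>

struct suspend_info {
        int cancelled;
        unsigned long arg;              /* extra hypercall argument */
        void (*pre)(void);
        void (*post)(int cancelled);
};

static void pv_pre(void)   { printf("pin mm, suspend grant tables\n"); }
static void pv_post(int c) { printf("post-suspend, cancelled=%d\n", c); }

static int suspend_worker(void *data)   /* shaped like a stop_machine() fn */
{
        struct suspend_info *si = data;

        if (si->pre)
                si->pre();
        si->cancelled = 1;              /* stands in for HYPERVISOR_suspend(si->arg) */
        if (si->post)
                si->post(si->cancelled);
        return 0;
}

int main(void)
{
        struct suspend_info hvm = { .arg = 0UL };       /* hooks left NULL */
        struct suspend_info pv  = { .pre = pv_pre, .post = pv_post };

        suspend_worker(&hvm);
        suspend_worker(&pv);
        return 0;
}
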
@@ -172,12 +175,39 @@ out:
 }
 #endif /* CONFIG_PM_SLEEP */
 
+struct shutdown_handler {
+       const char *command;
+       void (*cb)(void);
+};
+
+static void do_poweroff(void)
+{
+       shutting_down = SHUTDOWN_POWEROFF;
+       orderly_poweroff(false);
+}
+
+static void do_reboot(void)
+{
+       shutting_down = SHUTDOWN_POWEROFF; /* ? */
+       ctrl_alt_del();
+}
+
 static void shutdown_handler(struct xenbus_watch *watch,
                             const char **vec, unsigned int len)
 {
        char *str;
        struct xenbus_transaction xbt;
        int err;
+       static struct shutdown_handler handlers[] = {
+               { "poweroff",   do_poweroff },
+               { "halt",       do_poweroff },
+               { "reboot",     do_reboot   },
+#ifdef CONFIG_PM_SLEEP
+               { "suspend",    do_suspend  },
+#endif
+               { NULL, NULL },
+       };
+       struct shutdown_handler *handler;
 
        if (shutting_down != SHUTDOWN_INVALID)
                return;
@@ -194,7 +224,14 @@ static void shutdown_handler(struct xenbus_watch *watch,
                return;
        }
 
-       xenbus_write(xbt, "control", "shutdown", "");
+       for (handler = &handlers[0]; handler->command; handler++) {
+               if (strcmp(str, handler->command) == 0)
+                       break;
+       }
+
+       /* Only acknowledge commands which we are prepared to handle. */
+       if (handler->cb)
+               xenbus_write(xbt, "control", "shutdown", "");
 
        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN) {
@@ -202,17 +239,8 @@ static void shutdown_handler(struct xenbus_watch *watch,
                goto again;
        }
 
-       if (strcmp(str, "poweroff") == 0 ||
-           strcmp(str, "halt") == 0) {
-               shutting_down = SHUTDOWN_POWEROFF;
-               orderly_poweroff(false);
-       } else if (strcmp(str, "reboot") == 0) {
-               shutting_down = SHUTDOWN_POWEROFF; /* ? */
-               ctrl_alt_del();
-#ifdef CONFIG_PM_SLEEP
-       } else if (strcmp(str, "suspend") == 0) {
-               do_suspend();
-#endif
+       if (handler->cb) {
+               handler->cb();
        } else {
                printk(KERN_INFO "Ignoring shutdown request: %s\n", str);
                shutting_down = SHUTDOWN_INVALID;
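
The command dispatch is now a sentinel-terminated table walk: the loop exits with handler aimed either at a match or at the { NULL, NULL } entry, and the NULL .cb then gates both the xenstore acknowledgement and the callback itself. A self-contained sketch of the same lookup, with stub callbacks in place of the real poweroff/reboot paths:

#include <stdio.h>
#include <string.h>

struct shutdown_handler {
        const char *command;
        void (*cb)(void);
};

static void do_poweroff(void) { printf("poweroff\n"); }
static void do_reboot(void)   { printf("reboot\n");   }

static void dispatch(const char *str)
{
        static const struct shutdown_handler handlers[] = {
                { "poweroff", do_poweroff },
                { "halt",     do_poweroff },
                { "reboot",   do_reboot   },
                { NULL, NULL },
        };
        const struct shutdown_handler *h;

        for (h = &handlers[0]; h->command; h++)
                if (strcmp(str, h->command) == 0)
                        break;

        if (h->cb)                      /* sentinel entry has a NULL cb */
                h->cb();
        else
                printf("Ignoring shutdown request: %s\n", str);
}

int main(void)
{
        dispatch("halt");
        dispatch("dance");
        return 0;
}
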
@@ -291,27 +319,18 @@ static int shutdown_event(struct notifier_block *notifier,
        return NOTIFY_DONE;
 }
 
-static int __init __setup_shutdown_event(void)
-{
-       /* Delay initialization in the PV on HVM case */
-       if (xen_hvm_domain())
-               return 0;
-
-       if (!xen_pv_domain())
-               return -ENODEV;
-
-       return xen_setup_shutdown_event();
-}
-
 int xen_setup_shutdown_event(void)
 {
        static struct notifier_block xenstore_notifier = {
                .notifier_call = shutdown_event
        };
+
+       if (!xen_domain())
+               return -ENODEV;
        register_xenstore_notifier(&xenstore_notifier);
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(xen_setup_shutdown_event);
 
-subsys_initcall(__setup_shutdown_event);
+subsys_initcall(xen_setup_shutdown_event);
index afbe041f42c5afed624c021898028cfa482dbd1a..319dd0a94d5135389ff7f932ef68872f0936f5e0 100644
@@ -156,9 +156,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
        if (ret)
                goto out;
        xenbus_probe(NULL);
-       ret = xen_setup_shutdown_event();
-       if (ret)
-               goto out;
        return 0;
 
 out:
index 3db9caa57edcbfcf260436b4cd4fb15e3fc070f4..7cb53aafac1e93c9151d44984494659af5f37490 100644
@@ -47,7 +47,7 @@ config FS_POSIX_ACL
        def_bool n
 
 config EXPORTFS
-       tristate
+       bool
 
 config FILE_LOCKING
        bool "Enable POSIX file locking API" if EXPERT
index a7f7cef0c0c8343da03006fe39d5cce1a6bd225a..ba01202844c5ab248931fff41e5546f31febbafc 100644
@@ -48,6 +48,8 @@ obj-$(CONFIG_FS_POSIX_ACL)    += posix_acl.o xattr_acl.o
 obj-$(CONFIG_NFS_COMMON)       += nfs_common/
 obj-$(CONFIG_GENERIC_ACL)      += generic_acl.o
 
+obj-$(CONFIG_FHANDLE)          += fhandle.o
+
 obj-y                          += quota/
 
 obj-$(CONFIG_PROC_FS)          += proc/
index 15690bb1d3b531e65ae9cce255eaf532081667db..789b3afb342328dcfe551d39227a81f09ffb3470 100644
@@ -140,6 +140,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
        candidate->first = candidate->last = index;
        candidate->offset_first = from;
        candidate->to_last = to;
+       INIT_LIST_HEAD(&candidate->link);
        candidate->usage = 1;
        candidate->state = AFS_WBACK_PENDING;
        init_waitqueue_head(&candidate->waitq);
index fc557a3be0a9af055a9a9505d3c161a77a7f2129..26869cde3953588ff2c0b0aa524966d5c02519d2 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -239,15 +239,23 @@ static void __put_ioctx(struct kioctx *ctx)
        call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
-#define get_ioctx(kioctx) do {                                         \
-       BUG_ON(atomic_read(&(kioctx)->users) <= 0);                     \
-       atomic_inc(&(kioctx)->users);                                   \
-} while (0)
-#define put_ioctx(kioctx) do {                                         \
-       BUG_ON(atomic_read(&(kioctx)->users) <= 0);                     \
-       if (unlikely(atomic_dec_and_test(&(kioctx)->users)))            \
-               __put_ioctx(kioctx);                                    \
-} while (0)
+static inline void get_ioctx(struct kioctx *kioctx)
+{
+       BUG_ON(atomic_read(&kioctx->users) <= 0);
+       atomic_inc(&kioctx->users);
+}
+
+static inline int try_get_ioctx(struct kioctx *kioctx)
+{
+       return atomic_inc_not_zero(&kioctx->users);
+}
+
+static inline void put_ioctx(struct kioctx *kioctx)
+{
+       BUG_ON(atomic_read(&kioctx->users) <= 0);
+       if (unlikely(atomic_dec_and_test(&kioctx->users)))
+               __put_ioctx(kioctx);
+}
 
 /* ioctx_alloc
  *     Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
@@ -601,8 +609,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
        rcu_read_lock();
 
        hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
-               if (ctx->user_id == ctx_id && !ctx->dead) {
-                       get_ioctx(ctx);
+               /*
+                * RCU protects us against accessing freed memory but
+                * we have to be careful not to get a reference when the
+                * reference count already dropped to 0 (ctx->dead test
+                * is unreliable because of races).
+                */
+               if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)) {
                        ret = ctx;
                        break;
                }
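
try_get_ioctx() is the standard RCU-lookup guard: atomic_inc_not_zero() takes a reference only while the count is still positive, so a reader racing with the final put can never resurrect a dying kioctx. A userspace analog built on C11 atomics; the kernel primitive has the same contract but a different implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the count has not already reached zero. */
static bool atomic_inc_not_zero(atomic_int *v)
{
        int old = atomic_load(v);

        while (old != 0) {
                /* on failure, 'old' is reloaded and the loop re-checks it */
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return true;    /* got a reference */
        }
        return false;                   /* object already dying */
}

int main(void)
{
        atomic_int users = 1;

        printf("live object:  %d\n", (int)atomic_inc_not_zero(&users));
        atomic_store(&users, 0);
        printf("dying object: %d\n", (int)atomic_inc_not_zero(&users));
        return 0;
}
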
@@ -1629,6 +1642,23 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                goto out_put_req;
 
        spin_lock_irq(&ctx->ctx_lock);
+       /*
+        * We could have raced with io_destroy() and are currently holding a
+        * reference to ctx which should be destroyed. We cannot submit IO
+        * since ctx gets freed as soon as io_submit() puts its reference.  The
+        * check here is reliable: io_destroy() sets ctx->dead before waiting
+        * for outstanding IO and the barrier between these two is realized by
+        * unlock of mm->ioctx_lock and lock of ctx->ctx_lock.  Analogously we
+        * increment ctx->reqs_active before checking for ctx->dead and the
+        * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
+        * don't see ctx->dead set here, io_destroy() waits for our IO to
+        * finish.
+        */
+       if (ctx->dead) {
+               spin_unlock_irq(&ctx->ctx_lock);
+               ret = -EINVAL;
+               goto out_put_req;
+       }
        aio_run_iocb(req);
        if (!list_empty(&ctx->run_list)) {
                /* drain the run list */
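
The ctx->dead re-check depends on lock hand-offs as the memory barrier: io_destroy() sets the flag before it starts waiting, and io_submit() bumps reqs_active and only then tests the flag under ctx_lock, so at least one side must observe the other. A stripped-down pthread model of the submit side, with -22 standing in for -EINVAL:

#include <pthread.h>
#include <stdio.h>

struct ctx {
        pthread_mutex_t lock;
        int dead;
        int reqs_active;
};

static int submit(struct ctx *c)
{
        pthread_mutex_lock(&c->lock);
        /* Re-check under the lock: the destroyer sets ->dead before it
         * waits, so if we don't see it here, it will wait for our I/O. */
        if (c->dead) {
                pthread_mutex_unlock(&c->lock);
                return -22;             /* -EINVAL */
        }
        c->reqs_active++;
        pthread_mutex_unlock(&c->lock);
        return 0;
}

int main(void)
{
        struct ctx c = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        printf("live: %d\n", submit(&c));
        c.dead = 1;
        printf("dead: %d\n", submit(&c));
        return 0;
}
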
index 4fb8a34315310ae43c158670426a9699a7c0f7b0..889287019599a861d90a51d6808cfb2d032fc255 100644
@@ -873,6 +873,11 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
        ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
        if (ret)
                goto out_del;
+       /*
+        * bdev could be deleted beneath us which would implicitly destroy
+        * the holder directory.  Hold on to it.
+        */
+       kobject_get(bdev->bd_part->holder_dir);
 
        list_add(&holder->list, &bdev->bd_holder_disks);
        goto out_unlock;
@@ -909,6 +914,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
                del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
                del_symlink(bdev->bd_part->holder_dir,
                            &disk_to_dev(disk)->kobj);
+               kobject_put(bdev->bd_part->holder_dir);
                list_del_init(&holder->list);
                kfree(holder);
        }
@@ -922,14 +928,15 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
  * flush_disk - invalidates all buffer-cache entries on a disk
  *
  * @bdev:      struct block device to be flushed
+ * @kill_dirty: flag to guide handling of dirty inodes
  *
  * Invalidates all buffer-cache entries on a disk. It should be called
  * when a disk has been changed -- either by a media change or online
  * resize.
  */
-static void flush_disk(struct block_device *bdev)
+static void flush_disk(struct block_device *bdev, bool kill_dirty)
 {
-       if (__invalidate_device(bdev)) {
+       if (__invalidate_device(bdev, kill_dirty)) {
                char name[BDEVNAME_SIZE] = "";
 
                if (bdev->bd_disk)
@@ -966,7 +973,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
                       "%s: detected capacity change from %lld to %lld\n",
                       name, bdev_size, disk_size);
                i_size_write(bdev->bd_inode, disk_size);
-               flush_disk(bdev);
+               flush_disk(bdev, false);
        }
 }
 EXPORT_SYMBOL(check_disk_size_change);
@@ -1019,7 +1026,7 @@ int check_disk_change(struct block_device *bdev)
        if (!(events & DISK_EVENT_MEDIA_CHANGE))
                return 0;
 
-       flush_disk(bdev);
+       flush_disk(bdev, true);
        if (bdops->revalidate_disk)
                bdops->revalidate_disk(bdev->bd_disk);
        return 1;
@@ -1600,7 +1607,7 @@ fail:
 }
 EXPORT_SYMBOL(lookup_bdev);
 
-int __invalidate_device(struct block_device *bdev)
+int __invalidate_device(struct block_device *bdev, bool kill_dirty)
 {
        struct super_block *sb = get_super(bdev);
        int res = 0;
@@ -1613,7 +1620,7 @@ int __invalidate_device(struct block_device *bdev)
                 * hold).
                 */
                shrink_dcache_sb(sb);
-               res = invalidate_inodes(sb);
+               res = invalidate_inodes(sb, kill_dirty);
                drop_super(sb);
        }
        invalidate_bdev(bdev);
index 2c98b3af6052a25bd7ce5eeb5a0502d2ac1cca8a..7f78cc78fdd0a3dcc77218be33be516b5f2ac038 100644
@@ -729,6 +729,15 @@ struct btrfs_space_info {
        u64 disk_total;         /* total bytes on disk, takes mirrors into
                                   account */
 
+       /*
+        * we bump reservation progress every time we decrement
+        * bytes_reserved.  This way people waiting for reservations
+        * know something good has happened and they can check
+        * for progress.  The number here isn't to be trusted; it
+        * just shows reclaim activity
+        */
+       unsigned long reservation_progress;
+
        int full;               /* indicates that we cannot allocate any more
                                   chunks for this space */
        int force_alloc;        /* set if we need to force a chunk alloc for
@@ -1254,6 +1263,7 @@ struct btrfs_root {
 #define BTRFS_MOUNT_SPACE_CACHE                (1 << 12)
 #define BTRFS_MOUNT_CLEAR_CACHE                (1 << 13)
 #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
+#define BTRFS_MOUNT_ENOSPC_DEBUG        (1 << 15)
 
 #define btrfs_clear_opt(o, opt)                ((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)          ((o) |= BTRFS_MOUNT_##opt)
@@ -2218,6 +2228,8 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root,
                                   u64 start, u64 end);
 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
                               u64 num_bytes);
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+                           struct btrfs_root *root, u64 type);
 
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
index ff27d7a477b2012d1cd900164c6596ed77a9efb2..b4ffad859adb31a5ad5d8628a1fd6a10adae06ec 100644
@@ -21,9 +21,13 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
        int len = *max_len;
        int type;
 
-       if ((len < BTRFS_FID_SIZE_NON_CONNECTABLE) ||
-           (connectable && len < BTRFS_FID_SIZE_CONNECTABLE))
+       if (connectable && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
+               *max_len = BTRFS_FID_SIZE_CONNECTABLE;
                return 255;
+       } else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) {
+               *max_len = BTRFS_FID_SIZE_NON_CONNECTABLE;
+               return 255;
+       }
 
        len  = BTRFS_FID_SIZE_NON_CONNECTABLE;
        type = FILEID_BTRFS_WITHOUT_PARENT;
index f3c96fc01439c814518709ca5be5f14bef86827a..7b3089b5c2df816522e2356d3064f0b2bdb40c4f 100644
@@ -3342,15 +3342,16 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
        u64 max_reclaim;
        u64 reclaimed = 0;
        long time_left;
-       int pause = 1;
        int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
        int loops = 0;
+       unsigned long progress;
 
        block_rsv = &root->fs_info->delalloc_block_rsv;
        space_info = block_rsv->space_info;
 
        smp_mb();
        reserved = space_info->bytes_reserved;
+       progress = space_info->reservation_progress;
 
        if (reserved == 0)
                return 0;
@@ -3365,31 +3366,36 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
                writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
 
                spin_lock(&space_info->lock);
-               if (reserved > space_info->bytes_reserved) {
-                       loops = 0;
+               if (reserved > space_info->bytes_reserved)
                        reclaimed += reserved - space_info->bytes_reserved;
-               } else {
-                       loops++;
-               }
                reserved = space_info->bytes_reserved;
                spin_unlock(&space_info->lock);
 
+               loops++;
+
                if (reserved == 0 || reclaimed >= max_reclaim)
                        break;
 
                if (trans && trans->transaction->blocked)
                        return -EAGAIN;
 
-               __set_current_state(TASK_INTERRUPTIBLE);
-               time_left = schedule_timeout(pause);
+               time_left = schedule_timeout_interruptible(1);
 
                /* We were interrupted, exit */
                if (time_left)
                        break;
 
-               pause <<= 1;
-               if (pause > HZ / 10)
-                       pause = HZ / 10;
+               /* we've kicked the IO a few times; if anything has been freed,
+                * exit.  There is no sense in looping here for a long time
+                * when we really need to commit the transaction, or there are
+                * just too many writers without enough free space
+                */
+
+               if (loops > 3) {
+                       smp_mb();
+                       if (progress != space_info->reservation_progress)
+                               break;
+               }
 
        }
        return reclaimed >= to_reclaim;
@@ -3612,6 +3618,7 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
                if (num_bytes) {
                        spin_lock(&space_info->lock);
                        space_info->bytes_reserved -= num_bytes;
+                       space_info->reservation_progress++;
                        spin_unlock(&space_info->lock);
                }
        }
@@ -3844,6 +3851,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
        if (block_rsv->reserved >= block_rsv->size) {
                num_bytes = block_rsv->reserved - block_rsv->size;
                sinfo->bytes_reserved -= num_bytes;
+               sinfo->reservation_progress++;
                block_rsv->reserved = block_rsv->size;
                block_rsv->full = 1;
        }
@@ -4005,7 +4013,6 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
                to_reserve = 0;
        }
        spin_unlock(&BTRFS_I(inode)->accounting_lock);
-
        to_reserve += calc_csum_metadata_size(inode, num_bytes);
        ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
        if (ret)
@@ -4133,6 +4140,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
                        btrfs_set_block_group_used(&cache->item, old_val);
                        cache->reserved -= num_bytes;
                        cache->space_info->bytes_reserved -= num_bytes;
+                       cache->space_info->reservation_progress++;
                        cache->space_info->bytes_used += num_bytes;
                        cache->space_info->disk_used += num_bytes * factor;
                        spin_unlock(&cache->lock);
@@ -4184,6 +4192,7 @@ static int pin_down_extent(struct btrfs_root *root,
        if (reserved) {
                cache->reserved -= num_bytes;
                cache->space_info->bytes_reserved -= num_bytes;
+               cache->space_info->reservation_progress++;
        }
        spin_unlock(&cache->lock);
        spin_unlock(&cache->space_info->lock);
@@ -4234,6 +4243,7 @@ static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                space_info->bytes_readonly += num_bytes;
                        cache->reserved -= num_bytes;
                        space_info->bytes_reserved -= num_bytes;
+                       space_info->reservation_progress++;
                }
                spin_unlock(&cache->lock);
                spin_unlock(&space_info->lock);
@@ -4712,6 +4722,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                if (ret) {
                        spin_lock(&cache->space_info->lock);
                        cache->space_info->bytes_reserved -= buf->len;
+                       cache->space_info->reservation_progress++;
                        spin_unlock(&cache->space_info->lock);
                }
                goto out;
@@ -5376,7 +5387,7 @@ again:
                               num_bytes, data, 1);
                goto again;
        }
-       if (ret == -ENOSPC) {
+       if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
                struct btrfs_space_info *sinfo;
 
                sinfo = __find_space_info(root->fs_info, data);
@@ -8065,6 +8076,13 @@ out:
        return ret;
 }
 
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+                           struct btrfs_root *root, u64 type)
+{
+       u64 alloc_flags = get_alloc_profile(root, type);
+       return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+}
+
 /*
  * helper to account the unused space of all the readonly block group in the
  * list. takes mirrors into account.
index 92ac5192c518be5d59de00b1cb5b66c29e1e9cd2..714adc4ac4c24eaae26900bb9e862bc8b874432b 100644
@@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode,
  */
 u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end, u64 max_bytes,
-                    unsigned long bits)
+                    unsigned long bits, int contig)
 {
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 total_bytes = 0;
+       u64 last = 0;
        int found = 0;
 
        if (search_end <= cur_start) {
@@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->start > search_end)
                        break;
-               if (state->end >= cur_start && (state->state & bits)) {
+               if (contig && found && state->start > last + 1)
+                       break;
+               if (state->end >= cur_start && (state->state & bits) == bits) {
                        total_bytes += min(search_end, state->end) + 1 -
                                       max(cur_start, state->start);
                        if (total_bytes >= max_bytes)
@@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
                                *start = state->start;
                                found = 1;
                        }
+                       last = state->end;
+               } else if (contig && found) {
+                       break;
                }
                node = rb_next(node);
                if (!node)
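
The new contig argument changes count_range_bits() from summing every matching range in [start, search_end) to measuring a single contiguous run: once something has been found, the first gap ends the scan. A small model over a sorted array of inclusive ranges; the helper is hypothetical, not the extent tree:

#include <stdio.h>

struct range { unsigned long start, end; };     /* inclusive, sorted */

static unsigned long count_bytes(const struct range *r, int n, int contig)
{
        unsigned long total = 0, last = 0;
        int found = 0, i;

        for (i = 0; i < n; i++) {
                if (contig && found && r[i].start > last + 1)
                        break;                  /* gap: stop counting */
                total += r[i].end - r[i].start + 1;
                last = r[i].end;
                found = 1;
        }
        return total;
}

int main(void)
{
        const struct range runs[] = { { 0, 9 }, { 10, 19 }, { 40, 49 } };

        printf("sum:    %lu\n", count_bytes(runs, 3, 0));       /* 30 */
        printf("contig: %lu\n", count_bytes(runs, 3, 1));       /* 20 */
        return 0;
}
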
@@ -2912,6 +2918,46 @@ out:
        return sector;
 }
 
+/*
+ * helper function for fiemap, which doesn't want to see any holes.
+ * This maps until we find something past 'last'
+ */
+static struct extent_map *get_extent_skip_holes(struct inode *inode,
+                                               u64 offset,
+                                               u64 last,
+                                               get_extent_t *get_extent)
+{
+       u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
+       struct extent_map *em;
+       u64 len;
+
+       if (offset >= last)
+               return NULL;
+
+       while (1) {
+               len = last - offset;
+               if (len == 0)
+                       break;
+               len = (len + sectorsize - 1) & ~(sectorsize - 1);
+               em = get_extent(inode, NULL, 0, offset, len, 0);
+               if (!em || IS_ERR(em))
+                       return em;
+
+               /* if this isn't a hole return it */
+               if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
+                   em->block_start != EXTENT_MAP_HOLE) {
+                       return em;
+               }
+
+               /* this is a hole, advance to the next extent */
+               offset = extent_map_end(em);
+               free_extent_map(em);
+               if (offset >= last)
+                       break;
+       }
+       return NULL;
+}
+
 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len, get_extent_t *get_extent)
 {
@@ -2921,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        u32 flags = 0;
        u32 found_type;
        u64 last;
+       u64 last_for_get_extent = 0;
        u64 disko = 0;
+       u64 isize = i_size_read(inode);
        struct btrfs_key found_key;
        struct extent_map *em = NULL;
        struct extent_state *cached_state = NULL;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *item;
        int end = 0;
-       u64 em_start = 0, em_len = 0;
+       u64 em_start = 0;
+       u64 em_len = 0;
+       u64 em_end = 0;
        unsigned long emflags;
-       int hole = 0;
 
        if (len == 0)
                return -EINVAL;
@@ -2940,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                return -ENOMEM;
        path->leave_spinning = 1;
 
+       /*
+        * lookup the last file extent.  We're not using i_size here
+        * because there might be preallocation past i_size
+        */
        ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
                                       path, inode->i_ino, -1, 0);
        if (ret < 0) {
@@ -2953,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
        found_type = btrfs_key_type(&found_key);
 
-       /* No extents, just return */
+       /* No extents, but there might be delalloc bits */
        if (found_key.objectid != inode->i_ino ||
            found_type != BTRFS_EXTENT_DATA_KEY) {
-               btrfs_free_path(path);
-               return 0;
+               /* have to trust i_size as the end */
+               last = (u64)-1;
+               last_for_get_extent = isize;
+       } else {
+               /*
+                * remember the start of the last extent.  There are a
+                * bunch of different factors that go into the length of the
+                * extent, so it's much less complex to remember where it started
+                */
+               last = found_key.offset;
+               last_for_get_extent = last + 1;
        }
-       last = found_key.offset;
        btrfs_free_path(path);
 
+       /*
+        * we might have some extents allocated but more delalloc past those
+        * extents.  so, we trust isize unless the start of the last extent is
+        * beyond isize
+        */
+       if (last < isize) {
+               last = (u64)-1;
+               last_for_get_extent = isize;
+       }
+
        lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
                         &cached_state, GFP_NOFS);
-       em = get_extent(inode, NULL, 0, off, max - off, 0);
+
+       em = get_extent_skip_holes(inode, off, last_for_get_extent,
+                                  get_extent);
        if (!em)
                goto out;
        if (IS_ERR(em)) {
@@ -2973,22 +3046,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        }
 
        while (!end) {
-               hole = 0;
-               off = em->start + em->len;
-               if (off >= max)
-                       end = 1;
+               u64 offset_in_extent;
 
-               if (em->block_start == EXTENT_MAP_HOLE) {
-                       hole = 1;
-                       goto next;
-               }
+               /* break if the extent we found is outside the range */
+               if (em->start >= max || extent_map_end(em) < off)
+                       break;
 
-               em_start = em->start;
-               em_len = em->len;
+               /*
+                * get_extent may return an extent that starts before our
+                * requested range.  We have to make sure the ranges
+                * we return to fiemap always move forward and don't
+                * overlap, so adjust the offsets here
+                */
+               em_start = max(em->start, off);
 
+               /*
+                * record the offset from the start of the extent
+                * for adjusting the disk offset below
+                */
+               offset_in_extent = em_start - em->start;
+               em_end = extent_map_end(em);
+               em_len = em_end - em_start;
+               emflags = em->flags;
                disko = 0;
                flags = 0;
 
+               /*
+                * bump off for our next call to get_extent
+                */
+               off = extent_map_end(em);
+               if (off >= max)
+                       end = 1;
+
                if (em->block_start == EXTENT_MAP_LAST_BYTE) {
                        end = 1;
                        flags |= FIEMAP_EXTENT_LAST;
@@ -2999,42 +3088,34 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        flags |= (FIEMAP_EXTENT_DELALLOC |
                                  FIEMAP_EXTENT_UNKNOWN);
                } else {
-                       disko = em->block_start;
+                       disko = em->block_start + offset_in_extent;
                }
                if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
                        flags |= FIEMAP_EXTENT_ENCODED;
 
-next:
-               emflags = em->flags;
                free_extent_map(em);
                em = NULL;
-               if (!end) {
-                       em = get_extent(inode, NULL, 0, off, max - off, 0);
-                       if (!em)
-                               goto out;
-                       if (IS_ERR(em)) {
-                               ret = PTR_ERR(em);
-                               goto out;
-                       }
-                       emflags = em->flags;
-               }
-
-               if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
+               if ((em_start >= last) || em_len == (u64)-1 ||
+                  (last == (u64)-1 && isize <= em_end)) {
                        flags |= FIEMAP_EXTENT_LAST;
                        end = 1;
                }
 
-               if (em_start == last) {
+               /* now scan forward to see if this is really the last extent. */
+               em = get_extent_skip_holes(inode, off, last_for_get_extent,
+                                          get_extent);
+               if (IS_ERR(em)) {
+                       ret = PTR_ERR(em);
+                       goto out;
+               }
+               if (!em) {
                        flags |= FIEMAP_EXTENT_LAST;
                        end = 1;
                }
-
-               if (!hole) {
-                       ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
-                                               em_len, flags);
-                       if (ret)
-                               goto out_free;
-               }
+               ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
+                                             em_len, flags);
+               if (ret)
+                       goto out_free;
        }
 out_free:
        free_extent_map(em);
index 7083cfafd061abf64f7b0c2252bd91324022b79a..9318dfefd59c50d8d87db399d70c5932e06f8d60 100644
@@ -191,7 +191,7 @@ void extent_io_exit(void);
 
 u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end,
-                    u64 max_bytes, unsigned long bits);
+                    u64 max_bytes, unsigned long bits, int contig);
 
 void free_extent_state(struct extent_state *state);
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
index 7084140d5940e68f653ad8919a0084549d6b8e1b..f447b783bb84ce25af44cb2276b3d49684948166 100644
@@ -70,6 +70,19 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
 
                /* Flush processor's dcache for this page */
                flush_dcache_page(page);
+
+               /*
+                * if we get a partial write, we can end up with
+                * partially up to date pages.  These add
+                * a lot of complexity, so make sure they don't
+                * happen by forcing this copy to be retried.
+                *
+                * The rest of the btrfs_file_write code will fall
+                * back to page at a time copies after we return 0.
+                */
+               if (!PageUptodate(page) && copied < count)
+                       copied = 0;
+
                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;
@@ -762,6 +775,27 @@ out:
        return 0;
 }
 
+/*
+ * on error we return an unlocked page and the error value
+ * on success we return a locked page and 0
+ */
+static int prepare_uptodate_page(struct page *page, u64 pos)
+{
+       int ret = 0;
+
+       if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) {
+               ret = btrfs_readpage(NULL, page);
+               if (ret)
+                       return ret;
+               lock_page(page);
+               if (!PageUptodate(page)) {
+                       unlock_page(page);
+                       return -EIO;
+               }
+       }
+       return 0;
+}
+
 /*
  * this gets pages into the page cache and locks them down, it also properly
  * waits for data=ordered extents to finish before allowing the pages to be
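
prepare_uptodate_page() encodes the read-modify-write rule for buffered writes: a page has to be read in before the copy only when a write boundary lands inside it (the check is applied at pos for the first page and at pos + write_bytes for the last) and the page is not already uptodate. The predicate in isolation, as a hedged sketch:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* A page needs a read before the write only when the boundary is not
 * page aligned, i.e. when part of the old contents must survive. */
static bool needs_readpage(unsigned long pos, bool uptodate)
{
        return (pos & (PAGE_SIZE - 1)) && !uptodate;
}

int main(void)
{
        printf("aligned write:   %d\n", (int)needs_readpage(8192, false));
        printf("unaligned write: %d\n", (int)needs_readpage(8200, false));
        printf("cached page:     %d\n", (int)needs_readpage(8200, true));
        return 0;
}
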
@@ -777,6 +811,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = fdentry(file)->d_inode;
        int err = 0;
+       int faili = 0;
        u64 start_pos;
        u64 last_pos;
 
@@ -794,15 +829,24 @@ again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = grab_cache_page(inode->i_mapping, index + i);
                if (!pages[i]) {
-                       int c;
-                       for (c = i - 1; c >= 0; c--) {
-                               unlock_page(pages[c]);
-                               page_cache_release(pages[c]);
-                       }
-                       return -ENOMEM;
+                       faili = i - 1;
+                       err = -ENOMEM;
+                       goto fail;
+               }
+
+               if (i == 0)
+                       err = prepare_uptodate_page(pages[i], pos);
+               if (i == num_pages - 1)
+                       err = prepare_uptodate_page(pages[i],
+                                                   pos + write_bytes);
+               if (err) {
+                       page_cache_release(pages[i]);
+                       faili = i - 1;
+                       goto fail;
                }
                wait_on_page_writeback(pages[i]);
        }
+       err = 0;
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
                lock_extent_bits(&BTRFS_I(inode)->io_tree,
@@ -842,6 +886,14 @@ again:
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
+fail:
+       while (faili >= 0) {
+               unlock_page(pages[faili]);
+               page_cache_release(pages[faili]);
+               faili--;
+       }
+       return err;
+
 }
 
 static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
@@ -851,7 +903,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
        struct file *file = iocb->ki_filp;
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       struct page *pinned[2];
        struct page **pages = NULL;
        struct iov_iter i;
        loff_t *ppos = &iocb->ki_pos;
@@ -872,9 +923,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
        will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
                      (file->f_flags & O_DIRECT));
 
-       pinned[0] = NULL;
-       pinned[1] = NULL;
-
        start_pos = pos;
 
        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
@@ -962,32 +1010,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
        first_index = pos >> PAGE_CACHE_SHIFT;
        last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;
 
-       /*
-        * there are lots of better ways to do this, but this code
-        * makes sure the first and last page in the file range are
-        * up to date and ready for cow
-        */
-       if ((pos & (PAGE_CACHE_SIZE - 1))) {
-               pinned[0] = grab_cache_page(inode->i_mapping, first_index);
-               if (!PageUptodate(pinned[0])) {
-                       ret = btrfs_readpage(NULL, pinned[0]);
-                       BUG_ON(ret);
-                       wait_on_page_locked(pinned[0]);
-               } else {
-                       unlock_page(pinned[0]);
-               }
-       }
-       if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
-               pinned[1] = grab_cache_page(inode->i_mapping, last_index);
-               if (!PageUptodate(pinned[1])) {
-                       ret = btrfs_readpage(NULL, pinned[1]);
-                       BUG_ON(ret);
-                       wait_on_page_locked(pinned[1]);
-               } else {
-                       unlock_page(pinned[1]);
-               }
-       }
-
        while (iov_iter_count(&i) > 0) {
                size_t offset = pos & (PAGE_CACHE_SIZE - 1);
                size_t write_bytes = min(iov_iter_count(&i),
@@ -1024,8 +1046,20 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 
                copied = btrfs_copy_from_user(pos, num_pages,
                                           write_bytes, pages, &i);
-               dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >>
-                               PAGE_CACHE_SHIFT;
+
+               /*
+                * if we have trouble faulting in the pages, fall
+                * back to one page at a time
+                */
+               if (copied < write_bytes)
+                       nrptrs = 1;
+
+               if (copied == 0)
+                       dirty_pages = 0;
+               else
+                       dirty_pages = (copied + offset +
+                                      PAGE_CACHE_SIZE - 1) >>
+                                      PAGE_CACHE_SHIFT;
 
                if (num_pages > dirty_pages) {
                        if (copied > 0)
@@ -1069,10 +1103,6 @@ out:
                err = ret;
 
        kfree(pages);
-       if (pinned[0])
-               page_cache_release(pinned[0]);
-       if (pinned[1])
-               page_cache_release(pinned[1]);
        *ppos = pos;
 
        /*
index fb9bd7832b6db20fb5331d6008ec6a49db394a0a..4a0107e18747865e0d884351015638e92503e32b 100644
@@ -1913,7 +1913,7 @@ static int btrfs_clean_io_failures(struct inode *inode, u64 start)
 
        private = 0;
        if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
-                            (u64)-1, 1, EXTENT_DIRTY)) {
+                            (u64)-1, 1, EXTENT_DIRTY, 0)) {
                ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
                                        start, &private_failure);
                if (ret == 0) {
@@ -4806,9 +4806,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        int err;
        int drop_inode = 0;
 
-       if (inode->i_nlink == 0)
-               return -ENOENT;
-
        /* do not allow sys_link's with other subvols of the same device */
        if (root->objectid != BTRFS_I(inode)->root->objectid)
                return -EPERM;
@@ -4821,10 +4818,11 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                goto fail;
 
        /*
-        * 1 item for inode ref
+        * 2 items for inode and inode ref
         * 2 items for dir items
+        * 1 item for parent inode
         */
-       trans = btrfs_start_transaction(root, 3);
+       trans = btrfs_start_transaction(root, 5);
        if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
                goto fail;
@@ -5280,6 +5278,128 @@ out:
        return em;
 }
 
+struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
+                                          size_t pg_offset, u64 start, u64 len,
+                                          int create)
+{
+       struct extent_map *em;
+       struct extent_map *hole_em = NULL;
+       u64 range_start = start;
+       u64 end;
+       u64 found;
+       u64 found_end;
+       int err = 0;
+
+       em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
+       if (IS_ERR(em))
+               return em;
+       if (em) {
+               /*
+                * if our em maps to a hole, there might
+                * actually be delalloc bytes behind it
+                */
+               if (em->block_start != EXTENT_MAP_HOLE)
+                       return em;
+               else
+                       hole_em = em;
+       }
+
+       /* check to see if we've wrapped (len == -1 or similar) */
+       end = start + len;
+       if (end < start)
+               end = (u64)-1;
+       else
+               end -= 1;
+
+       em = NULL;
+
+       /* ok, we didn't find anything, let's look for delalloc */
+       found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
+                                end, len, EXTENT_DELALLOC, 1);
+       found_end = range_start + found;
+       if (found_end < range_start)
+               found_end = (u64)-1;
+
+       /*
+        * we didn't find anything useful, return
+        * the original results from get_extent()
+        */
+       if (range_start > end || found_end <= start) {
+               em = hole_em;
+               hole_em = NULL;
+               goto out;
+       }
+
+       /* adjust the range_start to make sure it doesn't
+        * go backwards from the start they passed in
+        */
+       range_start = max(start, range_start);
+       found = found_end - range_start;
+
+       if (found > 0) {
+               u64 hole_start = start;
+               u64 hole_len = len;
+
+               em = alloc_extent_map(GFP_NOFS);
+               if (!em) {
+                       err = -ENOMEM;
+                       goto out;
+               }
+               /*
+                * when btrfs_get_extent can't find anything it
+                * returns one huge hole
+                *
+                * make sure what it found really fits our range, and
+                * adjust to make sure it is based on the start from
+                * the caller
+                */
+               if (hole_em) {
+                       u64 calc_end = extent_map_end(hole_em);
+
+                       if (calc_end <= start || (hole_em->start > end)) {
+                               free_extent_map(hole_em);
+                               hole_em = NULL;
+                       } else {
+                               hole_start = max(hole_em->start, start);
+                               hole_len = calc_end - hole_start;
+                       }
+               }
+               em->bdev = NULL;
+               if (hole_em && range_start > hole_start) {
+                       /* our hole starts before our delalloc, so we
+                        * have to return just the parts of the hole
+                        * that go until the delalloc starts
+                        */
+                       em->len = min(hole_len,
+                                     range_start - hole_start);
+                       em->start = hole_start;
+                       em->orig_start = hole_start;
+                       /*
+                        * don't adjust block start at all,
+                        * it is fixed at EXTENT_MAP_HOLE
+                        */
+                       em->block_start = hole_em->block_start;
+                       em->block_len = hole_len;
+               } else {
+                       em->start = range_start;
+                       em->len = found;
+                       em->orig_start = range_start;
+                       em->block_start = EXTENT_MAP_DELALLOC;
+                       em->block_len = found;
+               }
+       } else if (hole_em) {
+               return hole_em;
+       }
+out:
+
+       free_extent_map(hole_em);
+       if (err) {
+               free_extent_map(em);
+               return ERR_PTR(err);
+       }
+       return em;
+}
+
 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
                                                  u64 start, u64 len)
 {
@@ -5934,6 +6054,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
        if (!skip_sum) {
                dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
                if (!dip->csums) {
+                       kfree(dip);
                        ret = -ENOMEM;
                        goto free_ordered;
                }
@@ -6102,7 +6223,7 @@ out:
 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len)
 {
-       return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
+       return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
 }
 
 int btrfs_readpage(struct file *file, struct page *page)
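A note on the wrap check near the top of btrfs_get_extent_fiemap(): fiemap callers can pass len == (u64)-1 to mean "through the end of the file", so start + len may overflow and has to be clamped rather than trusted. A minimal standalone sketch of the same clamp (range_end() is our illustrative name, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Clamp [start, start + len) to a valid u64 range; a wrapped sum
     * means the caller asked for everything from start onwards. */
    static uint64_t range_end(uint64_t start, uint64_t len)
    {
            uint64_t end = start + len;

            if (end < start)        /* wrapped: len was (u64)-1 or similar */
                    return UINT64_MAX;
            return end - 1;         /* inclusive end, as in the kernel code */
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)range_end(4096, 100));
            printf("%llu\n", (unsigned long long)range_end(4096, UINT64_MAX));
            return 0;
    }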
index be2d4f6aaa5eef34d877002bcbf524813f0a0405..5fdb2abc4fa789d49db9b76cd4e459d8c0d3bc85 100644 (file)
@@ -1071,12 +1071,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
        if (copy_from_user(&flags, arg, sizeof(flags)))
                return -EFAULT;
 
-       if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC)
+       if (flags & BTRFS_SUBVOL_CREATE_ASYNC)
                return -EINVAL;
 
        if (flags & ~BTRFS_SUBVOL_RDONLY)
                return -EOPNOTSUPP;
 
+       if (!is_owner_or_cap(inode))
+               return -EACCES;
+
        down_write(&root->fs_info->subvol_sem);
 
        /* nothing to do */
@@ -1097,7 +1100,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
                goto out_reset;
        }
 
-       ret = btrfs_update_root(trans, root,
+       ret = btrfs_update_root(trans, root->fs_info->tree_root,
                                &root->root_key, &root->root_item);
 
        btrfs_commit_transaction(trans, root);
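The reordered checks above fix an inverted mask test: BTRFS_SUBVOL_CREATE_ASYNC is now rejected outright by setflags (it only makes sense when creating a subvolume), and any bit outside BTRFS_SUBVOL_RDONLY still gets -EOPNOTSUPP. The flag logic in miniature (the bit values below are illustrative, not the real ABI):

    #include <assert.h>

    #define ASYNC  0x1ULL   /* illustrative stand-ins, not the ABI values */
    #define RDONLY 0x2ULL

    static int setflags_check(unsigned long long flags)
    {
            if (flags & ASYNC)      /* only valid when creating a subvolume */
                    return -22;     /* -EINVAL */
            if (flags & ~RDONLY)    /* read-only is the only supported bit */
                    return -95;     /* -EOPNOTSUPP */
            return 0;
    }

    int main(void)
    {
            assert(setflags_check(RDONLY) == 0);
            assert(setflags_check(ASYNC) == -22);
            assert(setflags_check(RDONLY | 0x4) == -95);
            return 0;
    }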
index cc9b450399df29d94c670391c9772bc06761c051..a178f5ebea78986c3d1614b5ffed51797dd1faa5 100644 (file)
@@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
        unsigned long tot_out;
        unsigned long tot_len;
        char *buf;
+       bool may_late_unmap, need_unmap;
 
        data_in = kmap(pages_in[0]);
        tot_len = read_compress_length(data_in);
@@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws,
 
                tot_in += in_len;
                working_bytes = in_len;
+               may_late_unmap = need_unmap = false;
 
                /* fast path: avoid using the working buffer */
                if (in_page_bytes_left >= in_len) {
                        buf = data_in + in_offset;
                        bytes = in_len;
+                       may_late_unmap = true;
                        goto cont;
                }
 
@@ -329,14 +332,17 @@ cont:
                                if (working_bytes == 0 && tot_in >= tot_len)
                                        break;
 
-                               kunmap(pages_in[page_in_index]);
-                               page_in_index++;
-                               if (page_in_index >= total_pages_in) {
+                               if (page_in_index + 1 >= total_pages_in) {
                                        ret = -1;
-                                       data_in = NULL;
                                        goto done;
                                }
-                               data_in = kmap(pages_in[page_in_index]);
+
+                               if (may_late_unmap)
+                                       need_unmap = true;
+                               else
+                                       kunmap(pages_in[page_in_index]);
+
+                               data_in = kmap(pages_in[++page_in_index]);
 
                                in_page_bytes_left = PAGE_CACHE_SIZE;
                                in_offset = 0;
@@ -346,6 +352,8 @@ cont:
                out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
                ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
                                            &out_len);
+               if (need_unmap)
+                       kunmap(pages_in[page_in_index - 1]);
                if (ret != LZO_E_OK) {
                        printk(KERN_WARNING "btrfs decompress failed\n");
                        ret = -1;
@@ -363,8 +371,7 @@ cont:
                        break;
        }
 done:
-       if (data_in)
-               kunmap(pages_in[page_in_index]);
+       kunmap(pages_in[page_in_index]);
        return ret;
 }
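The lzo fix above is about buffer lifetime: on the fast path, buf borrows bytes directly out of the currently mapped input page, so that page must stay mapped until lzo1x_decompress_safe() has consumed it; may_late_unmap/need_unmap defer the kunmap() by one step. A userspace toy showing why the early unmap was wrong (toy_map()/toy_unmap() are stand-ins for kmap()/kunmap()):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static char pages[2][20] = { "bytes from page 0", "bytes from page 1" };

    static char *toy_map(int i)   { return pages[i]; }
    static void  toy_unmap(int i) { memset(pages[i], '?', strlen(pages[i])); }

    int main(void)
    {
            char *buf = toy_map(0); /* fast path: borrow page 0's bytes */
            bool deferred = true;   /* buf is still in use: defer the unmap */

            (void)toy_map(1);       /* advance to the next input page */
            printf("decompressing: %s\n", buf); /* still intact */
            if (deferred)
                    toy_unmap(0);   /* only now is dropping page 0 safe */
            return 0;
    }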
 
index 0825e4ed9447a6a9bf8b825d211b2965c8299e88..31ade5802ae8a31f8ab48c5bdcbba0cebcddfe96 100644 (file)
@@ -3654,6 +3654,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
        u32 item_size;
        int ret;
        int err = 0;
+       int progress = 0;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -3666,9 +3667,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
        }
 
        while (1) {
+               progress++;
                trans = btrfs_start_transaction(rc->extent_root, 0);
                BUG_ON(IS_ERR(trans));
-
+restart:
                if (update_backref_cache(trans, &rc->backref_cache)) {
                        btrfs_end_transaction(trans, rc->extent_root);
                        continue;
@@ -3781,6 +3783,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
                        }
                }
        }
+       if (trans && progress && err == -ENOSPC) {
+               ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
+                                             rc->block_group->flags);
+               if (ret == 0) {
+                       err = 0;
+                       progress = 0;
+                       goto restart;
+               }
+       }
 
        btrfs_release_path(rc->extent_root, path);
        clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
index a004008f7d28e4c924d3a706eb737ad5f0f62e50..d39a9895d93288a6315e838cfe05dc1e1a65e7f4 100644 (file)
@@ -155,7 +155,8 @@ enum {
        Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress,
        Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
        Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
-       Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err,
+       Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
+       Opt_enospc_debug, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -184,6 +185,7 @@ static match_table_t tokens = {
        {Opt_space_cache, "space_cache"},
        {Opt_clear_cache, "clear_cache"},
        {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
+       {Opt_enospc_debug, "enospc_debug"},
        {Opt_err, NULL},
 };
 
@@ -358,6 +360,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                case Opt_user_subvol_rm_allowed:
                        btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
                        break;
+               case Opt_enospc_debug:
+                       btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
+                       break;
                case Opt_err:
                        printk(KERN_INFO "btrfs: unrecognized mount option "
                               "'%s'\n", p);
index af7dbca1527629417ed14a67aff93fc86fe4d8a9..dd13eb81ee4011df4d52103665d30465fe696306 100644 (file)
@@ -1338,11 +1338,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 
        ret = btrfs_shrink_device(device, 0);
        if (ret)
-               goto error_brelse;
+               goto error_undo;
 
        ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
        if (ret)
-               goto error_brelse;
+               goto error_undo;
 
        device->in_fs_metadata = 0;
 
@@ -1416,6 +1416,13 @@ out:
        mutex_unlock(&root->fs_info->volume_mutex);
        mutex_unlock(&uuid_mutex);
        return ret;
+error_undo:
+       if (device->writeable) {
+               list_add(&device->dev_alloc_list,
+                        &root->fs_info->fs_devices->alloc_list);
+               root->fs_info->fs_devices->rw_devices++;
+       }
+       goto error_brelse;
 }
 
 /*
@@ -1633,7 +1640,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        device->dev_root = root->fs_info->dev_root;
        device->bdev = bdev;
        device->in_fs_metadata = 1;
-       device->mode = 0;
+       device->mode = FMODE_EXCL;
        set_blocksize(device->bdev, 4096);
 
        if (seeding_dev) {
index f0aef787a1026cb96587054d19bc01c6eaf2e03b..ebafa65a29b6580619c75256195547a5107cb86d 100644 (file)
@@ -60,7 +60,6 @@ int ceph_init_dentry(struct dentry *dentry)
        }
        di->dentry = dentry;
        di->lease_session = NULL;
-       di->parent_inode = igrab(dentry->d_parent->d_inode);
        dentry->d_fsdata = di;
        dentry->d_time = jiffies;
        ceph_dentry_lru_add(dentry);
@@ -410,7 +409,7 @@ more:
        spin_lock(&inode->i_lock);
        if (ci->i_release_count == fi->dir_release_count) {
                dout(" marking %p complete\n", inode);
-               ci->i_ceph_flags |= CEPH_I_COMPLETE;
+               /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
                ci->i_max_offset = filp->f_pos;
        }
        spin_unlock(&inode->i_lock);
@@ -497,6 +496,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
 
        /* .snap dir? */
        if (err == -ENOENT &&
+           ceph_snap(parent) == CEPH_NOSNAP &&
            strcmp(dentry->d_name.name,
                   fsc->mount_options->snapdir_name) == 0) {
                struct inode *inode = ceph_get_snapdir(parent);
@@ -993,7 +993,7 @@ static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
        struct inode *dir;
 
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
 
        dir = dentry->d_parent->d_inode;
@@ -1030,28 +1030,8 @@ out_touch:
 static void ceph_dentry_release(struct dentry *dentry)
 {
        struct ceph_dentry_info *di = ceph_dentry(dentry);
-       struct inode *parent_inode = NULL;
-       u64 snapid = CEPH_NOSNAP;
 
-       if (!IS_ROOT(dentry)) {
-               parent_inode = di->parent_inode;
-               if (parent_inode)
-                       snapid = ceph_snap(parent_inode);
-       }
-       dout("dentry_release %p parent %p\n", dentry, parent_inode);
-       if (parent_inode && snapid != CEPH_SNAPDIR) {
-               struct ceph_inode_info *ci = ceph_inode(parent_inode);
-
-               spin_lock(&parent_inode->i_lock);
-               if (ci->i_shared_gen == di->lease_shared_gen ||
-                   snapid <= CEPH_MAXSNAP) {
-                       dout(" clearing %p complete (d_release)\n",
-                            parent_inode);
-                       ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
-                       ci->i_release_count++;
-               }
-               spin_unlock(&parent_inode->i_lock);
-       }
+       dout("dentry_release %p\n", dentry);
        if (di) {
                ceph_dentry_lru_del(dentry);
                if (di->lease_session)
@@ -1059,8 +1039,6 @@ static void ceph_dentry_release(struct dentry *dentry)
                kmem_cache_free(ceph_dentry_cachep, di);
                dentry->d_fsdata = NULL;
        }
-       if (parent_inode)
-               iput(parent_inode);
 }
 
 static int ceph_snapdir_d_revalidate(struct dentry *dentry,
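The nd && nd->flags & LOOKUP_RCU guard here (and the matching hunks in fat, fuse and gfs2 below) covers ->d_revalidate() being called with a NULL nameidata. No extra parentheses are needed since & binds tighter than &&, so the test parses as nd && (nd->flags & LOOKUP_RCU). A small compilable check (the LOOKUP_RCU value is illustrative):

    #include <assert.h>
    #include <stddef.h>

    #define LOOKUP_RCU 0x40 /* illustrative value for the demo */

    struct nameidata { unsigned int flags; };

    static int revalidate(struct nameidata *nd)
    {
            if (nd && nd->flags & LOOKUP_RCU)
                    return -1; /* -ECHILD: caller retries in ref-walk */
            return 0;
    }

    int main(void)
    {
            struct nameidata rcu_walk = { .flags = LOOKUP_RCU };

            assert(revalidate(NULL) == 0); /* NULL nd no longer oopses */
            assert(revalidate(&rcu_walk) == -1);
            return 0;
    }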
index 5625463aa4796f3df3678dd29ed1bfb1135a32af..193bfa5e9cbd7943bcb2a8124b5fa9ed6717cac4 100644 (file)
@@ -707,7 +707,7 @@ static int fill_inode(struct inode *inode,
                    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
                    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
                        dout(" marking %p complete (empty)\n", inode);
-                       ci->i_ceph_flags |= CEPH_I_COMPLETE;
+                       /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
                        ci->i_max_offset = 2;
                }
                break;
index 88fcaa21b801a8c10ba2be23c41967b95ddfcef6..20b907d76ae2f1be3b9e5ef4859bb1866b833c8a 100644 (file)
@@ -207,7 +207,6 @@ struct ceph_dentry_info {
        struct dentry *dentry;
        u64 time;
        u64 offset;
-       struct inode *parent_inode;
 };
 
 struct ceph_inode_xattrs_info {
index f6fd0a00e6cc4737c23cd7b80c107401c422b993..c6d31a3bab8863af2c5403e487f1cfc23bf63310 100644 (file)
@@ -262,35 +262,19 @@ static int put_compat_statfs(struct compat_statfs __user *ubuf, struct kstatfs *
  */
 asmlinkage long compat_sys_statfs(const char __user *pathname, struct compat_statfs __user *buf)
 {
-       struct path path;
-       int error;
-
-       error = user_path(pathname, &path);
-       if (!error) {
-               struct kstatfs tmp;
-               error = vfs_statfs(&path, &tmp);
-               if (!error)
-                       error = put_compat_statfs(buf, &tmp);
-               path_put(&path);
-       }
+       struct kstatfs tmp;
+       int error = user_statfs(pathname, &tmp);
+       if (!error)
+               error = put_compat_statfs(buf, &tmp);
        return error;
 }
 
 asmlinkage long compat_sys_fstatfs(unsigned int fd, struct compat_statfs __user *buf)
 {
-       struct file * file;
        struct kstatfs tmp;
-       int error;
-
-       error = -EBADF;
-       file = fget(fd);
-       if (!file)
-               goto out;
-       error = vfs_statfs(&file->f_path, &tmp);
+       int error = fd_statfs(fd, &tmp);
        if (!error)
                error = put_compat_statfs(buf, &tmp);
-       fput(file);
-out:
        return error;
 }
 
@@ -329,41 +313,29 @@ static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstat
 
 asmlinkage long compat_sys_statfs64(const char __user *pathname, compat_size_t sz, struct compat_statfs64 __user *buf)
 {
-       struct path path;
+       struct kstatfs tmp;
        int error;
 
        if (sz != sizeof(*buf))
                return -EINVAL;
 
-       error = user_path(pathname, &path);
-       if (!error) {
-               struct kstatfs tmp;
-               error = vfs_statfs(&path, &tmp);
-               if (!error)
-                       error = put_compat_statfs64(buf, &tmp);
-               path_put(&path);
-       }
+       error = user_statfs(pathname, &tmp);
+       if (!error)
+               error = put_compat_statfs64(buf, &tmp);
        return error;
 }
 
 asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct compat_statfs64 __user *buf)
 {
-       struct file * file;
        struct kstatfs tmp;
        int error;
 
        if (sz != sizeof(*buf))
                return -EINVAL;
 
-       error = -EBADF;
-       file = fget(fd);
-       if (!file)
-               goto out;
-       error = vfs_statfs(&file->f_path, &tmp);
+       error = fd_statfs(fd, &tmp);
        if (!error)
                error = put_compat_statfs64(buf, &tmp);
-       fput(file);
-out:
        return error;
 }
 
@@ -1228,7 +1200,9 @@ compat_sys_preadv(unsigned long fd, const struct compat_iovec __user *vec,
        file = fget_light(fd, &fput_needed);
        if (!file)
                return -EBADF;
-       ret = compat_readv(file, vec, vlen, &pos);
+       ret = -ESPIPE;
+       if (file->f_mode & FMODE_PREAD)
+               ret = compat_readv(file, vec, vlen, &pos);
        fput_light(file, fput_needed);
        return ret;
 }
@@ -1285,7 +1259,9 @@ compat_sys_pwritev(unsigned long fd, const struct compat_iovec __user *vec,
        file = fget_light(fd, &fput_needed);
        if (!file)
                return -EBADF;
-       ret = compat_writev(file, vec, vlen, &pos);
+       ret = -ESPIPE;
+       if (file->f_mode & FMODE_PWRITE)
+               ret = compat_writev(file, vec, vlen, &pos);
        fput_light(file, fput_needed);
        return ret;
 }
@@ -2308,3 +2284,16 @@ asmlinkage long compat_sys_timerfd_gettime(int ufd,
 }
 
 #endif /* CONFIG_TIMERFD */
+
+#ifdef CONFIG_FHANDLE
+/*
+ * Exactly like fs/open.c:sys_open_by_handle_at(), except that it
+ * doesn't set the O_LARGEFILE flag.
+ */
+asmlinkage long
+compat_sys_open_by_handle_at(int mountdirfd,
+                            struct file_handle __user *handle, int flags)
+{
+       return do_handle_open(mountdirfd, handle, flags);
+}
+#endif
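The FMODE_PREAD/FMODE_PWRITE checks bring the compat entry points in line with the native syscalls: positioned I/O on a non-seekable file must fail with ESPIPE instead of silently ignoring the offset. Observable from plain userspace (POSIX API, not kernel code):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
            int pfd[2];
            char c;
            struct iovec iov = { .iov_base = &c, .iov_len = 1 };

            if (pipe(pfd))
                    return 1;
            if (preadv(pfd[0], &iov, 1, 0) < 0)
                    printf("preadv on a pipe: %s\n", strerror(errno)); /* ESPIPE */
            return 0;
    }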
index 2a6bd9a4ae975fdc3eedf9723b551fc9e5f65836..a39fe47c466f794cb4f794ccd2863f2e165fc03e 100644 (file)
@@ -296,8 +296,12 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
        __releases(parent->d_lock)
        __releases(dentry->d_inode->i_lock)
 {
-       dentry->d_parent = NULL;
        list_del(&dentry->d_u.d_child);
+       /*
+        * Inform try_to_ascend() that we are no longer attached to the
+        * dentry tree
+        */
+       dentry->d_flags |= DCACHE_DISCONNECTED;
        if (parent)
                spin_unlock(&parent->d_lock);
        dentry_iput(dentry);
@@ -1011,6 +1015,35 @@ void shrink_dcache_for_umount(struct super_block *sb)
        }
 }
 
+/*
+ * This tries to ascend one level of parenthood, but
+ * we can race with renaming, so we need to re-check
+ * the parenthood after dropping the lock and check
+ * that the sequence number still matches.
+ */
+static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
+{
+       struct dentry *new = old->d_parent;
+
+       rcu_read_lock();
+       spin_unlock(&old->d_lock);
+       spin_lock(&new->d_lock);
+
+       /*
+        * might go back up the wrong parent if we have had a rename
+        * or deletion
+        */
+       if (new != old->d_parent ||
+                (old->d_flags & DCACHE_DISCONNECTED) ||
+                (!locked && read_seqretry(&rename_lock, seq))) {
+               spin_unlock(&new->d_lock);
+               new = NULL;
+       }
+       rcu_read_unlock();
+       return new;
+}
+
+
 /*
  * Search for at least 1 mount point in the dentry's subdirs.
  * We descend to the next level whenever the d_subdirs
@@ -1066,24 +1099,10 @@ resume:
         * All done at this level ... ascend and resume the search.
         */
        if (this_parent != parent) {
-               struct dentry *tmp;
-               struct dentry *child;
-
-               tmp = this_parent->d_parent;
-               rcu_read_lock();
-               spin_unlock(&this_parent->d_lock);
-               child = this_parent;
-               this_parent = tmp;
-               spin_lock(&this_parent->d_lock);
-               /* might go back up the wrong parent if we have had a rename
-                * or deletion */
-               if (this_parent != child->d_parent ||
-                        (!locked && read_seqretry(&rename_lock, seq))) {
-                       spin_unlock(&this_parent->d_lock);
-                       rcu_read_unlock();
+               struct dentry *child = this_parent;
+               this_parent = try_to_ascend(this_parent, locked, seq);
+               if (!this_parent)
                        goto rename_retry;
-               }
-               rcu_read_unlock();
                next = child->d_u.d_child.next;
                goto resume;
        }
@@ -1181,24 +1200,10 @@ resume:
         * All done at this level ... ascend and resume the search.
         */
        if (this_parent != parent) {
-               struct dentry *tmp;
-               struct dentry *child;
-
-               tmp = this_parent->d_parent;
-               rcu_read_lock();
-               spin_unlock(&this_parent->d_lock);
-               child = this_parent;
-               this_parent = tmp;
-               spin_lock(&this_parent->d_lock);
-               /* might go back up the wrong parent if we have had a rename
-                * or deletion */
-               if (this_parent != child->d_parent ||
-                       (!locked && read_seqretry(&rename_lock, seq))) {
-                       spin_unlock(&this_parent->d_lock);
-                       rcu_read_unlock();
+               struct dentry *child = this_parent;
+               this_parent = try_to_ascend(this_parent, locked, seq);
+               if (!this_parent)
                        goto rename_retry;
-               }
-               rcu_read_unlock();
                next = child->d_u.d_child.next;
                goto resume;
        }
@@ -1523,6 +1528,28 @@ struct dentry * d_alloc_root(struct inode * root_inode)
 }
 EXPORT_SYMBOL(d_alloc_root);
 
+static struct dentry * __d_find_any_alias(struct inode *inode)
+{
+       struct dentry *alias;
+
+       if (list_empty(&inode->i_dentry))
+               return NULL;
+       alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
+       __dget(alias);
+       return alias;
+}
+
+static struct dentry * d_find_any_alias(struct inode *inode)
+{
+       struct dentry *de;
+
+       spin_lock(&inode->i_lock);
+       de = __d_find_any_alias(inode);
+       spin_unlock(&inode->i_lock);
+       return de;
+}
+
+
 /**
  * d_obtain_alias - find or allocate a dentry for a given inode
  * @inode: inode to allocate the dentry for
@@ -1552,7 +1579,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
        if (IS_ERR(inode))
                return ERR_CAST(inode);
 
-       res = d_find_alias(inode);
+       res = d_find_any_alias(inode);
        if (res)
                goto out_iput;
 
@@ -1565,7 +1592,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
 
 
        spin_lock(&inode->i_lock);
-       res = __d_find_alias(inode, 0);
+       res = __d_find_any_alias(inode);
        if (res) {
                spin_unlock(&inode->i_lock);
                dput(tmp);
@@ -2920,28 +2947,14 @@ resume:
                spin_unlock(&dentry->d_lock);
        }
        if (this_parent != root) {
-               struct dentry *tmp;
-               struct dentry *child;
-
-               tmp = this_parent->d_parent;
+               struct dentry *child = this_parent;
                if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
                        this_parent->d_flags |= DCACHE_GENOCIDE;
                        this_parent->d_count--;
                }
-               rcu_read_lock();
-               spin_unlock(&this_parent->d_lock);
-               child = this_parent;
-               this_parent = tmp;
-               spin_lock(&this_parent->d_lock);
-               /* might go back up the wrong parent if we have had a rename
-                * or deletion */
-               if (this_parent != child->d_parent ||
-                        (!locked && read_seqretry(&rename_lock, seq))) {
-                       spin_unlock(&this_parent->d_lock);
-                       rcu_read_unlock();
+               this_parent = try_to_ascend(this_parent, locked, seq);
+               if (!this_parent)
                        goto rename_retry;
-               }
-               rcu_read_unlock();
                next = child->d_u.d_child.next;
                goto resume;
        }
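try_to_ascend() factors out the lock juggling that the three tree walkers used to duplicate: drop the child's lock, take the would-be parent's, then confirm that the parenthood and the rename_lock sequence are still intact, returning NULL so the caller restarts the walk otherwise. The validation step in miniature (single-threaded toy; the counter stands in for rename_lock):

    #include <stdio.h>

    struct node { struct node *parent; };

    static unsigned rename_seq; /* stand-in for the rename_lock seqcount */

    static struct node *ascend(struct node *child, unsigned seq)
    {
            struct node *new = child->parent;

            /* between unlock(child) and lock(new) anything may happen,
             * so re-check both the pointer and the sequence number */
            if (new != child->parent || rename_seq != seq)
                    return NULL; /* caller goes to rename_retry */
            return new;
    }

    int main(void)
    {
            struct node root = { 0 }, child = { &root };
            unsigned seq = rename_seq;

            printf("clean ascend: %d\n", ascend(&child, seq) == &root);
            rename_seq++; /* simulate a concurrent rename */
            printf("after rename: %d\n", ascend(&child, seq) == &root);
            return 0;
    }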
index 267d0ada45414a6f9cd8e90efeb9714e52e65de9..4a09af9e9a6387869bf398dbda88723b97d319cf 100644 (file)
  * cleanup path and it is also acquired by eventpoll_release_file()
  * if a file has been pushed inside an epoll set and it is then
  * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
+ * It is also acquired when inserting an epoll fd onto another epoll
+ * fd. We do this so that we walk the epoll tree and ensure that this
+ * insertion does not create a cycle of epoll file descriptors, which
+ * could lead to deadlock. We need a global mutex to prevent two
+ * simultaneous inserts (A into B and B into A) from racing and
+ * constructing a cycle without either insert observing that it is
+ * about to do so.
  * It is possible to drop the "ep->mtx" and to use the global
  * mutex "epmutex" (together with "ep->lock") to have it working,
  * but having "ep->mtx" will make the interface more scalable.
@@ -224,6 +231,9 @@ static long max_user_watches __read_mostly;
  */
 static DEFINE_MUTEX(epmutex);
 
+/* Used to check for epoll file descriptor inclusion loops */
+static struct nested_calls poll_loop_ncalls;
+
 /* Used for safe wake up implementation */
 static struct nested_calls poll_safewake_ncalls;
 
@@ -1198,6 +1208,62 @@ retry:
        return res;
 }
 
+/**
+ * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
+ *                      API, to verify that adding an epoll file inside another
+ *                      epoll structure does not violate the constraints, in
+ *                      terms of closed loops or too deep chains (which can
+ *                      result in excessive stack usage).
+ *
+ * @priv: Pointer to the epoll file currently being checked.
+ * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
+ *          data structure pointer.
+ * @call_nests: Current depth of the @ep_call_nested() call stack.
+ *
+ * Returns: Zero if adding the epoll @file inside the current epoll
+ *          structure @ep does not violate the constraints, or -1 otherwise.
+ */
+static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+{
+       int error = 0;
+       struct file *file = priv;
+       struct eventpoll *ep = file->private_data;
+       struct rb_node *rbp;
+       struct epitem *epi;
+
+       mutex_lock(&ep->mtx);
+       for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+               epi = rb_entry(rbp, struct epitem, rbn);
+               if (unlikely(is_file_epoll(epi->ffd.file))) {
+                       error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+                                              ep_loop_check_proc, epi->ffd.file,
+                                              epi->ffd.file->private_data, current);
+                       if (error != 0)
+                               break;
+               }
+       }
+       mutex_unlock(&ep->mtx);
+
+       return error;
+}
+
+/**
+ * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
+ *                 inside another epoll file (represented by @ep) does not create
+ *                 closed loops or too deep chains.
+ *
+ * @ep: Pointer to the epoll private data structure.
+ * @file: Pointer to the epoll file to be checked.
+ *
+ * Returns: Zero if adding the epoll @file inside the current epoll
+ *          structure @ep does not violate the constraints, or -1 otherwise.
+ */
+static int ep_loop_check(struct eventpoll *ep, struct file *file)
+{
+       return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+                             ep_loop_check_proc, file, ep, current);
+}
+
 /*
  * Open an eventpoll file descriptor.
  */
@@ -1246,6 +1312,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                struct epoll_event __user *, event)
 {
        int error;
+       int did_lock_epmutex = 0;
        struct file *file, *tfile;
        struct eventpoll *ep;
        struct epitem *epi;
@@ -1287,6 +1354,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
         */
        ep = file->private_data;
 
+       /*
+        * When we insert an epoll file descriptor inside another epoll file
+        * descriptor, there is the chance of creating closed loops, which
+        * are better handled here than in more critical paths.
+        *
+        * We hold epmutex across the loop check and the insert in this case, in
+        * order to prevent two separate inserts from racing and each doing the
+        * insert "at the same time" such that ep_loop_check passes on both
+        * before either one does the insert, thereby creating a cycle.
+        */
+       if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
+               mutex_lock(&epmutex);
+               did_lock_epmutex = 1;
+               error = -ELOOP;
+               if (ep_loop_check(ep, tfile) != 0)
+                       goto error_tgt_fput;
+       }
+
+
        mutex_lock(&ep->mtx);
 
        /*
@@ -1322,6 +1408,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
        mutex_unlock(&ep->mtx);
 
 error_tgt_fput:
+       if (unlikely(did_lock_epmutex))
+               mutex_unlock(&epmutex);
+
        fput(tfile);
 error_fput:
        fput(file);
@@ -1441,6 +1530,12 @@ static int __init eventpoll_init(void)
                EP_ITEM_COST;
        BUG_ON(max_user_watches < 0);
 
+       /*
+        * Initialize the structure used to perform epoll file descriptor
+        * inclusion loops checks.
+        */
+       ep_nested_calls_init(&poll_loop_ncalls);
+
        /* Initialize the structure used to perform safe poll wait head wake ups */
        ep_nested_calls_init(&poll_safewake_ncalls);
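The net effect of the loop check, seen from userspace: the second half of a mutual insertion now fails cleanly instead of constructing a cycle (requires a kernel carrying this patch):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/epoll.h>

    int main(void)
    {
            int a = epoll_create1(0);
            int b = epoll_create1(0);
            struct epoll_event ev = { .events = EPOLLIN };

            if (a < 0 || b < 0)
                    return 1;
            if (epoll_ctl(a, EPOLL_CTL_ADD, b, &ev))
                    return 1; /* A watching B is fine on its own */
            if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev))
                    printf("cycle rejected: %s\n", strerror(errno)); /* ELOOP */
            return 0;
    }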
 
index 52a447d9b6abd3495b4c79ec19259455c652f786..ba99e1abb1aa3427aca0e95f116e4deaa85ff793 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -115,13 +115,16 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
        struct file *file;
        char *tmp = getname(library);
        int error = PTR_ERR(tmp);
+       static const struct open_flags uselib_flags = {
+               .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
+               .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
+               .intent = LOOKUP_OPEN
+       };
 
        if (IS_ERR(tmp))
                goto out;
 
-       file = do_filp_open(AT_FDCWD, tmp,
-                               O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0,
-                               MAY_READ | MAY_EXEC | MAY_OPEN);
+       file = do_filp_open(AT_FDCWD, tmp, &uselib_flags, LOOKUP_FOLLOW);
        putname(tmp);
        error = PTR_ERR(file);
        if (IS_ERR(file))
@@ -721,10 +724,13 @@ struct file *open_exec(const char *name)
 {
        struct file *file;
        int err;
+       static const struct open_flags open_exec_flags = {
+               .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
+               .acc_mode = MAY_EXEC | MAY_OPEN,
+               .intent = LOOKUP_OPEN
+       };
 
-       file = do_filp_open(AT_FDCWD, name,
-                               O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0,
-                               MAY_EXEC | MAY_OPEN);
+       file = do_filp_open(AT_FDCWD, name, &open_exec_flags, LOOKUP_FOLLOW);
        if (IS_ERR(file))
                goto out;
 
index 264e95d02830f28d992eefb717460ccd0be09d4d..4d70db110cfc4d8585bf77f1f02061f66fab4127 100644 (file)
@@ -272,7 +272,6 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
                new_de = exofs_find_entry(new_dir, new_dentry, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                err = exofs_set_link(new_dir, new_de, new_page, old_inode);
                new_inode->i_ctime = CURRENT_TIME;
                if (dir_de)
@@ -286,12 +285,9 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        if (new_dir->i_nlink >= EXOFS_LINK_MAX)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = exofs_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
@@ -299,7 +295,7 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
        old_inode->i_ctime = CURRENT_TIME;
 
        exofs_delete_entry(old_de, old_page);
-       inode_dec_link_count(old_inode);
+       mark_inode_dirty(old_inode);
 
        if (dir_de) {
                err = exofs_set_link(old_inode, dir_de, dir_page, new_dir);
index 4b6825740dd5e6cfcf38918d3c8e477a677e94a4..b05acb7961355dfb680e49f3145a11065f6ac851 100644 (file)
@@ -320,9 +320,14 @@ static int export_encode_fh(struct dentry *dentry, struct fid *fid,
        struct inode * inode = dentry->d_inode;
        int len = *max_len;
        int type = FILEID_INO32_GEN;
-       
-       if (len < 2 || (connectable && len < 4))
+
+       if (connectable && (len < 4)) {
+               *max_len = 4;
+               return 255;
+       } else if (len < 2) {
+               *max_len = 2;
                return 255;
+       }
 
        len = 2;
        fid->i32.ino = inode->i_ino;
@@ -369,6 +374,8 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
        /*
         * Try to get any dentry for the given file handle from the filesystem.
         */
+       if (!nop || !nop->fh_to_dentry)
+               return ERR_PTR(-ESTALE);
        result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
        if (!result)
                result = ERR_PTR(-ESTALE);
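export_encode_fh() above establishes the pattern that the fat, fuse and gfs2 hunks below repeat: when the caller's buffer is too small, write the required length back through the in/out parameter and return 255, so callers such as the new name_to_handle_at() can report -EOVERFLOW together with the needed size. The convention in isolation (generic names, not any one filesystem's handler):

    #include <stdio.h>

    #define FH_OVERFLOW 255 /* the historical "no room" return value */

    static int encode_fh(unsigned int *fh, int *max_len, int connectable)
    {
            int need = connectable ? 4 : 2; /* length in 32-bit words */

            if (*max_len < need) {
                    *max_len = need; /* tell the caller what to allocate */
                    return FH_OVERFLOW;
            }
            *max_len = need;
            fh[0] = 42; /* the inode number would go here */
            return 1;   /* a FILEID_* type code on success */
    }

    int main(void)
    {
            unsigned int fh[4];
            int len = 1;

            if (encode_fh(fh, &len, 1) == FH_OVERFLOW)
                    printf("need %d words\n", len); /* prints: need 4 words */
            return 0;
    }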
index 2e1d8341d827b173b44b4f034106a7db7f2be8da..adb91855ccd092c86f220161c922873bee7fce0c 100644 (file)
@@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
                new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
                new_inode->i_ctime = CURRENT_TIME_SEC;
                if (dir_de)
@@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
                        if (new_dir->i_nlink >= EXT2_LINK_MAX)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = ext2_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
@@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
        /*
         * Like most other Unix systems, set the ctime for inodes on a
         * rename.
-        * inode_dec_link_count() will mark the inode dirty.
         */
        old_inode->i_ctime = CURRENT_TIME_SEC;
+       mark_inode_dirty(old_inode);
 
        ext2_delete_entry (old_de, old_page);
-       inode_dec_link_count(old_inode);
 
        if (dir_de) {
                if (old_dir != new_dir)
index b27ba71810ecdb57efbf7739559eb7bb76b1126e..561f692562663a4c42e93e223e67a5eb9a7914ee 100644 (file)
@@ -2253,13 +2253,6 @@ static int ext3_link (struct dentry * old_dentry,
 
        dquot_initialize(dir);
 
-       /*
-        * Return -ENOENT if we've raced with unlink and i_nlink is 0.  Doing
-        * otherwise has the potential to corrupt the orphan inode list.
-        */
-       if (inode->i_nlink == 0)
-               return -ENOENT;
-
 retry:
        handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
                                        EXT3_INDEX_EXTRA_TRANS_BLOCKS);
index 85c8cc8f24732c59c52943b6b8810780c2e94428..9cc19a1dea8ecb4276715bc920f02956290f74f7 100644 (file)
@@ -1936,6 +1936,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
        sb->s_qcop = &ext3_qctl_operations;
        sb->dq_op = &ext3_quota_operations;
 #endif
+       memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
        INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
        mutex_init(&sbi->s_orphan_lock);
        mutex_init(&sbi->s_resize_lock);
index 5485390d32c56e1b9b4ed7bd3802213827f8894c..e781b7ea56305dfde5c7458c78294a5dcc6d9361 100644 (file)
@@ -2304,13 +2304,6 @@ static int ext4_link(struct dentry *old_dentry,
 
        dquot_initialize(dir);
 
-       /*
-        * Return -ENOENT if we've raced with unlink and i_nlink is 0.  Doing
-        * otherwise has the potential to corrupt the orphan inode list.
-        */
-       if (inode->i_nlink == 0)
-               return -ENOENT;
-
 retry:
        handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
                                        EXT4_INDEX_EXTRA_TRANS_BLOCKS);
index f6a318f836b2cd7d5c2e70af3ae1bde6b6d301ce..5977b356a43531e60adcc54cf46dff972e379899 100644 (file)
@@ -3415,6 +3415,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_qcop = &ext4_qctl_operations;
        sb->dq_op = &ext4_quota_operations;
 #endif
+       memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
+
        INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
        mutex_init(&sbi->s_orphan_lock);
        mutex_init(&sbi->s_resize_lock);
index 86753fe10bd1d7a47d551dba030ee45013e5fbbe..0e277ec4b6120663795086b3a0cc80c4d1fcc813 100644 (file)
@@ -757,8 +757,10 @@ fat_encode_fh(struct dentry *de, __u32 *fh, int *lenp, int connectable)
        struct inode *inode =  de->d_inode;
        u32 ipos_h, ipos_m, ipos_l;
 
-       if (len < 5)
+       if (len < 5) {
+               *lenp = 5;
                return 255; /* no room */
+       }
 
        ipos_h = MSDOS_I(inode)->i_pos >> 8;
        ipos_m = (MSDOS_I(inode)->i_pos & 0xf0) << 24;
index f88f752babd9c4e2e1054fde3dd7306ec6e457fe..adae3fb7451aa52670d07fdf34381defe133e73a 100644 (file)
@@ -43,7 +43,7 @@ static int vfat_revalidate_shortname(struct dentry *dentry)
 
 static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
 
        /* This is not negative dentry. Always valid. */
@@ -54,7 +54,7 @@ static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
 
 static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
 {
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
 
        /*
index cb1026181bdcc29683edfaf3236f49260c0399a7..6c82e5bac03932bf11e154b9c7c9e86b774e93cd 100644 (file)
@@ -131,7 +131,7 @@ SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
 SYSCALL_DEFINE1(dup, unsigned int, fildes)
 {
        int ret = -EBADF;
-       struct file *file = fget(fildes);
+       struct file *file = fget_raw(fildes);
 
        if (file) {
                ret = get_unused_fd();
@@ -426,15 +426,35 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
        return err;
 }
 
+static int check_fcntl_cmd(unsigned cmd)
+{
+       switch (cmd) {
+       case F_DUPFD:
+       case F_DUPFD_CLOEXEC:
+       case F_GETFD:
+       case F_SETFD:
+       case F_GETFL:
+               return 1;
+       }
+       return 0;
+}
+
 SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
 {      
        struct file *filp;
        long err = -EBADF;
 
-       filp = fget(fd);
+       filp = fget_raw(fd);
        if (!filp)
                goto out;
 
+       if (unlikely(filp->f_mode & FMODE_PATH)) {
+               if (!check_fcntl_cmd(cmd)) {
+                       fput(filp);
+                       goto out;
+               }
+       }
+
        err = security_file_fcntl(filp, cmd, arg);
        if (err) {
                fput(filp);
@@ -456,10 +476,17 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
        long err;
 
        err = -EBADF;
-       filp = fget(fd);
+       filp = fget_raw(fd);
        if (!filp)
                goto out;
 
+       if (unlikely(filp->f_mode & FMODE_PATH)) {
+               if (!check_fcntl_cmd(cmd)) {
+                       fput(filp);
+                       goto out;
+               }
+       }
+
        err = security_file_fcntl(filp, cmd, arg);
        if (err) {
                fput(filp);
@@ -808,14 +835,14 @@ static int __init fcntl_init(void)
         * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
         * is defined as O_NONBLOCK on some platforms and not on others.
         */
-       BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+       BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
                O_RDONLY        | O_WRONLY      | O_RDWR        |
                O_CREAT         | O_EXCL        | O_NOCTTY      |
                O_TRUNC         | O_APPEND      | /* O_NONBLOCK | */
                __O_SYNC        | O_DSYNC       | FASYNC        |
                O_DIRECT        | O_LARGEFILE   | O_DIRECTORY   |
                O_NOFOLLOW      | O_NOATIME     | O_CLOEXEC     |
-               __FMODE_EXEC
+               __FMODE_EXEC    | O_PATH
                ));
 
        fasync_cache = kmem_cache_create("fasync_cache",
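The fget_raw()/check_fcntl_cmd() changes and the BUILD_BUG_ON bump are plumbing for the new O_PATH open flag: such descriptors name a location but carry no read/write mode, so dup and the whitelisted fcntl commands keep working while anything that goes through plain fget() rejects them. From userspace, on a kernel with this series and headers that define O_PATH:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            char c;
            int fd = open("/etc/hostname", O_PATH); /* any existing path */

            if (fd < 0)
                    return 1;
            printf("F_GETFL: %#x\n", fcntl(fd, F_GETFL)); /* allowed */
            if (read(fd, &c, 1) < 0) /* data I/O goes through fget() */
                    printf("read: %s\n", strerror(errno)); /* EBADF */
            return 0;
    }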
diff --git a/fs/fhandle.c b/fs/fhandle.c
new file mode 100644 (file)
index 0000000..bf93ad2
--- /dev/null
@@ -0,0 +1,265 @@
+#include <linux/syscalls.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/exportfs.h>
+#include <linux/fs_struct.h>
+#include <linux/fsnotify.h>
+#include <asm/uaccess.h>
+#include "internal.h"
+
+static long do_sys_name_to_handle(struct path *path,
+                                 struct file_handle __user *ufh,
+                                 int __user *mnt_id)
+{
+       long retval;
+       struct file_handle f_handle;
+       int handle_dwords, handle_bytes;
+       struct file_handle *handle = NULL;
+
+       /*
+        * We need to make sure that the file system
+        * supports decoding of the file handle
+        */
+       if (!path->mnt->mnt_sb->s_export_op ||
+           !path->mnt->mnt_sb->s_export_op->fh_to_dentry)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle)))
+               return -EFAULT;
+
+       if (f_handle.handle_bytes > MAX_HANDLE_SZ)
+               return -EINVAL;
+
+       handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
+                        GFP_KERNEL);
+       if (!handle)
+               return -ENOMEM;
+
+       /* convert handle size to a multiple of sizeof(u32) */
+       handle_dwords = f_handle.handle_bytes >> 2;
+
+       /* we ask for a non-connected handle */
+       retval = exportfs_encode_fh(path->dentry,
+                                   (struct fid *)handle->f_handle,
+                                   &handle_dwords,  0);
+       handle->handle_type = retval;
+       /* convert handle size to bytes */
+       handle_bytes = handle_dwords * sizeof(u32);
+       handle->handle_bytes = handle_bytes;
+       if ((handle->handle_bytes > f_handle.handle_bytes) ||
+           (retval == 255) || (retval == -ENOSPC)) {
+               /* As per the old exportfs_encode_fh documentation
+                * we could return ENOSPC to indicate overflow,
+                * but file systems have always returned 255, so
+                * handle both values.
+                */
+               /*
+                * set the handle size to zero so we copy only
+                * the non-variable part of the file_handle
+                */
+               handle_bytes = 0;
+               retval = -EOVERFLOW;
+       } else
+               retval = 0;
+       /* copy the mount id */
+       if (copy_to_user(mnt_id, &path->mnt->mnt_id, sizeof(*mnt_id)) ||
+           copy_to_user(ufh, handle,
+                        sizeof(struct file_handle) + handle_bytes))
+               retval = -EFAULT;
+       kfree(handle);
+       return retval;
+}
+
+/**
+ * sys_name_to_handle_at: convert name to handle
+ * @dfd: directory relative to which name is interpreted if not absolute
+ * @name: name that should be converted to handle.
+ * @handle: resulting file handle
+ * @mnt_id: mount id of the file system containing the file
+ * @flag: flag value to indicate whether to follow symlink or not
+ *
+ * @handle->handle_bytes indicates the space available to store the
+ * variable part of the file handle in bytes. If there is not
+ * enough space, the field is updated to return the minimum
+ * value required.
+ */
+SYSCALL_DEFINE5(name_to_handle_at, int, dfd, const char __user *, name,
+               struct file_handle __user *, handle, int __user *, mnt_id,
+               int, flag)
+{
+       struct path path;
+       int lookup_flags;
+       int err;
+
+       if ((flag & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
+               return -EINVAL;
+
+       lookup_flags = (flag & AT_SYMLINK_FOLLOW) ? LOOKUP_FOLLOW : 0;
+       if (flag & AT_EMPTY_PATH)
+               lookup_flags |= LOOKUP_EMPTY;
+       err = user_path_at(dfd, name, lookup_flags, &path);
+       if (!err) {
+               err = do_sys_name_to_handle(&path, handle, mnt_id);
+               path_put(&path);
+       }
+       return err;
+}
+
+static struct vfsmount *get_vfsmount_from_fd(int fd)
+{
+       struct path path;
+
+       if (fd == AT_FDCWD) {
+               struct fs_struct *fs = current->fs;
+               spin_lock(&fs->lock);
+               path = fs->pwd;
+               mntget(path.mnt);
+               spin_unlock(&fs->lock);
+       } else {
+               int fput_needed;
+               struct file *file = fget_light(fd, &fput_needed);
+               if (!file)
+                       return ERR_PTR(-EBADF);
+               path = file->f_path;
+               mntget(path.mnt);
+               fput_light(file, fput_needed);
+       }
+       return path.mnt;
+}
+
+static int vfs_dentry_acceptable(void *context, struct dentry *dentry)
+{
+       return 1;
+}
+
+static int do_handle_to_path(int mountdirfd, struct file_handle *handle,
+                            struct path *path)
+{
+       int retval = 0;
+       int handle_dwords;
+
+       path->mnt = get_vfsmount_from_fd(mountdirfd);
+       if (IS_ERR(path->mnt)) {
+               retval = PTR_ERR(path->mnt);
+               goto out_err;
+       }
+       /* change the handle size to a multiple of sizeof(u32) */
+       handle_dwords = handle->handle_bytes >> 2;
+       path->dentry = exportfs_decode_fh(path->mnt,
+                                         (struct fid *)handle->f_handle,
+                                         handle_dwords, handle->handle_type,
+                                         vfs_dentry_acceptable, NULL);
+       if (IS_ERR(path->dentry)) {
+               retval = PTR_ERR(path->dentry);
+               goto out_mnt;
+       }
+       return 0;
+out_mnt:
+       mntput(path->mnt);
+out_err:
+       return retval;
+}
+
+static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
+                  struct path *path)
+{
+       int retval = 0;
+       struct file_handle f_handle;
+       struct file_handle *handle = NULL;
+
+       /*
+        * With a handle we don't look at the execute bit on
+        * the directory. Ideally we would like CAP_DAC_SEARCH,
+        * but we don't have that
+        */
+       if (!capable(CAP_DAC_READ_SEARCH)) {
+               retval = -EPERM;
+               goto out_err;
+       }
+       if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle))) {
+               retval = -EFAULT;
+               goto out_err;
+       }
+       if ((f_handle.handle_bytes > MAX_HANDLE_SZ) ||
+           (f_handle.handle_bytes == 0)) {
+               retval = -EINVAL;
+               goto out_err;
+       }
+       handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
+                        GFP_KERNEL);
+       if (!handle) {
+               retval = -ENOMEM;
+               goto out_err;
+       }
+       /* copy the full handle */
+       if (copy_from_user(handle, ufh,
+                          sizeof(struct file_handle) +
+                          f_handle.handle_bytes)) {
+               retval = -EFAULT;
+               goto out_handle;
+       }
+
+       retval = do_handle_to_path(mountdirfd, handle, path);
+
+out_handle:
+       kfree(handle);
+out_err:
+       return retval;
+}
+
+long do_handle_open(int mountdirfd,
+                   struct file_handle __user *ufh, int open_flag)
+{
+       long retval = 0;
+       struct path path;
+       struct file *file;
+       int fd;
+
+       retval = handle_to_path(mountdirfd, ufh, &path);
+       if (retval)
+               return retval;
+
+       fd = get_unused_fd_flags(open_flag);
+       if (fd < 0) {
+               path_put(&path);
+               return fd;
+       }
+       file = file_open_root(path.dentry, path.mnt, "", open_flag);
+       if (IS_ERR(file)) {
+               put_unused_fd(fd);
+               retval =  PTR_ERR(file);
+       } else {
+               retval = fd;
+               fsnotify_open(file);
+               fd_install(fd, file);
+       }
+       path_put(&path);
+       return retval;
+}
+
+/**
+ * sys_open_by_handle_at: Open the file handle
+ * @mountdirfd: directory file descriptor
+ * @handle: file handle to be opened
+ * @flag: open flags.
+ *
+ * @mountdirfd indicates the directory file descriptor
+ * of the mount point. The file handle is decoded relative
+ * to the vfsmount pointed to by @mountdirfd. The @flags
+ * value is the same as the open(2) flags.
+ */
+SYSCALL_DEFINE3(open_by_handle_at, int, mountdirfd,
+               struct file_handle __user *, handle,
+               int, flags)
+{
+       long ret;
+
+       if (force_o_largefile())
+               flags |= O_LARGEFILE;
+
+       ret = do_handle_open(mountdirfd, handle, flags);
+       return ret;
+}
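The intended calling convention for the two new syscalls: probe name_to_handle_at() with handle_bytes = 0, let it fail with -EOVERFLOW while it writes the required size back into the header, then retry with a correctly sized buffer and reopen by handle (which requires CAP_DAC_READ_SEARCH). A sketch assuming a libc that exposes the wrappers and struct file_handle:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
            struct file_handle probe = { .handle_bytes = 0 }, *fh;
            int mount_id, fd;

            if (argc < 2)
                    return 1;
            /* fails with EOVERFLOW but fills in the needed size */
            name_to_handle_at(AT_FDCWD, argv[1], &probe, &mount_id, 0);

            fh = malloc(sizeof(*fh) + probe.handle_bytes);
            if (!fh)
                    return 1;
            fh->handle_bytes = probe.handle_bytes;
            if (name_to_handle_at(AT_FDCWD, argv[1], fh, &mount_id, 0))
                    return 1;

            /* needs CAP_DAC_READ_SEARCH; the dirfd must live on the
             * filesystem that issued the handle */
            fd = open_by_handle_at(AT_FDCWD, fh, O_RDONLY);
            printf("reopened as fd %d\n", fd);
            free(fh);
            return 0;
    }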
index eb36b6b17e26b0add06f7182bbaf2eeb4648bac8..74a9544ac770a9870e9efb4d489fbe41394934e7 100644 (file)
@@ -276,11 +276,10 @@ struct file *fget(unsigned int fd)
        rcu_read_lock();
        file = fcheck_files(files, fd);
        if (file) {
-               if (!atomic_long_inc_not_zero(&file->f_count)) {
-                       /* File object ref couldn't be taken */
-                       rcu_read_unlock();
-                       return NULL;
-               }
+               /* O_PATH fd, or the file ref couldn't be taken */
+               if (file->f_mode & FMODE_PATH ||
+                   !atomic_long_inc_not_zero(&file->f_count))
+                       file = NULL;
        }
        rcu_read_unlock();
 
@@ -289,6 +288,25 @@ struct file *fget(unsigned int fd)
 
 EXPORT_SYMBOL(fget);
 
+struct file *fget_raw(unsigned int fd)
+{
+       struct file *file;
+       struct files_struct *files = current->files;
+
+       rcu_read_lock();
+       file = fcheck_files(files, fd);
+       if (file) {
+               /* File object ref couldn't be taken */
+               if (!atomic_long_inc_not_zero(&file->f_count))
+                       file = NULL;
+       }
+       rcu_read_unlock();
+
+       return file;
+}
+
+EXPORT_SYMBOL(fget_raw);
+
 /*
  * Lightweight file lookup - no refcnt increment if fd table isn't shared.
  *
@@ -310,6 +328,33 @@ struct file *fget_light(unsigned int fd, int *fput_needed)
        struct file *file;
        struct files_struct *files = current->files;
 
+       *fput_needed = 0;
+       if (atomic_read(&files->count) == 1) {
+               file = fcheck_files(files, fd);
+               if (file && (file->f_mode & FMODE_PATH))
+                       file = NULL;
+       } else {
+               rcu_read_lock();
+               file = fcheck_files(files, fd);
+               if (file) {
+                       if (!(file->f_mode & FMODE_PATH) &&
+                           atomic_long_inc_not_zero(&file->f_count))
+                               *fput_needed = 1;
+                       else
+                               /* Didn't get the reference, someone freed it */
+                               file = NULL;
+               }
+               rcu_read_unlock();
+       }
+
+       return file;
+}
+
+struct file *fget_raw_light(unsigned int fd, int *fput_needed)
+{
+       struct file *file;
+       struct files_struct *files = current->files;
+
        *fput_needed = 0;
        if (atomic_read(&files->count) == 1) {
                file = fcheck_files(files, fd);
index bfed8447ed8090d8e233ef44f6d8eb13a440de10..8bd0ef9286c376cf980e83b2513764d8bb683eef 100644 (file)
@@ -158,7 +158,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
 {
        struct inode *inode;
 
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
 
        inode = entry->d_inode;
@@ -1283,8 +1283,11 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
        if (err)
                return err;
 
-       if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc)
-               return 0;
+       if (attr->ia_valid & ATTR_OPEN) {
+               if (fc->atomic_o_trunc)
+                       return 0;
+               file = NULL;
+       }
 
        if (attr->ia_valid & ATTR_SIZE)
                is_truncate = true;
index 95da1bc1c8267a78efce954c56e7f48eb1338d42..9e0832dbb1e3dfb8e0872273fc53800f60a9251d 100644 (file)
@@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff)
        return ff;
 }
 
+static void fuse_release_async(struct work_struct *work)
+{
+       struct fuse_req *req;
+       struct fuse_conn *fc;
+       struct path path;
+
+       req = container_of(work, struct fuse_req, misc.release.work);
+       path = req->misc.release.path;
+       fc = get_fuse_conn(path.dentry->d_inode);
+
+       fuse_put_request(fc, req);
+       path_put(&path);
+}
+
 static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
 {
-       path_put(&req->misc.release.path);
+       if (fc->destroy_req) {
+               /*
+                * If this is a fuseblk mount, then it's possible that
+                * releasing the path will result in releasing the
+                * super block and sending the DESTROY request.  If
+                * the server is single threaded, this would hang.
+                * For this reason do the path_put() in a separate
+                * thread.
+                */
+               atomic_inc(&req->count);
+               INIT_WORK(&req->misc.release.work, fuse_release_async);
+               schedule_work(&req->misc.release.work);
+       } else {
+               path_put(&req->misc.release.path);
+       }
 }
 
-static void fuse_file_put(struct fuse_file *ff)
+static void fuse_file_put(struct fuse_file *ff, bool sync)
 {
        if (atomic_dec_and_test(&ff->count)) {
                struct fuse_req *req = ff->reserved_req;
 
-               req->end = fuse_release_end;
-               fuse_request_send_background(ff->fc, req);
+               if (sync) {
+                       fuse_request_send(ff->fc, req);
+                       path_put(&req->misc.release.path);
+                       fuse_put_request(ff->fc, req);
+               } else {
+                       req->end = fuse_release_end;
+                       fuse_request_send_background(ff->fc, req);
+               }
                kfree(ff);
        }
 }
@@ -219,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode)
         * Normally this will send the RELEASE request, however if
         * some asynchronous READ or WRITE requests are outstanding,
         * the sending will be delayed.
+        *
+        * Make the release synchronous if this is a fuseblk mount;
+        * synchronous RELEASE is allowed (and desirable) in this case
+        * because the server can be trusted not to screw up.
         */
-       fuse_file_put(ff);
+       fuse_file_put(ff, ff->fc->destroy_req != NULL);
 }
 
 static int fuse_open(struct inode *inode, struct file *file)
@@ -558,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
                page_cache_release(page);
        }
        if (req->ff)
-               fuse_file_put(req->ff);
+               fuse_file_put(req->ff, false);
 }
 
 static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@@ -1137,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
 static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
 {
        __free_page(req->pages[0]);
-       fuse_file_put(req->ff);
+       fuse_file_put(req->ff, false);
 }
 
 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
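
A hedged userspace analogue of the release logic above: when dropping the last reference might re-enter the party being released (here, per the comment in the diff, a single-threaded fuseblk server that would have to service DESTROY from inside its own RELEASE handling), the final put is punted to another thread. All names below are invented for illustration:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct object { int refs; };

/* stands in for fuse_release_async(): runs the final put elsewhere */
static void *deferred_put(void *arg)
{
        struct object *obj = arg;
        printf("worker frees %p\n", (void *)obj);
        free(obj);                      /* would be path_put() here */
        return NULL;
}

static void put_object(struct object *obj, int could_deadlock)
{
        if (--obj->refs)
                return;
        if (could_deadlock) {           /* the fc->destroy_req case */
                pthread_t t;
                pthread_create(&t, NULL, deferred_put, obj);
                pthread_detach(t);
        } else {
                free(obj);              /* safe to put inline */
        }
}

int main(void)
{
        struct object *obj = malloc(sizeof(*obj));
        obj->refs = 1;
        put_object(obj, 1);
        pthread_exit(NULL);             /* let the worker finish */
}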
index ae5744a2f9e99a531551eb3704395d64ae08aee4..d4286947bc2cf57a6aba78109d3592ceb988205e 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/rwsem.h>
 #include <linux/rbtree.h>
 #include <linux/poll.h>
+#include <linux/workqueue.h>
 
 /** Max number of pages that can be used in a single read request */
 #define FUSE_MAX_PAGES_PER_REQ 32
@@ -262,7 +263,10 @@ struct fuse_req {
        /** Data for asynchronous requests */
        union {
                struct {
-                       struct fuse_release_in in;
+                       union {
+                               struct fuse_release_in in;
+                               struct work_struct work;
+                       };
                        struct path path;
                } release;
                struct fuse_init_in init_in;
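
The anonymous union added above presumably exists so the deferred-work handle can alias storage the request no longer needs: by the time the RELEASE reply has arrived, the fuse_release_in arguments are dead, so the work_struct fits without growing fuse_req. A stand-in sketch of that phase-based reuse (types invented, C11 anonymous union):

#include <stdio.h>

struct release_args { unsigned long fh; unsigned int flags; };
struct work_item    { void (*fn)(struct work_item *); };

struct request {
        union {                 /* phase 1: args; phase 2: work */
                struct release_args in;
                struct work_item    work;
        };
};

static void do_cleanup(struct work_item *w)
{
        (void)w;
        printf("cleanup\n");
}

int main(void)
{
        struct request req = { .in = { .fh = 42, .flags = 0 } };
        /* ... request sent, reply received: .in is dead now ... */
        req.work.fn = do_cleanup;
        req.work.fn(&req.work);
        return 0;
}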
index 9e3f68cc1bd1338e2c8a6f73de6646fface53be7..051b1a084528b382201ae2a5dd3f7c536a60122c 100644 (file)
@@ -637,8 +637,10 @@ static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
        u64 nodeid;
        u32 generation;
 
-       if (*max_len < len)
+       if (*max_len < len) {
+               *max_len = len;
                return 255;
+       }
 
        nodeid = get_fuse_inode(inode)->nodeid;
        generation = inode->i_generation;
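
This encode_fh change, repeated for gfs2 and isofs later in this section, converges on one contract: when the caller's buffer is too small, write the required length back through *max_len and return 255 rather than leaving the caller to guess. A hedged sketch of that contract with stand-in names:

#include <stdio.h>

#define MY_FH_LEN 3     /* words this example's handle needs */

static int example_encode_fh(unsigned int *fh, int *max_len)
{
        if (*max_len < MY_FH_LEN) {
                *max_len = MY_FH_LEN;   /* tell caller what to retry with */
                return 255;
        }
        fh[0] = 0xdeadbeef;             /* ... fill the handle ... */
        *max_len = MY_FH_LEN;
        return 1;                       /* handle type */
}

int main(void)
{
        unsigned int fh[8];
        int len = 1;
        if (example_encode_fh(fh, &len) == 255) {
                printf("retry with %d words\n", len);
                example_encode_fh(fh, &len);
        }
        return 0;
}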
index 4a456338b8733bdc26864d183c12239f290dd404..0da8da2c991d30a906f954ce021f5373092298f9 100644 (file)
@@ -44,7 +44,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
        int error;
        int had_lock = 0;
 
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
 
        parent = dget_parent(dentry);
index 9023db8184f91ba758bae3c2a981e0a8180d2dfb..b5a5e60df0d5294ea237da99a231416fe800e5b4 100644 (file)
@@ -36,9 +36,13 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
        struct super_block *sb = inode->i_sb;
        struct gfs2_inode *ip = GFS2_I(inode);
 
-       if (*len < GFS2_SMALL_FH_SIZE ||
-           (connectable && *len < GFS2_LARGE_FH_SIZE))
+       if (connectable && (*len < GFS2_LARGE_FH_SIZE)) {
+               *len = GFS2_LARGE_FH_SIZE;
                return 255;
+       } else if (*len < GFS2_SMALL_FH_SIZE) {
+               *len = GFS2_SMALL_FH_SIZE;
+               return 255;
+       }
 
        fh[0] = cpu_to_be32(ip->i_no_formal_ino >> 32);
        fh[1] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF);
index 85ba027d1c4d5a9209d2cb22b7829ea7b08bb3cd..72c31a315d9658e1a03ca830ebbf7a14d06d08f4 100644 (file)
@@ -59,14 +59,7 @@ static void gfs2_init_gl_aspace_once(void *foo)
        struct address_space *mapping = (struct address_space *)(gl + 1);
 
        gfs2_init_glock_once(gl);
-       memset(mapping, 0, sizeof(*mapping));
-       INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
-       spin_lock_init(&mapping->tree_lock);
-       spin_lock_init(&mapping->i_mmap_lock);
-       INIT_LIST_HEAD(&mapping->private_list);
-       spin_lock_init(&mapping->private_lock);
-       INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
-       INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+       address_space_init_once(mapping);
 }
 
 /**
index afa66aaa2237940b9e97ec47d47bd0cea16181b9..b4d70b13be92548c6ac2cd72ae5e34f3ab53ec21 100644 (file)
@@ -238,46 +238,22 @@ static int hfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 }
 
 /*
- * hfs_unlink()
+ * hfs_remove()
  *
- * This is the unlink() entry in the inode_operations structure for
- * regular HFS directories.  The purpose is to delete an existing
- * file, given the inode for the parent directory and the name
- * (and its length) of the existing file.
- */
-static int hfs_unlink(struct inode *dir, struct dentry *dentry)
-{
-       struct inode *inode;
-       int res;
-
-       inode = dentry->d_inode;
-       res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
-       if (res)
-               return res;
-
-       drop_nlink(inode);
-       hfs_delete_inode(inode);
-       inode->i_ctime = CURRENT_TIME_SEC;
-       mark_inode_dirty(inode);
-
-       return res;
-}
-
-/*
- * hfs_rmdir()
+ * This serves as both unlink() and rmdir() in the inode_operations
+ * structure for regular HFS directories.  The purpose is to delete
+ * an existing child, given the inode for the parent directory and
+ * the name (and its length) of the existing child.
  *
- * This is the rmdir() entry in the inode_operations structure for
- * regular HFS directories.  The purpose is to delete an existing
- * directory, given the inode for the parent directory and the name
- * (and its length) of the existing directory.
+ * HFS does not have hardlinks, so both rmdir and unlink set the
+ * link count to 0.  The only difference is the emptiness check.
  */
-static int hfs_rmdir(struct inode *dir, struct dentry *dentry)
+static int hfs_remove(struct inode *dir, struct dentry *dentry)
 {
-       struct inode *inode;
+       struct inode *inode = dentry->d_inode;
        int res;
 
-       inode = dentry->d_inode;
-       if (inode->i_size != 2)
+       if (S_ISDIR(inode->i_mode) && inode->i_size != 2)
                return -ENOTEMPTY;
        res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
        if (res)
@@ -307,7 +283,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        /* Unlink destination if it already exists */
        if (new_dentry->d_inode) {
-               res = hfs_unlink(new_dir, new_dentry);
+               res = hfs_remove(new_dir, new_dentry);
                if (res)
                        return res;
        }
@@ -332,9 +308,9 @@ const struct file_operations hfs_dir_operations = {
 const struct inode_operations hfs_dir_inode_operations = {
        .create         = hfs_create,
        .lookup         = hfs_lookup,
-       .unlink         = hfs_unlink,
+       .unlink         = hfs_remove,
        .mkdir          = hfs_mkdir,
-       .rmdir          = hfs_rmdir,
+       .rmdir          = hfs_remove,
        .rename         = hfs_rename,
        .setattr        = hfs_inode_setattr,
 };
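
Since HFS has no hard links, one handler can back both .unlink and .rmdir, differing only in the emptiness check. A stand-in sketch of that fold; the i_size == 2 test mirrors how the patch detects an empty directory:

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

struct fake_inode { mode_t mode; long size; };

static int example_remove(struct fake_inode *victim)
{
        /* the patch treats i_size == 2 as an empty HFS directory */
        if (S_ISDIR(victim->mode) && victim->size != 2)
                return -ENOTEMPTY;
        /* ... delete catalog entry, drop link count ... */
        return 0;
}

int main(void)
{
        struct fake_inode dir  = { .mode = S_IFDIR, .size = 5 };
        struct fake_inode file = { .mode = S_IFREG, .size = 100 };
        printf("rmdir: %d, unlink: %d\n",
               example_remove(&dir), example_remove(&file));
        return 0;
}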
index da85e56378f3e1d98c241901171e24e83882f189..0647d80accf6fb51274fe3e8489075e582ba5c16 100644 (file)
@@ -295,6 +295,20 @@ static void destroy_inode(struct inode *inode)
                call_rcu(&inode->i_rcu, i_callback);
 }
 
+void address_space_init_once(struct address_space *mapping)
+{
+       memset(mapping, 0, sizeof(*mapping));
+       INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+       spin_lock_init(&mapping->tree_lock);
+       spin_lock_init(&mapping->i_mmap_lock);
+       INIT_LIST_HEAD(&mapping->private_list);
+       spin_lock_init(&mapping->private_lock);
+       INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+       INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+       mutex_init(&mapping->unmap_mutex);
+}
+EXPORT_SYMBOL(address_space_init_once);
+
 /*
  * These are initializations that only need to be done
  * once, because the fields are idempotent across use
@@ -308,13 +322,7 @@ void inode_init_once(struct inode *inode)
        INIT_LIST_HEAD(&inode->i_devices);
        INIT_LIST_HEAD(&inode->i_wb_list);
        INIT_LIST_HEAD(&inode->i_lru);
-       INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
-       spin_lock_init(&inode->i_data.tree_lock);
-       spin_lock_init(&inode->i_data.i_mmap_lock);
-       INIT_LIST_HEAD(&inode->i_data.private_list);
-       spin_lock_init(&inode->i_data.private_lock);
-       INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
-       INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
+       address_space_init_once(&inode->i_data);
        i_size_ordered_init(inode);
 #ifdef CONFIG_FSNOTIFY
        INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
@@ -540,11 +548,14 @@ void evict_inodes(struct super_block *sb)
 /**
  * invalidate_inodes   - attempt to free all inodes on a superblock
  * @sb:                superblock to operate on
+ * @kill_dirty: flag to guide handling of dirty inodes
  *
  * Attempts to free all inodes for a given superblock.  If there were any
  * busy inodes return a non-zero value, else zero.
+ * If @kill_dirty is set, discard dirty inodes too; otherwise treat
+ * them as busy.
  */
-int invalidate_inodes(struct super_block *sb)
+int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 {
        int busy = 0;
        struct inode *inode, *next;
@@ -556,6 +567,10 @@ int invalidate_inodes(struct super_block *sb)
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
                        continue;
+               if (inode->i_state & I_DIRTY && !kill_dirty) {
+                       busy = 1;
+                       continue;
+               }
                if (atomic_read(&inode->i_count)) {
                        busy = 1;
                        continue;
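
The new bool lets each caller decide whether dirty inodes count as busy. A hedged distillation of the filtering above, run over a plain array instead of the superblock's inode list:

#include <stdbool.h>
#include <stdio.h>

#define I_DIRTY_EX 0x1  /* stand-in for the kernel's I_DIRTY mask */

struct fake_inode { unsigned state; int count; };

static int example_invalidate(struct fake_inode *in, int n, bool kill_dirty)
{
        int busy = 0;
        for (int i = 0; i < n; i++) {
                if ((in[i].state & I_DIRTY_EX) && !kill_dirty) {
                        busy = 1;       /* leave dirty inodes alone */
                        continue;
                }
                if (in[i].count) {
                        busy = 1;       /* still referenced */
                        continue;
                }
                /* ... evict ... */
        }
        return busy;
}

int main(void)
{
        struct fake_inode set[] = { { I_DIRTY_EX, 0 }, { 0, 1 }, { 0, 0 } };
        printf("busy=%d (keep dirty)\n", example_invalidate(set, 3, false));
        printf("busy=%d (kill dirty)\n", example_invalidate(set, 3, true));
        return 0;
}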
index 0663568b1247f88a0ca57ad19b8f9c7d36680f8e..f3d15de44b15a73b4cdc8525270bc08760202ed9 100644 (file)
@@ -106,10 +106,23 @@ extern void put_super(struct super_block *sb);
 struct nameidata;
 extern struct file *nameidata_to_filp(struct nameidata *);
 extern void release_open_intent(struct nameidata *);
+struct open_flags {
+       int open_flag;
+       int mode;
+       int acc_mode;
+       int intent;
+};
+extern struct file *do_filp_open(int dfd, const char *pathname,
+               const struct open_flags *op, int lookup_flags);
+extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
+               const char *, const struct open_flags *, int lookup_flags);
+
+extern long do_handle_open(int mountdirfd,
+                          struct file_handle __user *ufh, int open_flag);
 
 /*
  * inode.c
  */
 extern int get_nr_dirty_inodes(void);
 extern void evict_inodes(struct super_block *);
-extern int invalidate_inodes(struct super_block *);
+extern int invalidate_inodes(struct super_block *, bool);
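
struct open_flags above bundles the four values the open path keeps threading around. A hedged sketch of how a caller might populate one; build_example_flags() and its field mapping are invented here, not taken from the patch:

#include <fcntl.h>
#include <stdio.h>

struct open_flags_example {
        int open_flag;  /* O_* bits after fixups */
        int mode;       /* creation mode, only meaningful with O_CREAT */
        int acc_mode;   /* computed access mask */
        int intent;     /* lookup intent bits */
};

static struct open_flags_example build_example_flags(int flags, int mode)
{
        struct open_flags_example op = {
                .open_flag = flags,
                .mode      = (flags & O_CREAT) ? mode : 0,
                /* simplified: the kernel derives a MAY_* mask here */
                .acc_mode  = flags & O_ACCMODE,
                .intent    = 0,
        };
        return op;
}

int main(void)
{
        struct open_flags_example op =
                build_example_flags(O_CREAT | O_WRONLY, 0644);
        printf("flag=%#x mode=%#o acc=%d\n",
               op.open_flag, op.mode, op.acc_mode);
        return 0;
}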
index ed752cb3847426f108803af94566ba79d80786ed..dd4687ff30d09900a14f113aec870007cfcfb7f0 100644 (file)
@@ -124,9 +124,13 @@ isofs_export_encode_fh(struct dentry *dentry,
         * offset of the inode and the upper 16 bits of fh32[1] to
         * hold the offset of the parent.
         */
-
-       if (len < 3 || (connectable && len < 5))
+       if (connectable && (len < 5)) {
+               *max_len = 5;
+               return 255;
+       } else if (len < 3) {
+               *max_len = 3;
                return 255;
+       }
 
        len = 3;
        fh32[0] = ei->i_iget5_block;
index 81ead850ddb65c722fbd5d1fa8a3f7844428e033..3f04a1804931494f49bf0e7fe91d212721eb185d 100644 (file)
@@ -809,9 +809,6 @@ static int jfs_link(struct dentry *old_dentry,
        if (ip->i_nlink == JFS_LINK_MAX)
                return -EMLINK;
 
-       if (ip->i_nlink == 0)
-               return -ENOENT;
-
        dquot_initialize(dir);
 
        tid = txBegin(ip->i_sb, 0);
@@ -1600,7 +1597,7 @@ out:
 
 static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
        /*
         * This is not negative dentry. Always valid.
index ce7337ddfdbfd2c156ec012b5a8282381954c49b..6e6777f1b4b208eb5b2767ac4b215f781ad7385d 100644 (file)
@@ -213,7 +213,6 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
                new_de = minix_find_entry(new_dentry, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                minix_set_link(new_de, new_page, old_inode);
                new_inode->i_ctime = CURRENT_TIME_SEC;
                if (dir_de)
@@ -225,18 +224,15 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
                        if (new_dir->i_nlink >= info->s_link_max)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = minix_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
 
        minix_delete_entry(old_de, old_page);
-       inode_dec_link_count(old_inode);
+       mark_inode_dirty(old_inode);
 
        if (dir_de) {
                minix_set_link(dir_de, dir_page, new_dir);
index 0087cf9c2c6bccaf99000fbd0bfe95257549d81b..0a601cae23ded383e2b2c0e7296705fa8579d68a 100644 (file)
@@ -136,7 +136,7 @@ static int do_getname(const char __user *filename, char *page)
        return retval;
 }
 
-char * getname(const char __user * filename)
+static char *getname_flags(const char __user * filename, int flags)
 {
        char *tmp, *result;
 
@@ -147,14 +147,21 @@ char * getname(const char __user * filename)
 
                result = tmp;
                if (retval < 0) {
-                       __putname(tmp);
-                       result = ERR_PTR(retval);
+                       if (retval != -ENOENT || !(flags & LOOKUP_EMPTY)) {
+                               __putname(tmp);
+                               result = ERR_PTR(retval);
+                       }
                }
        }
        audit_getname(result);
        return result;
 }
 
+char *getname(const char __user * filename)
+{
+       return getname_flags(filename, 0);
+}
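
With LOOKUP_EMPTY set, an empty pathname survives copy-in instead of failing with -ENOENT; the apparent intent is to let flag-aware callers (user_path_at() below passes its flags straight through) accept "" where a later syscall wants it. A userspace analogue with a stand-in flag value:

#include <errno.h>
#include <stdio.h>

#define LOOKUP_EMPTY_EX 0x4000  /* stand-in value */

static const char *example_getname(const char *user, int flags, int *err)
{
        *err = 0;
        if (user[0] == '\0' && !(flags & LOOKUP_EMPTY_EX)) {
                *err = -ENOENT;         /* empty names still rejected */
                return NULL;
        }
        return user;                    /* empty string allowed through */
}

int main(void)
{
        int err;
        example_getname("", 0, &err);
        printf("plain: %d\n", err);     /* -ENOENT */
        example_getname("", LOOKUP_EMPTY_EX, &err);
        printf("opt-in: %d\n", err);    /* 0 */
        return 0;
}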
+
 #ifdef CONFIG_AUDITSYSCALL
 void putname(const char *name)
 {
@@ -401,9 +408,11 @@ static int nameidata_drop_rcu(struct nameidata *nd)
 {
        struct fs_struct *fs = current->fs;
        struct dentry *dentry = nd->path.dentry;
+       int want_root = 0;
 
        BUG_ON(!(nd->flags & LOOKUP_RCU));
-       if (nd->root.mnt) {
+       if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
+               want_root = 1;
                spin_lock(&fs->lock);
                if (nd->root.mnt != fs->root.mnt ||
                                nd->root.dentry != fs->root.dentry)
@@ -414,7 +423,7 @@ static int nameidata_drop_rcu(struct nameidata *nd)
                goto err;
        BUG_ON(nd->inode != dentry->d_inode);
        spin_unlock(&dentry->d_lock);
-       if (nd->root.mnt) {
+       if (want_root) {
                path_get(&nd->root);
                spin_unlock(&fs->lock);
        }
@@ -427,7 +436,7 @@ static int nameidata_drop_rcu(struct nameidata *nd)
 err:
        spin_unlock(&dentry->d_lock);
 err_root:
-       if (nd->root.mnt)
+       if (want_root)
                spin_unlock(&fs->lock);
        return -ECHILD;
 }
@@ -454,9 +463,11 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry
 {
        struct fs_struct *fs = current->fs;
        struct dentry *parent = nd->path.dentry;
+       int want_root = 0;
 
        BUG_ON(!(nd->flags & LOOKUP_RCU));
-       if (nd->root.mnt) {
+       if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
+               want_root = 1;
                spin_lock(&fs->lock);
                if (nd->root.mnt != fs->root.mnt ||
                                nd->root.dentry != fs->root.dentry)
@@ -476,7 +487,7 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry
        parent->d_count++;
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);
-       if (nd->root.mnt) {
+       if (want_root) {
                path_get(&nd->root);
                spin_unlock(&fs->lock);
        }
@@ -490,7 +501,7 @@ err:
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);
 err_root:
-       if (nd->root.mnt)
+       if (want_root)
                spin_unlock(&fs->lock);
        return -ECHILD;
 }
@@ -498,8 +509,16 @@ err_root:
 /* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing.  */
 static inline int nameidata_dentry_drop_rcu_maybe(struct nameidata *nd, struct dentry *dentry)
 {
-       if (nd->flags & LOOKUP_RCU)
-               return nameidata_dentry_drop_rcu(nd, dentry);
+       if (nd->flags & LOOKUP_RCU) {
+               if (unlikely(nameidata_dentry_drop_rcu(nd, dentry))) {
+                       nd->flags &= ~LOOKUP_RCU;
+                       if (!(nd->flags & LOOKUP_ROOT))
+                               nd->root.mnt = NULL;
+                       rcu_read_unlock();
+                       br_read_unlock(vfsmount_lock);
+                       return -ECHILD;
+               }
+       }
        return 0;
 }
 
@@ -518,7 +537,8 @@ static int nameidata_drop_rcu_last(struct nameidata *nd)
 
        BUG_ON(!(nd->flags & LOOKUP_RCU));
        nd->flags &= ~LOOKUP_RCU;
-       nd->root.mnt = NULL;
+       if (!(nd->flags & LOOKUP_ROOT))
+               nd->root.mnt = NULL;
        spin_lock(&dentry->d_lock);
        if (!__d_rcu_to_refcount(dentry, nd->seq))
                goto err_unlock;
@@ -539,14 +559,6 @@ err_unlock:
        return -ECHILD;
 }
 
-/* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing.  */
-static inline int nameidata_drop_rcu_last_maybe(struct nameidata *nd)
-{
-       if (likely(nd->flags & LOOKUP_RCU))
-               return nameidata_drop_rcu_last(nd);
-       return 0;
-}
-
 /**
  * release_open_intent - free up open intent resources
  * @nd: pointer to nameidata
@@ -590,42 +602,8 @@ do_revalidate(struct dentry *dentry, struct nameidata *nd)
        return dentry;
 }
 
-static inline struct dentry *
-do_revalidate_rcu(struct dentry *dentry, struct nameidata *nd)
-{
-       int status = d_revalidate(dentry, nd);
-       if (likely(status > 0))
-               return dentry;
-       if (status == -ECHILD) {
-               if (nameidata_dentry_drop_rcu(nd, dentry))
-                       return ERR_PTR(-ECHILD);
-               return do_revalidate(dentry, nd);
-       }
-       if (status < 0)
-               return ERR_PTR(status);
-       /* Don't d_invalidate in rcu-walk mode */
-       if (nameidata_dentry_drop_rcu(nd, dentry))
-               return ERR_PTR(-ECHILD);
-       if (!d_invalidate(dentry)) {
-               dput(dentry);
-               dentry = NULL;
-       }
-       return dentry;
-}
-
-static inline int need_reval_dot(struct dentry *dentry)
-{
-       if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
-               return 0;
-
-       if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)))
-               return 0;
-
-       return 1;
-}
-
 /*
- * force_reval_path - force revalidation of a dentry
+ * handle_reval_path - force revalidation of a dentry
  *
  * In some situations the path walking code will trust dentries without
  * revalidating them. This causes problems for filesystems that depend on
@@ -639,27 +617,28 @@ static inline int need_reval_dot(struct dentry *dentry)
  * invalidate the dentry. It's up to the caller to handle putting references
  * to the path if necessary.
  */
-static int
-force_reval_path(struct path *path, struct nameidata *nd)
+static inline int handle_reval_path(struct nameidata *nd)
 {
+       struct dentry *dentry = nd->path.dentry;
        int status;
-       struct dentry *dentry = path->dentry;
 
-       /*
-        * only check on filesystems where it's possible for the dentry to
-        * become stale.
-        */
-       if (!need_reval_dot(dentry))
+       if (likely(!(nd->flags & LOOKUP_JUMPED)))
+               return 0;
+
+       if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
+               return 0;
+
+       if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)))
                return 0;
 
+       /* Note: we do not d_invalidate() */
        status = d_revalidate(dentry, nd);
        if (status > 0)
                return 0;
 
-       if (!status) {
-               d_invalidate(dentry);
+       if (!status)
                status = -ESTALE;
-       }
+
        return status;
 }
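
handle_reval_path() only fires when three conditions line up: the walk jumped somewhere (LOOKUP_JUMPED), the dentry has ->d_revalidate, and the filesystem asked for FS_REVAL_DOT. A compressed restatement of those early-outs, with stand-in flag values:

#include <stdbool.h>
#include <stdio.h>

/* stand-in flag values, for illustration only */
#define EX_LOOKUP_JUMPED   0x1000
#define EX_OP_REVALIDATE   0x0004
#define EX_FS_REVAL_DOT    0x4000

static bool needs_final_reval(unsigned nd_flags, unsigned d_flags,
                              unsigned fs_flags)
{
        return (nd_flags & EX_LOOKUP_JUMPED) &&
               (d_flags & EX_OP_REVALIDATE) &&
               (fs_flags & EX_FS_REVAL_DOT);
}

int main(void)
{
        printf("%d\n", needs_final_reval(EX_LOOKUP_JUMPED,
                                         EX_OP_REVALIDATE,
                                         EX_FS_REVAL_DOT));    /* 1 */
        printf("%d\n", needs_final_reval(0, EX_OP_REVALIDATE,
                                         EX_FS_REVAL_DOT));    /* 0 */
        return 0;
}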
 
@@ -728,6 +707,7 @@ static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *l
                path_put(&nd->path);
                nd->path = nd->root;
                path_get(&nd->root);
+               nd->flags |= LOOKUP_JUMPED;
        }
        nd->inode = nd->path.dentry->d_inode;
 
@@ -757,20 +737,44 @@ static inline void path_to_nameidata(const struct path *path,
        nd->path.dentry = path->dentry;
 }
 
+static inline void put_link(struct nameidata *nd, struct path *link, void *cookie)
+{
+       struct inode *inode = link->dentry->d_inode;
+       if (!IS_ERR(cookie) && inode->i_op->put_link)
+               inode->i_op->put_link(link->dentry, nd, cookie);
+       path_put(link);
+}
+
 static __always_inline int
-__do_follow_link(const struct path *link, struct nameidata *nd, void **p)
+follow_link(struct path *link, struct nameidata *nd, void **p)
 {
        int error;
        struct dentry *dentry = link->dentry;
 
        BUG_ON(nd->flags & LOOKUP_RCU);
 
+       if (unlikely(current->total_link_count >= 40)) {
+               *p = ERR_PTR(-ELOOP); /* no ->put_link(), please */
+               path_put_conditional(link, nd);
+               path_put(&nd->path);
+               return -ELOOP;
+       }
+       cond_resched();
+       current->total_link_count++;
+
        touch_atime(link->mnt, dentry);
        nd_set_link(nd, NULL);
 
        if (link->mnt == nd->path.mnt)
                mntget(link->mnt);
 
+       error = security_inode_follow_link(link->dentry, nd);
+       if (error) {
+               *p = ERR_PTR(error); /* no ->put_link(), please */
+               path_put(&nd->path);
+               return error;
+       }
+
        nd->last_type = LAST_BIND;
        *p = dentry->d_inode->i_op->follow_link(dentry, nd);
        error = PTR_ERR(*p);
@@ -780,56 +784,18 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p)
                if (s)
                        error = __vfs_follow_link(nd, s);
                else if (nd->last_type == LAST_BIND) {
-                       error = force_reval_path(&nd->path, nd);
-                       if (error)
+                       nd->flags |= LOOKUP_JUMPED;
+                       nd->inode = nd->path.dentry->d_inode;
+                       if (nd->inode->i_op->follow_link) {
+                               /* stepped on a _really_ weird one */
                                path_put(&nd->path);
+                               error = -ELOOP;
+                       }
                }
        }
        return error;
 }
 
-/*
- * This limits recursive symlink follows to 8, while
- * limiting consecutive symlinks to 40.
- *
- * Without that kind of total limit, nasty chains of consecutive
- * symlinks can cause almost arbitrarily long lookups. 
- */
-static inline int do_follow_link(struct inode *inode, struct path *path, struct nameidata *nd)
-{
-       void *cookie;
-       int err = -ELOOP;
-
-       /* We drop rcu-walk here */
-       if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry))
-               return -ECHILD;
-       BUG_ON(inode != path->dentry->d_inode);
-
-       if (current->link_count >= MAX_NESTED_LINKS)
-               goto loop;
-       if (current->total_link_count >= 40)
-               goto loop;
-       BUG_ON(nd->depth >= MAX_NESTED_LINKS);
-       cond_resched();
-       err = security_inode_follow_link(path->dentry, nd);
-       if (err)
-               goto loop;
-       current->link_count++;
-       current->total_link_count++;
-       nd->depth++;
-       err = __do_follow_link(path, nd, &cookie);
-       if (!IS_ERR(cookie) && path->dentry->d_inode->i_op->put_link)
-               path->dentry->d_inode->i_op->put_link(path->dentry, nd, cookie);
-       path_put(path);
-       current->link_count--;
-       nd->depth--;
-       return err;
-loop:
-       path_put_conditional(path, nd);
-       path_put(&nd->path);
-       return err;
-}
-
 static int follow_up_rcu(struct path *path)
 {
        struct vfsmount *parent;
@@ -1068,7 +1034,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
 
                        seq = read_seqcount_begin(&parent->d_seq);
                        if (read_seqcount_retry(&old->d_seq, nd->seq))
-                               return -ECHILD;
+                               goto failed;
                        inode = parent->d_inode;
                        nd->path.dentry = parent;
                        nd->seq = seq;
@@ -1081,8 +1047,15 @@ static int follow_dotdot_rcu(struct nameidata *nd)
        }
        __follow_mount_rcu(nd, &nd->path, &inode, true);
        nd->inode = inode;
-
        return 0;
+
+failed:
+       nd->flags &= ~LOOKUP_RCU;
+       if (!(nd->flags & LOOKUP_ROOT))
+               nd->root.mnt = NULL;
+       rcu_read_unlock();
+       br_read_unlock(vfsmount_lock);
+       return -ECHILD;
 }
 
 /*
@@ -1216,19 +1189,10 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
 {
        struct vfsmount *mnt = nd->path.mnt;
        struct dentry *dentry, *parent = nd->path.dentry;
-       struct inode *dir;
+       int need_reval = 1;
+       int status = 1;
        int err;
 
-       /*
-        * See if the low-level filesystem might want
-        * to use its own hash..
-        */
-       if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
-               err = parent->d_op->d_hash(parent, nd->inode, name);
-               if (err < 0)
-                       return err;
-       }
-
        /*
         * Rename seqlock is not required here because in the off chance
         * of a false negative due to a concurrent rename, we're going to
@@ -1236,48 +1200,74 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
         */
        if (nd->flags & LOOKUP_RCU) {
                unsigned seq;
-
                *inode = nd->inode;
                dentry = __d_lookup_rcu(parent, name, &seq, inode);
-               if (!dentry) {
-                       if (nameidata_drop_rcu(nd))
-                               return -ECHILD;
-                       goto need_lookup;
-               }
+               if (!dentry)
+                       goto unlazy;
+
                /* Memory barrier in read_seqcount_begin of child is enough */
                if (__read_seqcount_retry(&parent->d_seq, nd->seq))
                        return -ECHILD;
-
                nd->seq = seq;
+
                if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
-                       dentry = do_revalidate_rcu(dentry, nd);
-                       if (!dentry)
-                               goto need_lookup;
-                       if (IS_ERR(dentry))
-                               goto fail;
-                       if (!(nd->flags & LOOKUP_RCU))
-                               goto done;
+                       status = d_revalidate(dentry, nd);
+                       if (unlikely(status <= 0)) {
+                               if (status != -ECHILD)
+                                       need_reval = 0;
+                               goto unlazy;
+                       }
                }
                path->mnt = mnt;
                path->dentry = dentry;
                if (likely(__follow_mount_rcu(nd, path, inode, false)))
                        return 0;
-               if (nameidata_drop_rcu(nd))
-                       return -ECHILD;
-               /* fallthru */
+unlazy:
+               if (dentry) {
+                       if (nameidata_dentry_drop_rcu(nd, dentry))
+                               return -ECHILD;
+               } else {
+                       if (nameidata_drop_rcu(nd))
+                               return -ECHILD;
+               }
+       } else {
+               dentry = __d_lookup(parent, name);
        }
-       dentry = __d_lookup(parent, name);
-       if (!dentry)
-               goto need_lookup;
-found:
-       if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
-               dentry = do_revalidate(dentry, nd);
-               if (!dentry)
-                       goto need_lookup;
-               if (IS_ERR(dentry))
-                       goto fail;
+
+retry:
+       if (unlikely(!dentry)) {
+               struct inode *dir = parent->d_inode;
+               BUG_ON(nd->inode != dir);
+
+               mutex_lock(&dir->i_mutex);
+               dentry = d_lookup(parent, name);
+               if (likely(!dentry)) {
+                       dentry = d_alloc_and_lookup(parent, name, nd);
+                       if (IS_ERR(dentry)) {
+                               mutex_unlock(&dir->i_mutex);
+                               return PTR_ERR(dentry);
+                       }
+                       /* known good */
+                       need_reval = 0;
+                       status = 1;
+               }
+               mutex_unlock(&dir->i_mutex);
        }
-done:
+       if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
+               status = d_revalidate(dentry, nd);
+       if (unlikely(status <= 0)) {
+               if (status < 0) {
+                       dput(dentry);
+                       return status;
+               }
+               if (!d_invalidate(dentry)) {
+                       dput(dentry);
+                       dentry = NULL;
+                       need_reval = 1;
+                       goto retry;
+               }
+       }
+
        path->mnt = mnt;
        path->dentry = dentry;
        err = follow_managed(path, nd->flags);
@@ -1287,39 +1277,113 @@ done:
        }
        *inode = path->dentry->d_inode;
        return 0;
+}
+
+static inline int may_lookup(struct nameidata *nd)
+{
+       if (nd->flags & LOOKUP_RCU) {
+               int err = exec_permission(nd->inode, IPERM_FLAG_RCU);
+               if (err != -ECHILD)
+                       return err;
+               if (nameidata_drop_rcu(nd))
+                       return -ECHILD;
+       }
+       return exec_permission(nd->inode, 0);
+}
 
-need_lookup:
-       dir = parent->d_inode;
-       BUG_ON(nd->inode != dir);
+static inline int handle_dots(struct nameidata *nd, int type)
+{
+       if (type == LAST_DOTDOT) {
+               if (nd->flags & LOOKUP_RCU) {
+                       if (follow_dotdot_rcu(nd))
+                               return -ECHILD;
+               } else
+                       follow_dotdot(nd);
+       }
+       return 0;
+}
 
-       mutex_lock(&dir->i_mutex);
-       /*
-        * First re-do the cached lookup just in case it was created
-        * while we waited for the directory semaphore, or the first
-        * lookup failed due to an unrelated rename.
-        *
-        * This could use version numbering or similar to avoid unnecessary
-        * cache lookups, but then we'd have to do the first lookup in the
-        * non-racy way. However in the common case here, everything should
-        * be hot in cache, so would it be a big win?
-        */
-       dentry = d_lookup(parent, name);
-       if (likely(!dentry)) {
-               dentry = d_alloc_and_lookup(parent, name, nd);
-               mutex_unlock(&dir->i_mutex);
-               if (IS_ERR(dentry))
-                       goto fail;
-               goto done;
+static void terminate_walk(struct nameidata *nd)
+{
+       if (!(nd->flags & LOOKUP_RCU)) {
+               path_put(&nd->path);
+       } else {
+               nd->flags &= ~LOOKUP_RCU;
+               if (!(nd->flags & LOOKUP_ROOT))
+                       nd->root.mnt = NULL;
+               rcu_read_unlock();
+               br_read_unlock(vfsmount_lock);
        }
+}
+
+static inline int walk_component(struct nameidata *nd, struct path *path,
+               struct qstr *name, int type, int follow)
+{
+       struct inode *inode;
+       int err;
        /*
-        * Uhhuh! Nasty case: the cache was re-populated while
-        * we waited on the semaphore. Need to revalidate.
+        * "." and ".." are special - ".." especially so because it has
+        * to be able to know about the current root directory and
+        * parent relationships.
         */
-       mutex_unlock(&dir->i_mutex);
-       goto found;
+       if (unlikely(type != LAST_NORM))
+               return handle_dots(nd, type);
+       err = do_lookup(nd, name, path, &inode);
+       if (unlikely(err)) {
+               terminate_walk(nd);
+               return err;
+       }
+       if (!inode) {
+               path_to_nameidata(path, nd);
+               terminate_walk(nd);
+               return -ENOENT;
+       }
+       if (unlikely(inode->i_op->follow_link) && follow) {
+               if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry))
+                       return -ECHILD;
+               BUG_ON(inode != path->dentry->d_inode);
+               return 1;
+       }
+       path_to_nameidata(path, nd);
+       nd->inode = inode;
+       return 0;
+}
 
-fail:
-       return PTR_ERR(dentry);
+/*
+ * This limits recursive symlink follows to 8, while
+ * limiting consecutive symlinks to 40.
+ *
+ * Without that kind of total limit, nasty chains of consecutive
+ * symlinks can cause almost arbitrarily long lookups.
+ */
+static inline int nested_symlink(struct path *path, struct nameidata *nd)
+{
+       int res;
+
+       BUG_ON(nd->depth >= MAX_NESTED_LINKS);
+       if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
+               path_put_conditional(path, nd);
+               path_put(&nd->path);
+               return -ELOOP;
+       }
+
+       nd->depth++;
+       current->link_count++;
+
+       do {
+               struct path link = *path;
+               void *cookie;
+
+               res = follow_link(&link, nd, &cookie);
+               if (!res)
+                       res = walk_component(nd, path, &nd->last,
+                                            nd->last_type, LOOKUP_FOLLOW);
+               put_link(nd, &link, cookie);
+       } while (res > 0);
+
+       current->link_count--;
+       nd->depth--;
+       return res;
 }
 
 /*
@@ -1339,30 +1403,18 @@ static int link_path_walk(const char *name, struct nameidata *nd)
        while (*name=='/')
                name++;
        if (!*name)
-               goto return_reval;
-
-       if (nd->depth)
-               lookup_flags = LOOKUP_FOLLOW | (nd->flags & LOOKUP_CONTINUE);
+               return 0;
 
        /* At this point we know we have a real path component. */
        for(;;) {
-               struct inode *inode;
                unsigned long hash;
                struct qstr this;
                unsigned int c;
+               int type;
 
                nd->flags |= LOOKUP_CONTINUE;
-               if (nd->flags & LOOKUP_RCU) {
-                       err = exec_permission(nd->inode, IPERM_FLAG_RCU);
-                       if (err == -ECHILD) {
-                               if (nameidata_drop_rcu(nd))
-                                       return -ECHILD;
-                               goto exec_again;
-                       }
-               } else {
-exec_again:
-                       err = exec_permission(nd->inode, 0);
-               }
+
+               err = may_lookup(nd);
                if (err)
                        break;
 
@@ -1378,52 +1430,43 @@ exec_again:
                this.len = name - (const char *) this.name;
                this.hash = end_name_hash(hash);
 
+               type = LAST_NORM;
+               if (this.name[0] == '.') switch (this.len) {
+                       case 2:
+                               if (this.name[1] == '.') {
+                                       type = LAST_DOTDOT;
+                                       nd->flags |= LOOKUP_JUMPED;
+                               }
+                               break;
+                       case 1:
+                               type = LAST_DOT;
+               }
+               if (likely(type == LAST_NORM)) {
+                       struct dentry *parent = nd->path.dentry;
+                       nd->flags &= ~LOOKUP_JUMPED;
+                       if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
+                               err = parent->d_op->d_hash(parent, nd->inode,
+                                                          &this);
+                               if (err < 0)
+                                       break;
+                       }
+               }
+
                /* remove trailing slashes? */
                if (!c)
                        goto last_component;
                while (*++name == '/');
                if (!*name)
-                       goto last_with_slashes;
+                       goto last_component;
 
-               /*
-                * "." and ".." are special - ".." especially so because it has
-                * to be able to know about the current root directory and
-                * parent relationships.
-                */
-               if (this.name[0] == '.') switch (this.len) {
-                       default:
-                               break;
-                       case 2:
-                               if (this.name[1] != '.')
-                                       break;
-                               if (nd->flags & LOOKUP_RCU) {
-                                       if (follow_dotdot_rcu(nd))
-                                               return -ECHILD;
-                               } else
-                                       follow_dotdot(nd);
-                               /* fallthrough */
-                       case 1:
-                               continue;
-               }
-               /* This does the actual lookups.. */
-               err = do_lookup(nd, &this, &next, &inode);
-               if (err)
-                       break;
-               err = -ENOENT;
-               if (!inode)
-                       goto out_dput;
+               err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW);
+               if (err < 0)
+                       return err;
 
-               if (inode->i_op->follow_link) {
-                       err = do_follow_link(inode, &next, nd);
+               if (err) {
+                       err = nested_symlink(&next, nd);
                        if (err)
-                               goto return_err;
-                       nd->inode = nd->path.dentry->d_inode;
-                       err = -ENOENT;
-                       if (!nd->inode)
-                               break;
-               } else {
-                       path_to_nameidata(&next, nd);
-                       nd->inode = inode;
+                               return err;
                }
                err = -ENOTDIR; 
                if (!nd->inode->i_op->lookup)
@@ -1431,209 +1474,109 @@ exec_again:
                continue;
                /* here ends the main loop */
 
-last_with_slashes:
-               lookup_flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
 last_component:
                /* Clear LOOKUP_CONTINUE iff it was previously unset */
                nd->flags &= lookup_flags | ~LOOKUP_CONTINUE;
-               if (lookup_flags & LOOKUP_PARENT)
-                       goto lookup_parent;
-               if (this.name[0] == '.') switch (this.len) {
-                       default:
-                               break;
-                       case 2:
-                               if (this.name[1] != '.')
-                                       break;
-                               if (nd->flags & LOOKUP_RCU) {
-                                       if (follow_dotdot_rcu(nd))
-                                               return -ECHILD;
-                               } else
-                                       follow_dotdot(nd);
-                               /* fallthrough */
-                       case 1:
-                               goto return_reval;
-               }
-               err = do_lookup(nd, &this, &next, &inode);
-               if (err)
-                       break;
-               if (inode && unlikely(inode->i_op->follow_link) &&
-                   (lookup_flags & LOOKUP_FOLLOW)) {
-                       err = do_follow_link(inode, &next, nd);
-                       if (err)
-                               goto return_err;
-                       nd->inode = nd->path.dentry->d_inode;
-               } else {
-                       path_to_nameidata(&next, nd);
-                       nd->inode = inode;
-               }
-               err = -ENOENT;
-               if (!nd->inode)
-                       break;
-               if (lookup_flags & LOOKUP_DIRECTORY) {
-                       err = -ENOTDIR; 
-                       if (!nd->inode->i_op->lookup)
-                               break;
-               }
-               goto return_base;
-lookup_parent:
                nd->last = this;
-               nd->last_type = LAST_NORM;
-               if (this.name[0] != '.')
-                       goto return_base;
-               if (this.len == 1)
-                       nd->last_type = LAST_DOT;
-               else if (this.len == 2 && this.name[1] == '.')
-                       nd->last_type = LAST_DOTDOT;
-               else
-                       goto return_base;
-return_reval:
-               /*
-                * We bypassed the ordinary revalidation routines.
-                * We may need to check the cached dentry for staleness.
-                */
-               if (need_reval_dot(nd->path.dentry)) {
-                       if (nameidata_drop_rcu_last_maybe(nd))
-                               return -ECHILD;
-                       /* Note: we do not d_invalidate() */
-                       err = d_revalidate(nd->path.dentry, nd);
-                       if (!err)
-                               err = -ESTALE;
-                       if (err < 0)
-                               break;
-                       return 0;
-               }
-return_base:
-               if (nameidata_drop_rcu_last_maybe(nd))
-                       return -ECHILD;
+               nd->last_type = type;
                return 0;
-out_dput:
-               if (!(nd->flags & LOOKUP_RCU))
-                       path_put_conditional(&next, nd);
-               break;
        }
-       if (!(nd->flags & LOOKUP_RCU))
-               path_put(&nd->path);
-return_err:
+       terminate_walk(nd);
        return err;
 }
 
-static inline int path_walk_rcu(const char *name, struct nameidata *nd)
-{
-       current->total_link_count = 0;
-
-       return link_path_walk(name, nd);
-}
-
-static inline int path_walk_simple(const char *name, struct nameidata *nd)
-{
-       current->total_link_count = 0;
-
-       return link_path_walk(name, nd);
-}
-
-static int path_walk(const char *name, struct nameidata *nd)
-{
-       struct path save = nd->path;
-       int result;
-
-       current->total_link_count = 0;
-
-       /* make sure the stuff we saved doesn't go away */
-       path_get(&save);
-
-       result = link_path_walk(name, nd);
-       if (result == -ESTALE) {
-               /* nd->path had been dropped */
-               current->total_link_count = 0;
-               nd->path = save;
-               path_get(&nd->path);
-               nd->flags |= LOOKUP_REVAL;
-               result = link_path_walk(name, nd);
-       }
-
-       path_put(&save);
-
-       return result;
-}
-
-static void path_finish_rcu(struct nameidata *nd)
-{
-       if (nd->flags & LOOKUP_RCU) {
-               /* RCU dangling. Cancel it. */
-               nd->flags &= ~LOOKUP_RCU;
-               nd->root.mnt = NULL;
-               rcu_read_unlock();
-               br_read_unlock(vfsmount_lock);
-       }
-       if (nd->file)
-               fput(nd->file);
-}
-
-static int path_init_rcu(int dfd, const char *name, unsigned int flags, struct nameidata *nd)
+static int path_init(int dfd, const char *name, unsigned int flags,
+                    struct nameidata *nd, struct file **fp)
 {
        int retval = 0;
        int fput_needed;
        struct file *file;
 
        nd->last_type = LAST_ROOT; /* if there are only slashes... */
-       nd->flags = flags | LOOKUP_RCU;
+       nd->flags = flags | LOOKUP_JUMPED;
        nd->depth = 0;
+       if (flags & LOOKUP_ROOT) {
+               struct inode *inode = nd->root.dentry->d_inode;
+               if (*name) {
+                       if (!inode->i_op->lookup)
+                               return -ENOTDIR;
+                       retval = inode_permission(inode, MAY_EXEC);
+                       if (retval)
+                               return retval;
+               }
+               nd->path = nd->root;
+               nd->inode = inode;
+               if (flags & LOOKUP_RCU) {
+                       br_read_lock(vfsmount_lock);
+                       rcu_read_lock();
+                       nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+               } else {
+                       path_get(&nd->path);
+               }
+               return 0;
+       }
+
        nd->root.mnt = NULL;
-       nd->file = NULL;
 
        if (*name=='/') {
-               struct fs_struct *fs = current->fs;
-               unsigned seq;
-
-               br_read_lock(vfsmount_lock);
-               rcu_read_lock();
-
-               do {
-                       seq = read_seqcount_begin(&fs->seq);
-                       nd->root = fs->root;
-                       nd->path = nd->root;
-                       nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
-               } while (read_seqcount_retry(&fs->seq, seq));
-
+               if (flags & LOOKUP_RCU) {
+                       br_read_lock(vfsmount_lock);
+                       rcu_read_lock();
+                       set_root_rcu(nd);
+               } else {
+                       set_root(nd);
+                       path_get(&nd->root);
+               }
+               nd->path = nd->root;
        } else if (dfd == AT_FDCWD) {
-               struct fs_struct *fs = current->fs;
-               unsigned seq;
-
-               br_read_lock(vfsmount_lock);
-               rcu_read_lock();
+               if (flags & LOOKUP_RCU) {
+                       struct fs_struct *fs = current->fs;
+                       unsigned seq;
 
-               do {
-                       seq = read_seqcount_begin(&fs->seq);
-                       nd->path = fs->pwd;
-                       nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
-               } while (read_seqcount_retry(&fs->seq, seq));
+                       br_read_lock(vfsmount_lock);
+                       rcu_read_lock();
 
+                       do {
+                               seq = read_seqcount_begin(&fs->seq);
+                               nd->path = fs->pwd;
+                               nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+                       } while (read_seqcount_retry(&fs->seq, seq));
+               } else {
+                       get_fs_pwd(current->fs, &nd->path);
+               }
        } else {
                struct dentry *dentry;
 
-               file = fget_light(dfd, &fput_needed);
+               file = fget_raw_light(dfd, &fput_needed);
                retval = -EBADF;
                if (!file)
                        goto out_fail;
 
                dentry = file->f_path.dentry;
 
-               retval = -ENOTDIR;
-               if (!S_ISDIR(dentry->d_inode->i_mode))
-                       goto fput_fail;
+               if (*name) {
+                       retval = -ENOTDIR;
+                       if (!S_ISDIR(dentry->d_inode->i_mode))
+                               goto fput_fail;
 
-               retval = file_permission(file, MAY_EXEC);
-               if (retval)
-                       goto fput_fail;
+                       retval = file_permission(file, MAY_EXEC);
+                       if (retval)
+                               goto fput_fail;
+               }
 
                nd->path = file->f_path;
-               if (fput_needed)
-                       nd->file = file;
-
-               nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
-               br_read_lock(vfsmount_lock);
-               rcu_read_lock();
+               if (flags & LOOKUP_RCU) {
+                       if (fput_needed)
+                               *fp = file;
+                       nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+                       br_read_lock(vfsmount_lock);
+                       rcu_read_lock();
+               } else {
+                       path_get(&file->f_path);
+                       fput_light(file, fput_needed);
+               }
        }
+
        nd->inode = nd->path.dentry->d_inode;
        return 0;
 
@@ -1643,60 +1586,23 @@ out_fail:
        return retval;
 }
 
-static int path_init(int dfd, const char *name, unsigned int flags, struct nameidata *nd)
+static inline int lookup_last(struct nameidata *nd, struct path *path)
 {
-       int retval = 0;
-       int fput_needed;
-       struct file *file;
-
-       nd->last_type = LAST_ROOT; /* if there are only slashes... */
-       nd->flags = flags;
-       nd->depth = 0;
-       nd->root.mnt = NULL;
-
-       if (*name=='/') {
-               set_root(nd);
-               nd->path = nd->root;
-               path_get(&nd->root);
-       } else if (dfd == AT_FDCWD) {
-               get_fs_pwd(current->fs, &nd->path);
-       } else {
-               struct dentry *dentry;
-
-               file = fget_light(dfd, &fput_needed);
-               retval = -EBADF;
-               if (!file)
-                       goto out_fail;
-
-               dentry = file->f_path.dentry;
-
-               retval = -ENOTDIR;
-               if (!S_ISDIR(dentry->d_inode->i_mode))
-                       goto fput_fail;
-
-               retval = file_permission(file, MAY_EXEC);
-               if (retval)
-                       goto fput_fail;
-
-               nd->path = file->f_path;
-               path_get(&file->f_path);
-
-               fput_light(file, fput_needed);
-       }
-       nd->inode = nd->path.dentry->d_inode;
-       return 0;
+       if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
+               nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
 
-fput_fail:
-       fput_light(file, fput_needed);
-out_fail:
-       return retval;
+       nd->flags &= ~LOOKUP_PARENT;
+       return walk_component(nd, path, &nd->last, nd->last_type,
+                                       nd->flags & LOOKUP_FOLLOW);
 }
 
 /* Returns 0 and nd will be valid on success; returns error otherwise. */
-static int do_path_lookup(int dfd, const char *name,
+static int path_lookupat(int dfd, const char *name,
                                unsigned int flags, struct nameidata *nd)
 {
-       int retval;
+       struct file *base = NULL;
+       struct path path;
+       int err;
 
        /*
         * Path walking is largely split up into 2 different synchronisation
@@ -1712,44 +1618,75 @@ static int do_path_lookup(int dfd, const char *name,
         * be handled by restarting a traditional ref-walk (which will always
         * be able to complete).
         */
-       retval = path_init_rcu(dfd, name, flags, nd);
-       if (unlikely(retval))
-               return retval;
-       retval = path_walk_rcu(name, nd);
-       path_finish_rcu(nd);
-       if (nd->root.mnt) {
-               path_put(&nd->root);
-               nd->root.mnt = NULL;
+       err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base);
+
+       if (unlikely(err))
+               return err;
+
+       current->total_link_count = 0;
+       err = link_path_walk(name, nd);
+
+       if (!err && !(flags & LOOKUP_PARENT)) {
+               err = lookup_last(nd, &path);
+               while (err > 0) {
+                       void *cookie;
+                       struct path link = path;
+                       nd->flags |= LOOKUP_PARENT;
+                       err = follow_link(&link, nd, &cookie);
+                       if (!err)
+                               err = lookup_last(nd, &path);
+                       put_link(nd, &link, cookie);
+               }
        }
 
-       if (unlikely(retval == -ECHILD || retval == -ESTALE)) {
-               /* slower, locked walk */
-               if (retval == -ESTALE)
-                       flags |= LOOKUP_REVAL;
-               retval = path_init(dfd, name, flags, nd);
-               if (unlikely(retval))
-                       return retval;
-               retval = path_walk(name, nd);
-               if (nd->root.mnt) {
-                       path_put(&nd->root);
-                       nd->root.mnt = NULL;
+       if (nd->flags & LOOKUP_RCU) {
+               /* went all way through without dropping RCU */
+               BUG_ON(err);
+               if (nameidata_drop_rcu_last(nd))
+                       err = -ECHILD;
+       }
+
+       if (!err)
+               err = handle_reval_path(nd);
+
+       if (!err && nd->flags & LOOKUP_DIRECTORY) {
+               if (!nd->inode->i_op->lookup) {
+                       path_put(&nd->path);
+                       return -ENOTDIR;
                }
        }
 
+       if (base)
+               fput(base);
+
+       if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
+               path_put(&nd->root);
+               nd->root.mnt = NULL;
+       }
+       return err;
+}
+
+static int do_path_lookup(int dfd, const char *name,
+                               unsigned int flags, struct nameidata *nd)
+{
+       int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
+       if (unlikely(retval == -ECHILD))
+               retval = path_lookupat(dfd, name, flags, nd);
+       if (unlikely(retval == -ESTALE))
+               retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
+
        if (likely(!retval)) {
                if (unlikely(!audit_dummy_context())) {
                        if (nd->path.dentry && nd->inode)
                                audit_inode(name, nd->path.dentry);
                }
        }
-
        return retval;
 }
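
do_path_lookup() now spells out the fallback ladder in one place: try rcu-walk, redo as plain ref-walk on -ECHILD, redo with LOOKUP_REVAL on -ESTALE. A generic sketch of that retry cascade, with the failure points simulated:

#include <errno.h>
#include <stdio.h>

enum { MODE_RCU, MODE_REF, MODE_REVAL };

/* fail_until simulates how far up the ladder a lookup must climb */
static int try_lookup(int mode, int fail_until)
{
        if (mode >= fail_until)
                return 0;
        return mode == MODE_RCU ? -ECHILD : -ESTALE;
}

static int lookup_cascade(int fail_until)
{
        int err = try_lookup(MODE_RCU, fail_until);
        if (err == -ECHILD)
                err = try_lookup(MODE_REF, fail_until);
        if (err == -ESTALE)
                err = try_lookup(MODE_REVAL, fail_until);
        return err;
}

int main(void)
{
        printf("%d %d %d\n", lookup_cascade(0), lookup_cascade(1),
               lookup_cascade(2));      /* 0 0 0: every tier recovers */
        return 0;
}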
 
-int path_lookup(const char *name, unsigned int flags,
-                       struct nameidata *nd)
+int kern_path_parent(const char *name, struct nameidata *nd)
 {
-       return do_path_lookup(AT_FDCWD, name, flags, nd);
+       return do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, nd);
 }
 
 int kern_path(const char *name, unsigned int flags, struct path *path)
@@ -1773,29 +1710,10 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
                    const char *name, unsigned int flags,
                    struct nameidata *nd)
 {
-       int retval;
-
-       /* same as do_path_lookup */
-       nd->last_type = LAST_ROOT;
-       nd->flags = flags;
-       nd->depth = 0;
-
-       nd->path.dentry = dentry;
-       nd->path.mnt = mnt;
-       path_get(&nd->path);
-       nd->root = nd->path;
-       path_get(&nd->root);
-       nd->inode = nd->path.dentry->d_inode;
-
-       retval = path_walk(name, nd);
-       if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
-                               nd->inode))
-               audit_inode(name, nd->path.dentry);
-
-       path_put(&nd->root);
-       nd->root.mnt = NULL;
-
-       return retval;
+       nd->root.dentry = dentry;
+       nd->root.mnt = mnt;
+       /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */
+       return do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, nd);
 }
 
 static struct dentry *__lookup_hash(struct qstr *name,
@@ -1809,17 +1727,6 @@ static struct dentry *__lookup_hash(struct qstr *name,
        if (err)
                return ERR_PTR(err);
 
-       /*
-        * See if the low-level filesystem might want
-        * to use its own hash..
-        */
-       if (base->d_flags & DCACHE_OP_HASH) {
-               err = base->d_op->d_hash(base, inode, name);
-               dentry = ERR_PTR(err);
-               if (err < 0)
-                       goto out;
-       }
-
        /*
         * Don't bother with __d_lookup: callers are for creat as
         * well as unlink, so a lot of the time it would cost
@@ -1832,7 +1739,7 @@ static struct dentry *__lookup_hash(struct qstr *name,
 
        if (!dentry)
                dentry = d_alloc_and_lookup(base, name, nd);
-out:
+
        return dentry;
 }
 
@@ -1846,28 +1753,6 @@ static struct dentry *lookup_hash(struct nameidata *nd)
        return __lookup_hash(&nd->last, nd->path.dentry, nd);
 }
 
-static int __lookup_one_len(const char *name, struct qstr *this,
-               struct dentry *base, int len)
-{
-       unsigned long hash;
-       unsigned int c;
-
-       this->name = name;
-       this->len = len;
-       if (!len)
-               return -EACCES;
-
-       hash = init_name_hash();
-       while (len--) {
-               c = *(const unsigned char *)name++;
-               if (c == '/' || c == '\0')
-                       return -EACCES;
-               hash = partial_name_hash(c, hash);
-       }
-       this->hash = end_name_hash(hash);
-       return 0;
-}
-
 /**
  * lookup_one_len - filesystem helper to lookup single pathname component
  * @name:      pathname component to lookup
@@ -1881,14 +1766,34 @@ static int __lookup_one_len(const char *name, struct qstr *this,
  */
 struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
 {
-       int err;
        struct qstr this;
+       unsigned long hash;
+       unsigned int c;
 
        WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
 
-       err = __lookup_one_len(name, &this, base, len);
-       if (err)
-               return ERR_PTR(err);
+       this.name = name;
+       this.len = len;
+       if (!len)
+               return ERR_PTR(-EACCES);
+
+       hash = init_name_hash();
+       while (len--) {
+               c = *(const unsigned char *)name++;
+               if (c == '/' || c == '\0')
+                       return ERR_PTR(-EACCES);
+               hash = partial_name_hash(c, hash);
+       }
+       this.hash = end_name_hash(hash);
+       /*
+        * See if the low-level filesystem might want
+        * to use its own hash..
+        */
+       if (base->d_flags & DCACHE_OP_HASH) {
+               int err = base->d_op->d_hash(base, base->d_inode, &this);
+               if (err < 0)
+                       return ERR_PTR(err);
+       }
 
        return __lookup_hash(&this, base, NULL);
 }
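
lookup_one_len() now open-codes the component hash before handing the name to the filesystem's d_hash(). The loop above is the stock dcache name hash of this era; a userspace transcription for reference (the shift/multiply constants are assumed to match partial_name_hash() in include/linux/dcache.h):

    #include <stddef.h>
    #include <stdio.h>

    /* init_name_hash(), partial_name_hash() and end_name_hash(), inlined. */
    static unsigned int full_name_hash(const char *name, size_t len)
    {
            unsigned long hash = 0;                         /* init_name_hash() */
            while (len--) {
                    unsigned long c = (unsigned char)*name++;
                    hash = (hash + (c << 4) + (c >> 4)) * 11;
            }
            return (unsigned int)hash;                      /* end_name_hash() */
    }

    int main(void)
    {
            printf("%u\n", full_name_hash("vmlinuz", 7));
            return 0;
    }
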
@@ -1897,7 +1802,7 @@ int user_path_at(int dfd, const char __user *name, unsigned flags,
                 struct path *path)
 {
        struct nameidata nd;
-       char *tmp = getname(name);
+       char *tmp = getname_flags(name, flags);
        int err = PTR_ERR(tmp);
        if (!IS_ERR(tmp)) {
 
@@ -2077,12 +1982,16 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
        return error;
 }
 
-int may_open(struct path *path, int acc_mode, int flag)
+static int may_open(struct path *path, int acc_mode, int flag)
 {
        struct dentry *dentry = path->dentry;
        struct inode *inode = dentry->d_inode;
        int error;
 
+       /* O_PATH? */
+       if (!acc_mode)
+               return 0;
+
        if (!inode)
                return -ENOENT;
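
The !acc_mode short-circuit above is what makes the new O_PATH opens cheap: an O_PATH descriptor names a location rather than an I/O channel, so may_open() skips the permission, device and truncation checks entirely. A hedged usage sketch (O_PATH and the companion AT_EMPTY_PATH flag are both introduced by this series; the path is illustrative):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
            struct stat st;
            /* Needs only search permission on the path; the resulting
             * fd cannot be read from or written to. */
            int fd = open("/etc/shadow", O_PATH);
            if (fd < 0)
                    return 1;
            if (fstatat(fd, "", &st, AT_EMPTY_PATH) == 0)
                    printf("mode %04o\n", (unsigned)st.st_mode & 07777);
            close(fd);
            return 0;
    }
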
 
@@ -2150,34 +2059,6 @@ static int handle_truncate(struct file *filp)
        return error;
 }
 
-/*
- * Be careful about ever adding any more callers of this
- * function.  Its flags must be in the namei format, not
- * what get passed to sys_open().
- */
-static int __open_namei_create(struct nameidata *nd, struct path *path,
-                               int open_flag, int mode)
-{
-       int error;
-       struct dentry *dir = nd->path.dentry;
-
-       if (!IS_POSIXACL(dir->d_inode))
-               mode &= ~current_umask();
-       error = security_path_mknod(&nd->path, path->dentry, mode, 0);
-       if (error)
-               goto out_unlock;
-       error = vfs_create(dir->d_inode, path->dentry, mode, nd);
-out_unlock:
-       mutex_unlock(&dir->d_inode->i_mutex);
-       dput(nd->path.dentry);
-       nd->path.dentry = path->dentry;
-
-       if (error)
-               return error;
-       /* Don't check for write permission, don't truncate */
-       return may_open(&nd->path, 0, open_flag & ~O_TRUNC);
-}
-
 /*
  * Note that while the flag value (low two bits) for sys_open means:
  *     00 - read-only
@@ -2202,126 +2083,115 @@ static inline int open_to_namei_flags(int flag)
        return flag;
 }
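
For reference, the low two access bits of open(2) (00 read-only, 01 write-only, 10 read-write) become 01/10/11 in namei terms so each permission gets its own bit. The body of open_to_namei_flags() falls outside this hunk, so take the conditional-increment trick below as an assumption about how the remap is classically done:

    #include <fcntl.h>
    #include <stdio.h>

    static int to_namei(int flag)
    {
            if ((flag + 1) & O_ACCMODE)     /* 00, 01, 10 get bumped; 11 doesn't */
                    flag++;
            return flag;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   to_namei(O_RDONLY) & 3,  /* 00 -> 01: read */
                   to_namei(O_WRONLY) & 3,  /* 01 -> 10: write */
                   to_namei(O_RDWR)   & 3); /* 10 -> 11: read+write */
            return 0;
    }
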
 
-static int open_will_truncate(int flag, struct inode *inode)
-{
-       /*
-        * We'll never write to the fs underlying
-        * a device file.
-        */
-       if (special_file(inode->i_mode))
-               return 0;
-       return (flag & O_TRUNC);
-}
-
-static struct file *finish_open(struct nameidata *nd,
-                               int open_flag, int acc_mode)
-{
-       struct file *filp;
-       int will_truncate;
-       int error;
-
-       will_truncate = open_will_truncate(open_flag, nd->path.dentry->d_inode);
-       if (will_truncate) {
-               error = mnt_want_write(nd->path.mnt);
-               if (error)
-                       goto exit;
-       }
-       error = may_open(&nd->path, acc_mode, open_flag);
-       if (error) {
-               if (will_truncate)
-                       mnt_drop_write(nd->path.mnt);
-               goto exit;
-       }
-       filp = nameidata_to_filp(nd);
-       if (!IS_ERR(filp)) {
-               error = ima_file_check(filp, acc_mode);
-               if (error) {
-                       fput(filp);
-                       filp = ERR_PTR(error);
-               }
-       }
-       if (!IS_ERR(filp)) {
-               if (will_truncate) {
-                       error = handle_truncate(filp);
-                       if (error) {
-                               fput(filp);
-                               filp = ERR_PTR(error);
-                       }
-               }
-       }
-       /*
-        * It is now safe to drop the mnt write
-        * because the filp has had a write taken
-        * on its behalf.
-        */
-       if (will_truncate)
-               mnt_drop_write(nd->path.mnt);
-       path_put(&nd->path);
-       return filp;
-
-exit:
-       path_put(&nd->path);
-       return ERR_PTR(error);
-}
-
 /*
- * Handle O_CREAT case for do_filp_open
+ * Handle the last step of open()
  */
 static struct file *do_last(struct nameidata *nd, struct path *path,
-                           int open_flag, int acc_mode,
-                           int mode, const char *pathname)
+                           const struct open_flags *op, const char *pathname)
 {
        struct dentry *dir = nd->path.dentry;
+       struct dentry *dentry;
+       int open_flag = op->open_flag;
+       int will_truncate = open_flag & O_TRUNC;
+       int want_write = 0;
+       int acc_mode = op->acc_mode;
        struct file *filp;
-       int error = -EISDIR;
+       int error;
+
+       nd->flags &= ~LOOKUP_PARENT;
+       nd->flags |= op->intent;
 
        switch (nd->last_type) {
        case LAST_DOTDOT:
-               follow_dotdot(nd);
-               dir = nd->path.dentry;
        case LAST_DOT:
-               if (need_reval_dot(dir)) {
-                       int status = d_revalidate(nd->path.dentry, nd);
-                       if (!status)
-                               status = -ESTALE;
-                       if (status < 0) {
-                               error = status;
-                               goto exit;
-                       }
-               }
+               error = handle_dots(nd, nd->last_type);
+               if (error)
+                       return ERR_PTR(error);
                /* fallthrough */
        case LAST_ROOT:
-               goto exit;
+               if (nd->flags & LOOKUP_RCU) {
+                       if (nameidata_drop_rcu_last(nd))
+                               return ERR_PTR(-ECHILD);
+               }
+               error = handle_reval_path(nd);
+               if (error)
+                       goto exit;
+               audit_inode(pathname, nd->path.dentry);
+               if (open_flag & O_CREAT) {
+                       error = -EISDIR;
+                       goto exit;
+               }
+               goto ok;
        case LAST_BIND:
+               /* can't be RCU mode here */
+               error = handle_reval_path(nd);
+               if (error)
+                       goto exit;
                audit_inode(pathname, dir);
                goto ok;
        }
 
+       if (!(open_flag & O_CREAT)) {
+               int symlink_ok = 0;
+               if (nd->last.name[nd->last.len])
+                       nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
+               if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
+                       symlink_ok = 1;
+               /* we _can_ be in RCU mode here */
+               error = walk_component(nd, path, &nd->last, LAST_NORM,
+                                       !symlink_ok);
+               if (error < 0)
+                       return ERR_PTR(error);
+               if (error) /* symlink */
+                       return NULL;
+               /* sayonara */
+               if (nd->flags & LOOKUP_RCU) {
+                       if (nameidata_drop_rcu_last(nd))
+                               return ERR_PTR(-ECHILD);
+               }
+
+               error = -ENOTDIR;
+               if (nd->flags & LOOKUP_DIRECTORY) {
+                       if (!nd->inode->i_op->lookup)
+                               goto exit;
+               }
+               audit_inode(pathname, nd->path.dentry);
+               goto ok;
+       }
+
+       /* create side of things */
+
+       if (nd->flags & LOOKUP_RCU) {
+               if (nameidata_drop_rcu_last(nd))
+                       return ERR_PTR(-ECHILD);
+       }
+
+       audit_inode(pathname, dir);
+       error = -EISDIR;
        /* trailing slashes? */
        if (nd->last.name[nd->last.len])
                goto exit;
 
        mutex_lock(&dir->d_inode->i_mutex);
 
-       path->dentry = lookup_hash(nd);
-       path->mnt = nd->path.mnt;
-
-       error = PTR_ERR(path->dentry);
-       if (IS_ERR(path->dentry)) {
+       dentry = lookup_hash(nd);
+       error = PTR_ERR(dentry);
+       if (IS_ERR(dentry)) {
                mutex_unlock(&dir->d_inode->i_mutex);
                goto exit;
        }
 
-       if (IS_ERR(nd->intent.open.file)) {
-               error = PTR_ERR(nd->intent.open.file);
-               goto exit_mutex_unlock;
-       }
+       path->dentry = dentry;
+       path->mnt = nd->path.mnt;
 
        /* Negative dentry, just create the file */
-       if (!path->dentry->d_inode) {
+       if (!dentry->d_inode) {
+               int mode = op->mode;
+               if (!IS_POSIXACL(dir->d_inode))
+                       mode &= ~current_umask();
                /*
                 * This write is needed to ensure that a
-                * ro->rw transition does not occur between
+                * rw->ro transition does not occur between
                 * the time when the file is created and when
                 * a permanent write count is taken through
                 * the 'struct file' in nameidata_to_filp().
@@ -2329,22 +2199,21 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
                error = mnt_want_write(nd->path.mnt);
                if (error)
                        goto exit_mutex_unlock;
-               error = __open_namei_create(nd, path, open_flag, mode);
-               if (error) {
-                       mnt_drop_write(nd->path.mnt);
-                       goto exit;
-               }
-               filp = nameidata_to_filp(nd);
-               mnt_drop_write(nd->path.mnt);
-               path_put(&nd->path);
-               if (!IS_ERR(filp)) {
-                       error = ima_file_check(filp, acc_mode);
-                       if (error) {
-                               fput(filp);
-                               filp = ERR_PTR(error);
-                       }
-               }
-               return filp;
+               want_write = 1;
+               /* Don't check for write permission, don't truncate */
+               open_flag &= ~O_TRUNC;
+               will_truncate = 0;
+               acc_mode = MAY_OPEN;
+               error = security_path_mknod(&nd->path, dentry, mode, 0);
+               if (error)
+                       goto exit_mutex_unlock;
+               error = vfs_create(dir->d_inode, dentry, mode, nd);
+               if (error)
+                       goto exit_mutex_unlock;
+               mutex_unlock(&dir->d_inode->i_mutex);
+               dput(nd->path.dentry);
+               nd->path.dentry = dentry;
+               goto common;
        }
 
        /*
@@ -2374,7 +2243,40 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
        if (S_ISDIR(nd->inode->i_mode))
                goto exit;
 ok:
-       filp = finish_open(nd, open_flag, acc_mode);
+       if (!S_ISREG(nd->inode->i_mode))
+               will_truncate = 0;
+
+       if (will_truncate) {
+               error = mnt_want_write(nd->path.mnt);
+               if (error)
+                       goto exit;
+               want_write = 1;
+       }
+common:
+       error = may_open(&nd->path, acc_mode, open_flag);
+       if (error)
+               goto exit;
+       filp = nameidata_to_filp(nd);
+       if (!IS_ERR(filp)) {
+               error = ima_file_check(filp, op->acc_mode);
+               if (error) {
+                       fput(filp);
+                       filp = ERR_PTR(error);
+               }
+       }
+       if (!IS_ERR(filp)) {
+               if (will_truncate) {
+                       error = handle_truncate(filp);
+                       if (error) {
+                               fput(filp);
+                               filp = ERR_PTR(error);
+                       }
+               }
+       }
+out:
+       if (want_write)
+               mnt_drop_write(nd->path.mnt);
+       path_put(&nd->path);
        return filp;
 
 exit_mutex_unlock:
@@ -2382,197 +2284,103 @@ exit_mutex_unlock:
 exit_dput:
        path_put_conditional(path, nd);
 exit:
-       path_put(&nd->path);
-       return ERR_PTR(error);
+       filp = ERR_PTR(error);
+       goto out;
 }
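
Note how do_last() now funnels every outcome through the single out: label, so the mnt write count and the path reference are dropped exactly once no matter which step failed. Reduced to a compilable sketch with stand-in acquire/release pairs:

    #include <stdio.h>

    static int  take_ref(void)   { puts("ref++");   return 0; }
    static void drop_ref(void)   { puts("ref--"); }
    static int  take_write(void) { puts("write++"); return 0; }
    static void drop_write(void) { puts("write--"); }

    static int open_like(int will_truncate, int fail)
    {
            int want_write = 0;
            int err;

            err = take_ref();
            if (err)
                    return err;
            if (will_truncate) {
                    err = take_write();
                    if (err)
                            goto out;
                    want_write = 1;
            }
            err = fail ? -1 : 0;            /* the "real work" */
    out:
            if (want_write)
                    drop_write();           /* dropped once, on every path */
            drop_ref();
            return err;
    }

    int main(void)
    {
            return open_like(1, 0) ? 1 : 0;
    }
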
 
-/*
- * Note that the low bits of the passed in "open_flag"
- * are not the same as in the local variable "flag". See
- * open_to_namei_flags() for more details.
- */
-struct file *do_filp_open(int dfd, const char *pathname,
-               int open_flag, int mode, int acc_mode)
+static struct file *path_openat(int dfd, const char *pathname,
+               struct nameidata *nd, const struct open_flags *op, int flags)
 {
+       struct file *base = NULL;
        struct file *filp;
-       struct nameidata nd;
-       int error;
        struct path path;
-       int count = 0;
-       int flag = open_to_namei_flags(open_flag);
-       int flags;
-
-       if (!(open_flag & O_CREAT))
-               mode = 0;
-
-       /* Must never be set by userspace */
-       open_flag &= ~FMODE_NONOTIFY;
-
-       /*
-        * O_SYNC is implemented as __O_SYNC|O_DSYNC.  As many places only
-        * check for O_DSYNC if the need any syncing at all we enforce it's
-        * always set instead of having to deal with possibly weird behaviour
-        * for malicious applications setting only __O_SYNC.
-        */
-       if (open_flag & __O_SYNC)
-               open_flag |= O_DSYNC;
-
-       if (!acc_mode)
-               acc_mode = MAY_OPEN | ACC_MODE(open_flag);
-
-       /* O_TRUNC implies we need access checks for write permissions */
-       if (open_flag & O_TRUNC)
-               acc_mode |= MAY_WRITE;
-
-       /* Allow the LSM permission hook to distinguish append 
-          access from general write access. */
-       if (open_flag & O_APPEND)
-               acc_mode |= MAY_APPEND;
-
-       flags = LOOKUP_OPEN;
-       if (open_flag & O_CREAT) {
-               flags |= LOOKUP_CREATE;
-               if (open_flag & O_EXCL)
-                       flags |= LOOKUP_EXCL;
-       }
-       if (open_flag & O_DIRECTORY)
-               flags |= LOOKUP_DIRECTORY;
-       if (!(open_flag & O_NOFOLLOW))
-               flags |= LOOKUP_FOLLOW;
+       int error;
 
        filp = get_empty_filp();
        if (!filp)
                return ERR_PTR(-ENFILE);
 
-       filp->f_flags = open_flag;
-       nd.intent.open.file = filp;
-       nd.intent.open.flags = flag;
-       nd.intent.open.create_mode = mode;
+       filp->f_flags = op->open_flag;
+       nd->intent.open.file = filp;
+       nd->intent.open.flags = open_to_namei_flags(op->open_flag);
+       nd->intent.open.create_mode = op->mode;
 
-       if (open_flag & O_CREAT)
-               goto creat;
-
-       /* !O_CREAT, simple open */
-       error = do_path_lookup(dfd, pathname, flags, &nd);
+       error = path_init(dfd, pathname, flags | LOOKUP_PARENT, nd, &base);
        if (unlikely(error))
                goto out_filp;
-       error = -ELOOP;
-       if (!(nd.flags & LOOKUP_FOLLOW)) {
-               if (nd.inode->i_op->follow_link)
-                       goto out_path;
-       }
-       error = -ENOTDIR;
-       if (nd.flags & LOOKUP_DIRECTORY) {
-               if (!nd.inode->i_op->lookup)
-                       goto out_path;
-       }
-       audit_inode(pathname, nd.path.dentry);
-       filp = finish_open(&nd, open_flag, acc_mode);
-       release_open_intent(&nd);
-       return filp;
-
-creat:
-       /* OK, have to create the file. Find the parent. */
-       error = path_init_rcu(dfd, pathname,
-                       LOOKUP_PARENT | (flags & LOOKUP_REVAL), &nd);
-       if (error)
-               goto out_filp;
-       error = path_walk_rcu(pathname, &nd);
-       path_finish_rcu(&nd);
-       if (unlikely(error == -ECHILD || error == -ESTALE)) {
-               /* slower, locked walk */
-               if (error == -ESTALE) {
-reval:
-                       flags |= LOOKUP_REVAL;
-               }
-               error = path_init(dfd, pathname,
-                               LOOKUP_PARENT | (flags & LOOKUP_REVAL), &nd);
-               if (error)
-                       goto out_filp;
 
-               error = path_walk_simple(pathname, &nd);
-       }
+       current->total_link_count = 0;
+       error = link_path_walk(pathname, nd);
        if (unlikely(error))
                goto out_filp;
-       if (unlikely(!audit_dummy_context()))
-               audit_inode(pathname, nd.path.dentry);
 
-       /*
-        * We have the parent and last component.
-        */
-       nd.flags = flags;
-       filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname);
+       filp = do_last(nd, &path, op, pathname);
        while (unlikely(!filp)) { /* trailing symlink */
                struct path link = path;
-               struct inode *linki = link.dentry->d_inode;
                void *cookie;
-               error = -ELOOP;
-               if (!(nd.flags & LOOKUP_FOLLOW))
-                       goto exit_dput;
-               if (count++ == 32)
-                       goto exit_dput;
-               /*
-                * This is subtle. Instead of calling do_follow_link() we do
-                * the thing by hands. The reason is that this way we have zero
-                * link_count and path_walk() (called from ->follow_link)
-                * honoring LOOKUP_PARENT.  After that we have the parent and
-                * last component, i.e. we are in the same situation as after
-                * the first path_walk().  Well, almost - if the last component
-                * is normal we get its copy stored in nd->last.name and we will
-                * have to putname() it when we are done. Procfs-like symlinks
-                * just set LAST_BIND.
-                */
-               nd.flags |= LOOKUP_PARENT;
-               error = security_inode_follow_link(link.dentry, &nd);
-               if (error)
-                       goto exit_dput;
-               error = __do_follow_link(&link, &nd, &cookie);
-               if (unlikely(error)) {
-                       if (!IS_ERR(cookie) && linki->i_op->put_link)
-                               linki->i_op->put_link(link.dentry, &nd, cookie);
-                       /* nd.path had been dropped */
-                       nd.path = link;
-                       goto out_path;
+               if (!(nd->flags & LOOKUP_FOLLOW)) {
+                       path_put_conditional(&path, nd);
+                       path_put(&nd->path);
+                       filp = ERR_PTR(-ELOOP);
+                       break;
                }
-               nd.flags &= ~LOOKUP_PARENT;
-               filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname);
-               if (linki->i_op->put_link)
-                       linki->i_op->put_link(link.dentry, &nd, cookie);
-               path_put(&link);
+               nd->flags |= LOOKUP_PARENT;
+               nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
+               error = follow_link(&link, nd, &cookie);
+               if (unlikely(error))
+                       filp = ERR_PTR(error);
+               else
+                       filp = do_last(nd, &path, op, pathname);
+               put_link(nd, &link, cookie);
        }
 out:
-       if (nd.root.mnt)
-               path_put(&nd.root);
-       if (filp == ERR_PTR(-ESTALE) && !(flags & LOOKUP_REVAL))
-               goto reval;
-       release_open_intent(&nd);
+       if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
+               path_put(&nd->root);
+       if (base)
+               fput(base);
+       release_open_intent(nd);
        return filp;
 
-exit_dput:
-       path_put_conditional(&path, &nd);
-out_path:
-       path_put(&nd.path);
 out_filp:
        filp = ERR_PTR(error);
        goto out;
 }
 
-/**
- * filp_open - open file and return file pointer
- *
- * @filename:  path to open
- * @flags:     open flags as per the open(2) second argument
- * @mode:      mode for the new file if O_CREAT is set, else ignored
- *
- * This is the helper to open a file from kernelspace if you really
- * have to.  But in generally you should not do this, so please move
- * along, nothing to see here..
- */
-struct file *filp_open(const char *filename, int flags, int mode)
+struct file *do_filp_open(int dfd, const char *pathname,
+               const struct open_flags *op, int flags)
 {
-       return do_filp_open(AT_FDCWD, filename, flags, mode, 0);
+       struct nameidata nd;
+       struct file *filp;
+
+       filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_RCU);
+       if (unlikely(filp == ERR_PTR(-ECHILD)))
+               filp = path_openat(dfd, pathname, &nd, op, flags);
+       if (unlikely(filp == ERR_PTR(-ESTALE)))
+               filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_REVAL);
+       return filp;
+}
+
+struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
+               const char *name, const struct open_flags *op, int flags)
+{
+       struct nameidata nd;
+       struct file *file;
+
+       nd.root.mnt = mnt;
+       nd.root.dentry = dentry;
+
+       flags |= LOOKUP_ROOT;
+
+       if (dentry->d_inode->i_op->follow_link && op->intent & LOOKUP_OPEN)
+               return ERR_PTR(-ELOOP);
+
+       file = path_openat(-1, name, &nd, op, flags | LOOKUP_RCU);
+       if (unlikely(file == ERR_PTR(-ECHILD)))
+               file = path_openat(-1, name, &nd, op, flags);
+       if (unlikely(file == ERR_PTR(-ESTALE)))
+               file = path_openat(-1, name, &nd, op, flags | LOOKUP_REVAL);
+       return file;
 }
-EXPORT_SYMBOL(filp_open);
 
 /**
  * lookup_create - lookup a dentry, creating it if it doesn't exist
@@ -3111,7 +2919,11 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
                return error;
 
        mutex_lock(&inode->i_mutex);
-       error = dir->i_op->link(old_dentry, dir, new_dentry);
+       /* Make sure we don't allow creating a hardlink to an unlinked file */
+       if (inode->i_nlink == 0)
+               error = -ENOENT;
+       else
+               error = dir->i_op->link(old_dentry, dir, new_dentry);
        mutex_unlock(&inode->i_mutex);
        if (!error)
                fsnotify_link(dir, inode, new_dentry);
@@ -3133,15 +2945,27 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
        struct dentry *new_dentry;
        struct nameidata nd;
        struct path old_path;
+       int how = 0;
        int error;
        char *to;
 
-       if ((flags & ~AT_SYMLINK_FOLLOW) != 0)
+       if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
                return -EINVAL;
+       /*
+        * To use null names we require CAP_DAC_READ_SEARCH.
+        * This ensures that not everyone will be able to create
+        * a hardlink using the passed file descriptor.
+        */
+       if (flags & AT_EMPTY_PATH) {
+               if (!capable(CAP_DAC_READ_SEARCH))
+                       return -ENOENT;
+               how = LOOKUP_EMPTY;
+       }
+
+       if (flags & AT_SYMLINK_FOLLOW)
+               how |= LOOKUP_FOLLOW;
 
-       error = user_path_at(olddfd, oldname,
-                            flags & AT_SYMLINK_FOLLOW ? LOOKUP_FOLLOW : 0,
-                            &old_path);
+       error = user_path_at(olddfd, oldname, how, &old_path);
        if (error)
                return error;
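
With AT_EMPTY_PATH, linkat() can give a fresh name to a file identified only by its descriptor; as the capability check above spells out, callers without CAP_DAC_READ_SEARCH get -ENOENT. A usage sketch (paths are illustrative; typically requires root):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/tmp/source", O_RDONLY);
            if (fd < 0)
                    return 1;
            /* Empty oldpath + AT_EMPTY_PATH: link the fd itself. */
            if (linkat(fd, "", AT_FDCWD, "/tmp/newname", AT_EMPTY_PATH) < 0) {
                    perror("linkat");
                    return 1;
            }
            close(fd);
            return 0;
    }
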
 
@@ -3578,7 +3402,7 @@ EXPORT_SYMBOL(page_readlink);
 EXPORT_SYMBOL(__page_symlink);
 EXPORT_SYMBOL(page_symlink);
 EXPORT_SYMBOL(page_symlink_inode_operations);
-EXPORT_SYMBOL(path_lookup);
+EXPORT_SYMBOL(kern_path_parent);
 EXPORT_SYMBOL(kern_path);
 EXPORT_SYMBOL(vfs_path_lookup);
 EXPORT_SYMBOL(inode_permission);
index 7b0b95371696117494af637af71c24d6752a3939..dffe6f49ab93da1a30c767c5a8345bdf754f0102 100644 (file)
@@ -1002,6 +1002,18 @@ const struct seq_operations mounts_op = {
        .show   = show_vfsmnt
 };
 
+static int uuid_is_nil(u8 *uuid)
+{
+       int i;
+       u8 *cp = uuid;
+
+       for (i = 0; i < 16; i++) {
+               if (*cp++)
+                       return 0;
+       }
+       return 1;
+}
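
The open-coded byte scan above is equivalent to a 16-byte compare against a zeroed template; for comparison:

    #include <stdio.h>
    #include <string.h>

    static int uuid_is_nil_memcmp(const unsigned char uuid[16])
    {
            static const unsigned char nil[16];     /* all zeroes */
            return memcmp(uuid, nil, sizeof(nil)) == 0;
    }

    int main(void)
    {
            unsigned char u[16] = { 0 };
            printf("%d\n", uuid_is_nil_memcmp(u));  /* 1 */
            return 0;
    }
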
+
 static int show_mountinfo(struct seq_file *m, void *v)
 {
        struct proc_mounts *p = m->private;
@@ -1040,6 +1052,10 @@ static int show_mountinfo(struct seq_file *m, void *v)
        if (IS_MNT_UNBINDABLE(mnt))
                seq_puts(m, " unbindable");
 
+       if (!uuid_is_nil(mnt->mnt_sb->s_uuid))
+               /* print the uuid */
+               seq_printf(m, " uuid:%pU", mnt->mnt_sb->s_uuid);
+
        /* Filesystem specific data */
        seq_puts(m, " - ");
        show_type(m, sb);
@@ -1244,7 +1260,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
                 */
                br_write_lock(vfsmount_lock);
                if (mnt_get_count(mnt) != 2) {
-                       br_write_lock(vfsmount_lock);
+                       br_write_unlock(vfsmount_lock);
                        return -EBUSY;
                }
                br_write_unlock(vfsmount_lock);
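
The one-word fix above matters more than it looks: taking br_write_lock() a second time on the early-exit path would deadlock the big-reader lock against its own holder. The invariant, shown on an ordinary mutex (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    static int check_busy(int count)
    {
            pthread_mutex_lock(&m);
            if (count != 2) {
                    /* The early exit must UNLOCK; re-locking here, as the
                     * old code's typo did, deadlocks on ourselves. */
                    pthread_mutex_unlock(&m);
                    return -1;              /* -EBUSY in the kernel */
            }
            pthread_mutex_unlock(&m);
            return 0;
    }

    int main(void)
    {
            printf("%d %d\n", check_busy(2), check_busy(3));
            return 0;
    }
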
index 1cc600e77bb43aa14ba3d1547d5780c2a0e7c0a5..2f8e61816d75dd5a1620c510ea09111dfe20b326 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/inet.h>
 #include <linux/nfs_xdr.h>
 #include <linux/slab.h>
+#include <linux/compat.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -89,7 +90,11 @@ int nfs_wait_bit_killable(void *word)
  */
 u64 nfs_compat_user_ino64(u64 fileid)
 {
-       int ino;
+#ifdef CONFIG_COMPAT
+       compat_ulong_t ino;
+#else
+       unsigned long ino;
+#endif
 
        if (enable_ino64)
                return fileid;
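
The old int temporary both truncated and sign-extended 64-bit fileids; sizing it as compat_ulong_t for 32-bit callers (or unsigned long natively) keeps the fold well defined. The width difference in miniature:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t fileid = 0x8123456789abcdefULL;
            int      as_int = (int)fileid;       /* old code: can go negative */
            uint32_t as_u32 = (uint32_t)fileid;  /* compat_ulong_t on 32-bit */
            uint64_t as_u64 = fileid;            /* unsigned long on 64-bit */

            printf("%d %" PRIu32 " %" PRIu64 "\n", as_int, as_u32, as_u64);
            return 0;
    }
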
index 7a74740731488ed0109ec7850afc2d93bc2060fb..1be36cf65bfc2968d567ae5b7f26b21ae60e951e 100644 (file)
@@ -298,6 +298,11 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
 #if defined(CONFIG_NFS_V4_1)
 struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
 struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
+extern void nfs4_schedule_session_recovery(struct nfs4_session *);
+#else
+static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
+{
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
@@ -307,10 +312,9 @@ extern void nfs4_put_open_state(struct nfs4_state *);
 extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t);
 extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t);
 extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
-extern void nfs4_schedule_state_recovery(struct nfs_client *);
+extern void nfs4_schedule_lease_recovery(struct nfs_client *);
 extern void nfs4_schedule_state_manager(struct nfs_client *);
-extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
-extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
+extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
 extern void nfs41_handle_recall_slot(struct nfs_client *clp);
 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
index f5c9b125e8ccee997a456bea20e87ae4dade07dd..b73c34375f604b2565d64e84756d102bd76a884d 100644 (file)
@@ -219,6 +219,10 @@ decode_and_add_ds(__be32 **pp, struct inode *inode)
                goto out_err;
        }
        buf = kmalloc(rlen + 1, GFP_KERNEL);
+       if (!buf) {
+               dprintk("%s: Not enough memory\n", __func__);
+               goto out_err;
+       }
        buf[rlen] = '\0';
        memcpy(buf, r_addr, rlen);
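
The added check closes a NULL-pointer write: on a failed kmalloc() the old code fell straight through to buf[rlen] = '\0'. The safe allocate-check-copy-terminate shape, as a tiny helper:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *dup_counted(const char *src, size_t len)
    {
            char *buf = malloc(len + 1);
            if (!buf)                       /* check before any write */
                    return NULL;
            memcpy(buf, src, len);
            buf[len] = '\0';                /* terminate after the copy */
            return buf;
    }

    int main(void)
    {
            char *s = dup_counted("10.0.0.1.8.1", 8);   /* keeps "10.0.0.1" */
            if (s)
                    puts(s);
            free(s);
            return 0;
    }
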
 
index 78936a8f40ab43583dc5bdda2c06e433e294404b..0a07e353a9613f49d32508aa83d029c48f1055a6 100644 (file)
@@ -256,12 +256,13 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
                case -NFS4ERR_OPENMODE:
                        if (state == NULL)
                                break;
-                       nfs4_state_mark_reclaim_nograce(clp, state);
-                       goto do_state_recovery;
+                       nfs4_schedule_stateid_recovery(server, state);
+                       goto wait_on_recovery;
                case -NFS4ERR_STALE_STATEID:
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_EXPIRED:
-                       goto do_state_recovery;
+                       nfs4_schedule_lease_recovery(clp);
+                       goto wait_on_recovery;
 #if defined(CONFIG_NFS_V4_1)
                case -NFS4ERR_BADSESSION:
                case -NFS4ERR_BADSLOT:
@@ -272,7 +273,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
                case -NFS4ERR_SEQ_MISORDERED:
                        dprintk("%s ERROR: %d Reset session\n", __func__,
                                errorcode);
-                       nfs4_schedule_state_recovery(clp);
+                       nfs4_schedule_session_recovery(clp->cl_session);
                        exception->retry = 1;
                        break;
 #endif /* defined(CONFIG_NFS_V4_1) */
@@ -295,8 +296,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
        }
        /* We failed to handle the error */
        return nfs4_map_errors(ret);
-do_state_recovery:
-       nfs4_schedule_state_recovery(clp);
+wait_on_recovery:
        ret = nfs4_wait_clnt_recover(clp);
        if (ret == 0)
                exception->retry = 1;
@@ -435,8 +435,8 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *
                clp = res->sr_session->clp;
                do_renew_lease(clp, timestamp);
                /* Check sequence flags */
-               if (atomic_read(&clp->cl_count) > 1)
-                       nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+               if (res->sr_status_flags != 0)
+                       nfs4_schedule_lease_recovery(clp);
                break;
        case -NFS4ERR_DELAY:
                /* The server detected a resend of the RPC call and
@@ -1255,14 +1255,13 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
                        case -NFS4ERR_BAD_HIGH_SLOT:
                        case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                        case -NFS4ERR_DEADSESSION:
-                               nfs4_schedule_state_recovery(
-                                       server->nfs_client);
+                               nfs4_schedule_session_recovery(server->nfs_client->cl_session);
                                goto out;
                        case -NFS4ERR_STALE_CLIENTID:
                        case -NFS4ERR_STALE_STATEID:
                        case -NFS4ERR_EXPIRED:
                                /* Don't recall a delegation if it was lost */
-                               nfs4_schedule_state_recovery(server->nfs_client);
+                               nfs4_schedule_lease_recovery(server->nfs_client);
                                goto out;
                        case -ERESTARTSYS:
                                /*
@@ -1271,7 +1270,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
                                 */
                        case -NFS4ERR_ADMIN_REVOKED:
                        case -NFS4ERR_BAD_STATEID:
-                               nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
+                               nfs4_schedule_stateid_recovery(server, state);
                        case -EKEYEXPIRED:
                                /*
                                 * User RPCSEC_GSS context has expired.
@@ -1587,7 +1586,7 @@ static int nfs4_recover_expired_lease(struct nfs_server *server)
                if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
                    !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
                        break;
-               nfs4_schedule_state_recovery(clp);
+               nfs4_schedule_state_manager(clp);
                ret = -EIO;
        }
        return ret;
@@ -3178,7 +3177,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata)
        if (task->tk_status < 0) {
                /* Unless we're shutting down, schedule state recovery! */
                if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0)
-                       nfs4_schedule_state_recovery(clp);
+                       nfs4_schedule_lease_recovery(clp);
                return;
        }
        do_renew_lease(clp, timestamp);
@@ -3252,6 +3251,35 @@ static void buf_to_pages(const void *buf, size_t buflen,
        }
 }
 
+static int buf_to_pages_noslab(const void *buf, size_t buflen,
+               struct page **pages, unsigned int *pgbase)
+{
+       struct page *newpage, **spages;
+       int rc = 0;
+       size_t len;
+       spages = pages;
+
+       do {
+               len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
+               newpage = alloc_page(GFP_KERNEL);
+
+               if (newpage == NULL)
+                       goto unwind;
+               memcpy(page_address(newpage), buf, len);
+               buf += len;
+               buflen -= len;
+               *pages++ = newpage;
+               rc++;
+       } while (buflen != 0);
+
+       return rc;
+
+unwind:
+       for (; rc > 0; rc--)
+               __free_page(spages[rc-1]);
+       return -ENOMEM;
+}
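
buf_to_pages_noslab() copies the caller's buffer into pages it allocates itself, so that each page can be released after transmission (see the put_page() loop added to __nfs4_proc_set_acl() below), and unwinds everything if an allocation fails. The same shape with malloc'd chunks:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define CHUNK 4096                      /* stands in for PAGE_CACHE_SIZE */

    static int buf_to_chunks(const void *buf, size_t buflen, void **chunks)
    {
            int n = 0;

            while (buflen) {
                    size_t len = buflen < CHUNK ? buflen : CHUNK;
                    void *c = malloc(CHUNK);
                    if (!c)
                            goto unwind;    /* free what we took, in reverse */
                    memcpy(c, buf, len);
                    buf = (const char *)buf + len;
                    buflen -= len;
                    chunks[n++] = c;
            }
            return n;

    unwind:
            while (n > 0)
                    free(chunks[--n]);
            return -1;
    }

    int main(void)
    {
            void *chunks[4];
            static char data[10000];
            int n = buf_to_chunks(data, sizeof(data), chunks);

            printf("%d chunks\n", n);       /* 3 */
            while (n > 0)
                    free(chunks[--n]);
            return 0;
    }
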
+
 struct nfs4_cached_acl {
        int cached;
        size_t len;
@@ -3420,13 +3448,23 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
                .rpc_argp       = &arg,
                .rpc_resp       = &res,
        };
-       int ret;
+       int ret, i;
 
        if (!nfs4_server_supports_acls(server))
                return -EOPNOTSUPP;
+       i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
+       if (i < 0)
+               return i;
        nfs_inode_return_delegation(inode);
-       buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
        ret = nfs4_call_sync(server, &msg, &arg, &res, 1);
+
+       /*
+        * Free each page after tx, so the only ref left is
+        * held by the network stack
+        */
+       for (; i > 0; i--)
+               put_page(pages[i-1]);
+
        /*
         * Acl update can result in inode attribute update.
         * so mark the attribute cache invalid.
@@ -3464,12 +3502,13 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
                case -NFS4ERR_OPENMODE:
                        if (state == NULL)
                                break;
-                       nfs4_state_mark_reclaim_nograce(clp, state);
-                       goto do_state_recovery;
+                       nfs4_schedule_stateid_recovery(server, state);
+                       goto wait_on_recovery;
                case -NFS4ERR_STALE_STATEID:
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_EXPIRED:
-                       goto do_state_recovery;
+                       nfs4_schedule_lease_recovery(clp);
+                       goto wait_on_recovery;
 #if defined(CONFIG_NFS_V4_1)
                case -NFS4ERR_BADSESSION:
                case -NFS4ERR_BADSLOT:
@@ -3480,7 +3519,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
                case -NFS4ERR_SEQ_MISORDERED:
                        dprintk("%s ERROR %d, Reset session\n", __func__,
                                task->tk_status);
-                       nfs4_schedule_state_recovery(clp);
+                       nfs4_schedule_session_recovery(clp->cl_session);
                        task->tk_status = 0;
                        return -EAGAIN;
 #endif /* CONFIG_NFS_V4_1 */
@@ -3497,9 +3536,8 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
        }
        task->tk_status = nfs4_map_errors(task->tk_status);
        return 0;
-do_state_recovery:
+wait_on_recovery:
        rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
-       nfs4_schedule_state_recovery(clp);
        if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
                rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
        task->tk_status = 0;
@@ -4110,7 +4148,7 @@ static void nfs4_lock_release(void *calldata)
                task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
                                data->arg.lock_seqid);
                if (!IS_ERR(task))
-                       rpc_put_task(task);
+                       rpc_put_task_async(task);
                dprintk("%s: cancelling lock!\n", __func__);
        } else
                nfs_free_seqid(data->arg.lock_seqid);
@@ -4134,23 +4172,18 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = {
 
 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
 {
-       struct nfs_client *clp = server->nfs_client;
-       struct nfs4_state *state = lsp->ls_state;
-
        switch (error) {
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_BAD_STATEID:
-       case -NFS4ERR_EXPIRED:
+               lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
                if (new_lock_owner != 0 ||
                   (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
-                       nfs4_state_mark_reclaim_nograce(clp, state);
-               lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
+                       nfs4_schedule_stateid_recovery(server, lsp->ls_state);
                break;
        case -NFS4ERR_STALE_STATEID:
-               if (new_lock_owner != 0 ||
-                   (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
-                       nfs4_state_mark_reclaim_reboot(clp, state);
                lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
+       case -NFS4ERR_EXPIRED:
+               nfs4_schedule_lease_recovery(server->nfs_client);
        };
 }
 
@@ -4366,12 +4399,14 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
                        case -NFS4ERR_EXPIRED:
                        case -NFS4ERR_STALE_CLIENTID:
                        case -NFS4ERR_STALE_STATEID:
+                               nfs4_schedule_lease_recovery(server->nfs_client);
+                               goto out;
                        case -NFS4ERR_BADSESSION:
                        case -NFS4ERR_BADSLOT:
                        case -NFS4ERR_BAD_HIGH_SLOT:
                        case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                        case -NFS4ERR_DEADSESSION:
-                               nfs4_schedule_state_recovery(server->nfs_client);
+                               nfs4_schedule_session_recovery(server->nfs_client->cl_session);
                                goto out;
                        case -ERESTARTSYS:
                                /*
@@ -4381,7 +4416,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
                        case -NFS4ERR_ADMIN_REVOKED:
                        case -NFS4ERR_BAD_STATEID:
                        case -NFS4ERR_OPENMODE:
-                               nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
+                               nfs4_schedule_stateid_recovery(server, state);
                                err = 0;
                                goto out;
                        case -EKEYEXPIRED:
@@ -4988,10 +5023,20 @@ int nfs4_proc_create_session(struct nfs_client *clp)
        int status;
        unsigned *ptr;
        struct nfs4_session *session = clp->cl_session;
+       long timeout = 0;
+       int err;
 
        dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
 
-       status = _nfs4_proc_create_session(clp);
+       do {
+               status = _nfs4_proc_create_session(clp);
+               if (status == -NFS4ERR_DELAY) {
+                       err = nfs4_delay(clp->cl_rpcclient, &timeout);
+                       if (err)
+                               status = err;
+               }
+       } while (status == -NFS4ERR_DELAY);
+
        if (status)
                goto out;
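
Session creation is now retried for as long as the server answers NFS4ERR_DELAY, with nfs4_delay() sleeping between attempts. The generic shape, with a stubbed call and a simple capped doubling standing in for nfs4_delay()'s actual policy (an assumption):

    #include <stdio.h>
    #include <unistd.h>

    #define ERR_DELAY (-1)                  /* stand-in for -NFS4ERR_DELAY */

    static int attempts;

    static int try_create_session(void)
    {
            return ++attempts < 3 ? ERR_DELAY : 0;  /* succeed on try 3 */
    }

    static int create_session_with_backoff(void)
    {
            unsigned int timeout = 1;
            int status;

            do {
                    status = try_create_session();
                    if (status == ERR_DELAY) {
                            sleep(timeout);         /* nfs4_delay() analogue */
                            if (timeout < 15)
                                    timeout *= 2;   /* capped backoff (assumed) */
                    }
            } while (status == ERR_DELAY);
            return status;
    }

    int main(void)
    {
            printf("%d after %d attempts\n",
                   create_session_with_backoff(), attempts);
            return 0;
    }
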
 
@@ -5100,7 +5145,7 @@ static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client
                rpc_delay(task, NFS4_POLL_RETRY_MAX);
                return -EAGAIN;
        default:
-               nfs4_schedule_state_recovery(clp);
+               nfs4_schedule_lease_recovery(clp);
        }
        return 0;
 }
@@ -5187,7 +5232,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
        if (IS_ERR(task))
                ret = PTR_ERR(task);
        else
-               rpc_put_task(task);
+               rpc_put_task_async(task);
        dprintk("<-- %s status=%d\n", __func__, ret);
        return ret;
 }
@@ -5203,8 +5248,13 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
                goto out;
        }
        ret = rpc_wait_for_completion_task(task);
-       if (!ret)
+       if (!ret) {
+               struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
+
+               if (task->tk_status == 0)
+                       nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
                ret = task->tk_status;
+       }
        rpc_put_task(task);
 out:
        dprintk("<-- %s status=%d\n", __func__, ret);
@@ -5241,7 +5291,7 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
                rpc_delay(task, NFS4_POLL_RETRY_MAX);
                return -EAGAIN;
        default:
-               nfs4_schedule_state_recovery(clp);
+               nfs4_schedule_lease_recovery(clp);
        }
        return 0;
 }
@@ -5309,6 +5359,9 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
                status = PTR_ERR(task);
                goto out;
        }
+       status = nfs4_wait_for_completion_rpc_task(task);
+       if (status == 0)
+               status = task->tk_status;
        rpc_put_task(task);
        return 0;
 out:
index e6742b57a04c725aeebd07ab6a2ad0e27bc95a9e..0592288f9f06744216fc497e85717cb637951f2c 100644 (file)
@@ -1007,9 +1007,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
 }
 
 /*
- * Schedule a state recovery attempt
+ * Schedule a lease recovery attempt
  */
-void nfs4_schedule_state_recovery(struct nfs_client *clp)
+void nfs4_schedule_lease_recovery(struct nfs_client *clp)
 {
        if (!clp)
                return;
@@ -1018,7 +1018,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp)
        nfs4_schedule_state_manager(clp);
 }
 
-int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
+static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
 {
 
        set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
@@ -1032,7 +1032,7 @@ int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *st
        return 1;
 }
 
-int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
+static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
 {
        set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
        clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
@@ -1041,6 +1041,14 @@ int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *s
        return 1;
 }
 
+void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state)
+{
+       struct nfs_client *clp = server->nfs_client;
+
+       nfs4_state_mark_reclaim_nograce(clp, state);
+       nfs4_schedule_state_manager(clp);
+}
+
 static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
 {
        struct inode *inode = state->inode;
@@ -1436,10 +1444,15 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
 }
 
 #ifdef CONFIG_NFS_V4_1
+void nfs4_schedule_session_recovery(struct nfs4_session *session)
+{
+       nfs4_schedule_lease_recovery(session->clp);
+}
+
 void nfs41_handle_recall_slot(struct nfs_client *clp)
 {
        set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
-       nfs4_schedule_state_recovery(clp);
+       nfs4_schedule_state_manager(clp);
 }
 
 static void nfs4_reset_all_state(struct nfs_client *clp)
@@ -1447,7 +1460,7 @@ static void nfs4_reset_all_state(struct nfs_client *clp)
        if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
                clp->cl_boot_time = CURRENT_TIME;
                nfs4_state_start_reclaim_nograce(clp);
-               nfs4_schedule_state_recovery(clp);
+               nfs4_schedule_state_manager(clp);
        }
 }
 
@@ -1455,7 +1468,7 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
 {
        if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
                nfs4_state_start_reclaim_reboot(clp);
-               nfs4_schedule_state_recovery(clp);
+               nfs4_schedule_state_manager(clp);
        }
 }
 
@@ -1475,7 +1488,7 @@ static void nfs41_handle_cb_path_down(struct nfs_client *clp)
 {
        nfs_expire_all_delegations(clp);
        if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
-               nfs4_schedule_state_recovery(clp);
+               nfs4_schedule_state_manager(clp);
 }
 
 void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
index 4e2c168b6ee96701e9e7d5fd9ffca742d3a115ed..94d50e86a12408faecd30033ea8a035d7659d37e 100644 (file)
@@ -1660,7 +1660,7 @@ static void encode_create_session(struct xdr_stream *xdr,
 
        p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12);
        *p++ = cpu_to_be32(OP_CREATE_SESSION);
-       p = xdr_encode_hyper(p, clp->cl_ex_clid);
+       p = xdr_encode_hyper(p, clp->cl_clientid);
        *p++ = cpu_to_be32(clp->cl_seqid);                      /*Sequence id */
        *p++ = cpu_to_be32(args->flags);                        /*flags */
 
@@ -4694,7 +4694,7 @@ static int decode_exchange_id(struct xdr_stream *xdr,
        p = xdr_inline_decode(xdr, 8);
        if (unlikely(!p))
                goto out_overflow;
-       xdr_decode_hyper(p, &clp->cl_ex_clid);
+       xdr_decode_hyper(p, &clp->cl_clientid);
        p = xdr_inline_decode(xdr, 12);
        if (unlikely(!p))
                goto out_overflow;
index 903908a20023bf83b08d7fc6c808cbd4f20e0aa2..c541093a5bf2cf058521a1bddd82c404befb5f38 100644 (file)
 /* Default path we try to mount. "%s" gets replaced by our IP address */
 #define NFS_ROOT               "/tftpboot/%s"
 
+/* Default NFSROOT mount options. */
+#define NFS_DEF_OPTIONS                "udp"
+
 /* Parameters passed from the kernel command line */
 static char nfs_root_parms[256] __initdata = "";
 
 /* Text-based mount options passed to super.c */
-static char nfs_root_options[256] __initdata = "";
+static char nfs_root_options[256] __initdata = NFS_DEF_OPTIONS;
 
 /* Address of NFS server */
 static __be32 servaddr __initdata = htonl(INADDR_NONE);
@@ -160,8 +163,14 @@ static int __init root_nfs_copy(char *dest, const char *src,
 }
 
 static int __init root_nfs_cat(char *dest, const char *src,
-                                 const size_t destlen)
+                              const size_t destlen)
 {
+       size_t len = strlen(dest);
+
+       if (len && dest[len - 1] != ',')
+               if (strlcat(dest, ",", destlen) > destlen)
+                       return -1;
+
        if (strlcat(dest, src, destlen) > destlen)
                return -1;
        return 0;
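
Folding the separator into root_nfs_cat() means every append gets a comma exactly when one is needed, instead of the old trailing-comma dance in root_nfs_parse_options(). A userspace sketch of the same joining rule, with snprintf() standing in for strlcat():

    #include <stdio.h>
    #include <string.h>

    static int cat_option(char *dest, size_t destlen, const char *src)
    {
            size_t len = strlen(dest);
            int n;

            /* Prepend "," unless dest is empty or already ends in one. */
            n = snprintf(dest + len, destlen - len, "%s%s",
                         (len && dest[len - 1] != ',') ? "," : "", src);
            return (n < 0 || (size_t)n >= destlen - len) ? -1 : 0;
    }

    int main(void)
    {
            char opts[64] = "udp";

            cat_option(opts, sizeof(opts), "vers=3");
            cat_option(opts, sizeof(opts), "nolock,addr=10.0.0.1");
            puts(opts);     /* udp,vers=3,nolock,addr=10.0.0.1 */
            return 0;
    }
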
@@ -194,16 +203,6 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath,
                if (root_nfs_cat(nfs_root_options, incoming,
                                                sizeof(nfs_root_options)))
                        return -1;
-
-       /*
-        * Possibly prepare for more options to be appended
-        */
-       if (nfs_root_options[0] != '\0' &&
-           nfs_root_options[strlen(nfs_root_options)] != ',')
-               if (root_nfs_cat(nfs_root_options, ",",
-                                               sizeof(nfs_root_options)))
-                       return -1;
-
        return 0;
 }
 
@@ -217,7 +216,7 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath,
  */
 static int __init root_nfs_data(char *cmdline)
 {
-       char addr_option[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1];
+       char mand_options[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1];
        int len, retval = -1;
        char *tmp = NULL;
        const size_t tmplen = sizeof(nfs_export_path);
@@ -244,9 +243,9 @@ static int __init root_nfs_data(char *cmdline)
         * Append mandatory options for nfsroot so they override
         * what has come before
         */
-       snprintf(addr_option, sizeof(addr_option), "nolock,addr=%pI4",
+       snprintf(mand_options, sizeof(mand_options), "nolock,addr=%pI4",
                        &servaddr);
-       if (root_nfs_cat(nfs_root_options, addr_option,
+       if (root_nfs_cat(nfs_root_options, mand_options,
                                                sizeof(nfs_root_options)))
                goto out_optionstoolong;
 
index e313a51acdd18cd090427365fcf7dd209847be81..6481d537d69dcb0c06f11248dc4298cee557015a 100644 (file)
@@ -180,7 +180,7 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n
        task_setup_data.rpc_client = NFS_CLIENT(dir);
        task = rpc_run_task(&task_setup_data);
        if (!IS_ERR(task))
-               rpc_put_task(task);
+               rpc_put_task_async(task);
        return 1;
 }
 
index c8278f4046cba5a957705c1e43ee675dba388529..42b92d7a9cc4ed7569c6115997a455816f3f09ac 100644 (file)
@@ -1292,6 +1292,8 @@ static int nfs_commit_rpcsetup(struct list_head *head,
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
+       if (how & FLUSH_SYNC)
+               rpc_wait_for_completion_task(task);
        rpc_put_task(task);
        return 0;
 }
index bf9cbd242dddbe5f7cda1969fc3eeebbd6e023ad..124e8fcb0dd6ad1fbe55c166ec30019abdd9c410 100644 (file)
 
 static struct file *do_open(char *name, int flags)
 {
-       struct nameidata nd;
        struct vfsmount *mnt;
-       int error;
+       struct file *file;
 
        mnt = do_kern_mount("nfsd", 0, "nfsd", NULL);
        if (IS_ERR(mnt))
                return (struct file *)mnt;
 
-       error = vfs_path_lookup(mnt->mnt_root, mnt, name, 0, &nd);
-       mntput(mnt);    /* drop do_kern_mount reference */
-       if (error)
-               return ERR_PTR(error);
-
-       if (flags == O_RDWR)
-               error = may_open(&nd.path, MAY_READ|MAY_WRITE, flags);
-       else
-               error = may_open(&nd.path, MAY_WRITE, flags);
+       file = file_open_root(mnt->mnt_root, mnt, name, flags);
 
-       if (!error)
-               return dentry_open(nd.path.dentry, nd.path.mnt, flags,
-                                  current_cred());
-
-       path_put(&nd.path);
-       return ERR_PTR(error);
+       mntput(mnt);    /* drop do_kern_mount reference */
+       return file;
 }
 
 static struct {
index cde36cb0f3489f4d0ac29a06310654ec0f177f8f..02eb4edf0ece1ab240c86aaf1c9afc025a805199 100644 (file)
@@ -432,7 +432,7 @@ static int decode_cb_sequence4resok(struct xdr_stream *xdr,
         * If the server returns different values for sessionID, slotID or
         * sequence number, the server is looney tunes.
         */
-       p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4);
+       p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4);
        if (unlikely(p == NULL))
                goto out_overflow;
        memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
index 54b60bfceb8d0c6b8d4ec5100a2aedb6600a1d5d..7b566ec14e1833cac3f7c61d5ab6b5bb169e3abb 100644 (file)
@@ -2445,15 +2445,16 @@ nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
 static struct nfs4_delegation *
 find_delegation_file(struct nfs4_file *fp, stateid_t *stid)
 {
-       struct nfs4_delegation *dp = NULL;
+       struct nfs4_delegation *dp;
 
        spin_lock(&recall_lock);
-       list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) {
-               if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid)
-                       break;
-       }
+       list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
+               if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) {
+                       spin_unlock(&recall_lock);
+                       return dp;
+               }
        spin_unlock(&recall_lock);
-       return dp;
+       return NULL;
 }
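
The rewrite above fixes a classic pitfall: a list_for_each_entry() cursor is never NULL after a completed traversal; it ends up as container_of() applied to the list head itself, i.e. a garbage pointer. Returning from inside the loop, with an explicit NULL afterwards, sidesteps that entirely. A userspace demonstration of the idiom:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct deleg { int id; struct list_head node; };

    static struct deleg *find(struct list_head *head, int id)
    {
            struct list_head *p;

            for (p = head->next; p != head; p = p->next) {
                    struct deleg *d = container_of(p, struct deleg, node);
                    if (d->id == id)
                            return d;       /* found: return from inside the loop */
            }
            return NULL;                    /* not found: an honest NULL */
    }

    int main(void)
    {
            struct list_head head = { &head, &head };   /* empty list */

            printf("%p\n", (void *)find(&head, 1));     /* (nil) */
            return 0;
    }
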
 
 int share_access_to_flags(u32 share_access)
index 1275b86550701812ccea7c0cc80759eb63fd019b..615f0a9f06008e54ceda4ff4804b295fdf3a8b13 100644 (file)
@@ -1142,7 +1142,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
 
        u32 dummy;
        char *machine_name;
-       int i;
+       int i, j;
        int nr_secflavs;
 
        READ_BUF(16);
@@ -1215,7 +1215,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
                        READ_BUF(4);
                        READ32(dummy);
                        READ_BUF(dummy * 4);
-                       for (i = 0; i < dummy; ++i)
+                       for (j = 0; j < dummy; ++j)
                                READ32(dummy);
                        break;
                case RPC_AUTH_GSS:
index 388e9e8f5286f737970ccefa36e919556235e4bf..85f7baa15f5dd8fa1eac905b8c8840c73b0294fa 100644 (file)
 #include "btnode.h"
 
 
-void nilfs_btnode_cache_init_once(struct address_space *btnc)
-{
-       nilfs_mapping_init_once(btnc);
-}
-
 static const struct address_space_operations def_btnode_aops = {
        .sync_page              = block_sync_page,
 };
index 79037494f1e0408c42c41e3147204e480e765b4e..1b8ebd888c2844348a128a37e0781c1b82915aea 100644 (file)
@@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt {
        struct buffer_head *newbh;
 };
 
-void nilfs_btnode_cache_init_once(struct address_space *);
 void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
 void nilfs_btnode_cache_clear(struct address_space *);
 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
index 6a0e2a189f60650b3399f4b9d3732ac6037f791f..a0babd2bff6a2e03a924e45110ff1a8698f59fd6 100644 (file)
@@ -454,9 +454,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
        struct backing_dev_info *bdi = inode->i_sb->s_bdi;
 
        INIT_LIST_HEAD(&shadow->frozen_buffers);
-       nilfs_mapping_init_once(&shadow->frozen_data);
+       address_space_init_once(&shadow->frozen_data);
        nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
-       nilfs_mapping_init_once(&shadow->frozen_btnodes);
+       address_space_init_once(&shadow->frozen_btnodes);
        nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
        mi->mi_shadow = shadow;
        return 0;
index 98034271cd02e06194e28701aa63c8086fe0ed7e..161791d26458b3c6669dbb11cdd6741a8ae9facb 100644 (file)
@@ -397,7 +397,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
                if (!new_de)
                        goto out_dir;
-               inc_nlink(old_inode);
                nilfs_set_link(new_dir, new_de, new_page, old_inode);
                nilfs_mark_inode_dirty(new_dir);
                new_inode->i_ctime = CURRENT_TIME;
@@ -411,13 +410,9 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        if (new_dir->i_nlink >= NILFS_LINK_MAX)
                                goto out_dir;
                }
-               inc_nlink(old_inode);
                err = nilfs_add_link(new_dentry, old_inode);
-               if (err) {
-                       drop_nlink(old_inode);
-                       nilfs_mark_inode_dirty(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de) {
                        inc_nlink(new_dir);
                        nilfs_mark_inode_dirty(new_dir);
@@ -431,7 +426,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        old_inode->i_ctime = CURRENT_TIME;
 
        nilfs_delete_entry(old_de, old_page);
-       drop_nlink(old_inode);
 
        if (dir_de) {
                nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
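The rename hunks above stop bumping old_inode's link count around the directory-entry swap; the same conversion lands for sysv and ufs further down. The net link count of old_inode never changes across a rename (one entry is added, one removed), so the inc/dec pair only covered the transient state; and since, as the comment deleted from ufs notes, inode_dec_link_count() was also what marked the inode dirty, an explicit mark-dirty now accompanies the ctime update where needed.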
index 0c432416cfefc608383dc8f7ee58d88140a26f59..a585b35fd6bc201c9d3063005792101da769ab01 100644 (file)
@@ -492,19 +492,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
        return nc;
 }
 
-void nilfs_mapping_init_once(struct address_space *mapping)
-{
-       memset(mapping, 0, sizeof(*mapping));
-       INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
-       spin_lock_init(&mapping->tree_lock);
-       INIT_LIST_HEAD(&mapping->private_list);
-       spin_lock_init(&mapping->private_lock);
-
-       spin_lock_init(&mapping->i_mmap_lock);
-       INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
-       INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
-}
-
 void nilfs_mapping_init(struct address_space *mapping,
                        struct backing_dev_info *bdi,
                        const struct address_space_operations *aops)
index 622df27cd89155d74a94ef786910715f9f3a11ad..2a00953ebd5f1b58b92494dbd684eb6128d76598 100644 (file)
@@ -61,7 +61,6 @@ void nilfs_free_private_page(struct page *);
 int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
 void nilfs_copy_back_pages(struct address_space *, struct address_space *);
 void nilfs_clear_dirty_pages(struct address_space *);
-void nilfs_mapping_init_once(struct address_space *mapping);
 void nilfs_mapping_init(struct address_space *mapping,
                        struct backing_dev_info *bdi,
                        const struct address_space_operations *aops);
index 55ebae5c7f39f58d4065850b9fabebb66a93b0cf..2de9f636792a7545290bbd95ce90b80f28345063 100644 (file)
@@ -430,7 +430,8 @@ static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
        nilfs_segctor_map_segsum_entry(
                sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
 
-       if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
+       if (NILFS_I(inode)->i_root &&
+           !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
                set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
        /* skip finfo */
 }
index 58fd707174e108714091c081604f7daa781d10f5..1673b3d99842018206640c77ea9922840c5a2bc4 100644 (file)
@@ -1279,7 +1279,7 @@ static void nilfs_inode_init_once(void *obj)
 #ifdef CONFIG_NILFS_XATTR
        init_rwsem(&ii->xattr_sem);
 #endif
-       nilfs_btnode_cache_init_once(&ii->i_btnode_cache);
+       address_space_init_once(&ii->i_btnode_cache);
        ii->i_bmap = &ii->i_bmap_data;
        inode_init_once(&ii->vfs_inode);
 }
index 6d80ecc7834f55ff2e7151b38cc6540ad1b08b51..7eb90403fc8af0521b7d3df89d37956ccad054a4 100644 (file)
@@ -56,7 +56,7 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
        int ret = 0;    /* if all else fails, just return false */
        struct ocfs2_super *osb;
 
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
 
        inode = dentry->d_inode;
index 5dbc3062b4fd0757792c2f048d8a8a2c647b5b34..254652a9b542687e1a08e0e52d7b28db20ed6ba3 100644 (file)
@@ -197,8 +197,12 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
                   dentry->d_name.len, dentry->d_name.name,
                   fh, len, connectable);
 
-       if (len < 3 || (connectable && len < 6)) {
-               mlog(ML_ERROR, "fh buffer is too small for encoding\n");
+       if (connectable && (len < 6)) {
+               *max_len = 6;
+               type = 255;
+               goto bail;
+       } else if (len < 3) {
+               *max_len = 3;
                type = 255;
                goto bail;
        }
index 43e56b97f9c016c923614f887bcce3e9ba58da04..6180da1e37e65df216bf5856600e07a93bc96104 100644 (file)
@@ -405,9 +405,9 @@ static inline int ocfs2_remove_extent_credits(struct super_block *sb)
               ocfs2_quota_trans_credits(sb);
 }
 
-/* data block for new dir/symlink, 2 for bitmap updates (bitmap fe +
- * bitmap block for the new bit) dx_root update for free list */
-#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2 + 1)
+/* data block for new dir/symlink, allocation of directory block, dx_root
+ * update for free list */
+#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + OCFS2_SUBALLOC_ALLOC + 1)
 
 static inline int ocfs2_add_dir_index_credits(struct super_block *sb)
 {
index b5f9160e93e9119b949451bb3e5db66b3c1df62c..29623da133ccbf5c524c336b247b7e8af66913b8 100644 (file)
@@ -3228,7 +3228,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
                                        u32 num_clusters, unsigned int e_flags)
 {
        int ret, delete, index, credits =  0;
-       u32 new_bit, new_len;
+       u32 new_bit, new_len, orig_num_clusters;
        unsigned int set_len;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        handle_t *handle;
@@ -3261,6 +3261,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
                goto out;
        }
 
+       orig_num_clusters = num_clusters;
+
        while (num_clusters) {
                ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
                                             p_cluster, num_clusters,
@@ -3348,7 +3350,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
         * in write-back mode.
         */
        if (context->get_clusters == ocfs2_di_get_clusters) {
-               ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
+               ret = ocfs2_cow_sync_writeback(sb, context, cpos,
+                                              orig_num_clusters);
                if (ret)
                        mlog_errno(ret);
        }
@@ -4376,7 +4379,7 @@ static int ocfs2_user_path_parent(const char __user *path,
        if (IS_ERR(s))
                return PTR_ERR(s);
 
-       error = path_lookup(s, LOOKUP_PARENT, nd);
+       error = kern_path_parent(s, nd);
        if (error)
                putname(s);
        else
index 38f986d2447ea9ba0c7a657b0500c421bf2abbaa..36c423fb063523e8a5b4791d9bc483e3fd7909e7 100644 (file)
@@ -1316,7 +1316,7 @@ static int ocfs2_parse_options(struct super_block *sb,
                               struct mount_options *mopt,
                               int is_remount)
 {
-       int status;
+       int status, user_stack = 0;
        char *p;
        u32 tmp;
 
@@ -1459,6 +1459,15 @@ static int ocfs2_parse_options(struct super_block *sb,
                        memcpy(mopt->cluster_stack, args[0].from,
                               OCFS2_STACK_LABEL_LEN);
                        mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
+                       /*
+                        * Open code the memcmp here as we don't have
+                        * an osb to pass to
+                        * ocfs2_userspace_stack().
+                        */
+                       if (memcmp(mopt->cluster_stack,
+                                  OCFS2_CLASSIC_CLUSTER_STACK,
+                                  OCFS2_STACK_LABEL_LEN))
+                               user_stack = 1;
                        break;
                case Opt_inode64:
                        mopt->mount_opt |= OCFS2_MOUNT_INODE64;
@@ -1514,13 +1523,16 @@ static int ocfs2_parse_options(struct super_block *sb,
                }
        }
 
-       /* Ensure only one heartbeat mode */
-       tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
-                                OCFS2_MOUNT_HB_NONE);
-       if (hweight32(tmp) != 1) {
-               mlog(ML_ERROR, "Invalid heartbeat mount options\n");
-               status = 0;
-               goto bail;
+       if (user_stack == 0) {
+               /* Ensure only one heartbeat mode */
+               tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
+                                        OCFS2_MOUNT_HB_GLOBAL |
+                                        OCFS2_MOUNT_HB_NONE);
+               if (hweight32(tmp) != 1) {
+                       mlog(ML_ERROR, "Invalid heartbeat mount options\n");
+                       status = 0;
+                       goto bail;
+               }
        }
 
        status = 1;
index 5a2c6ebc22b5d9a1e355050cb14d3218f7a9d0d3..3cac0bda46df8511e03ddc3cf7631fc33b989645 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -233,6 +233,14 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 
        if (!(file->f_mode & FMODE_WRITE))
                return -EBADF;
+
+       /* It's not possible to punch a hole in an append-only file */
+       if (mode & FALLOC_FL_PUNCH_HOLE && IS_APPEND(inode))
+               return -EPERM;
+
+       if (IS_IMMUTABLE(inode))
+               return -EPERM;
+
        /*
         * Revalidate the write permissions, in case security policy has
         * changed since the files were opened.
@@ -565,13 +573,15 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
 {
        struct path path;
        int error = -EINVAL;
-       int follow;
+       int lookup_flags;
 
-       if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
+       if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
                goto out;
 
-       follow = (flag & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW;
-       error = user_path_at(dfd, filename, follow, &path);
+       lookup_flags = (flag & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW;
+       if (flag & AT_EMPTY_PATH)
+               lookup_flags |= LOOKUP_EMPTY;
+       error = user_path_at(dfd, filename, lookup_flags, &path);
        if (error)
                goto out;
        error = mnt_want_write(path.mnt);
@@ -661,11 +671,16 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
                                        int (*open)(struct inode *, struct file *),
                                        const struct cred *cred)
 {
+       static const struct file_operations empty_fops = {};
        struct inode *inode;
        int error;
 
        f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
                                FMODE_PREAD | FMODE_PWRITE;
+
+       if (unlikely(f->f_flags & O_PATH))
+               f->f_mode = FMODE_PATH;
+
        inode = dentry->d_inode;
        if (f->f_mode & FMODE_WRITE) {
                error = __get_file_write_access(inode, mnt);
@@ -679,9 +694,15 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
        f->f_path.dentry = dentry;
        f->f_path.mnt = mnt;
        f->f_pos = 0;
-       f->f_op = fops_get(inode->i_fop);
        file_sb_list_add(f, inode->i_sb);
 
+       if (unlikely(f->f_mode & FMODE_PATH)) {
+               f->f_op = &empty_fops;
+               return f;
+       }
+
+       f->f_op = fops_get(inode->i_fop);
+
        error = security_dentry_open(f, cred);
        if (error)
                goto cleanup_all;
@@ -882,15 +903,110 @@ void fd_install(unsigned int fd, struct file *file)
 
 EXPORT_SYMBOL(fd_install);
 
+static inline int build_open_flags(int flags, int mode, struct open_flags *op)
+{
+       int lookup_flags = 0;
+       int acc_mode;
+
+       if (!(flags & O_CREAT))
+               mode = 0;
+       op->mode = mode;
+
+       /* Must never be set by userspace */
+       flags &= ~FMODE_NONOTIFY;
+
+       /*
+        * O_SYNC is implemented as __O_SYNC|O_DSYNC.  As many places only
+        * check for O_DSYNC when they need any syncing at all, we enforce
+        * that it is always set instead of having to deal with possibly
+        * weird behaviour for malicious applications setting only __O_SYNC.
+        */
+       if (flags & __O_SYNC)
+               flags |= O_DSYNC;
+
+       /*
+        * If O_PATH is set in the open flags, then no flags
+        * other than the small set below may accompany it.
+        */
+       if (flags & O_PATH) {
+               flags &= O_DIRECTORY | O_NOFOLLOW | O_PATH;
+               acc_mode = 0;
+       } else {
+               acc_mode = MAY_OPEN | ACC_MODE(flags);
+       }
+
+       op->open_flag = flags;
+
+       /* O_TRUNC implies we need access checks for write permissions */
+       if (flags & O_TRUNC)
+               acc_mode |= MAY_WRITE;
+
+       /* Allow the LSM permission hook to distinguish append
+          access from general write access. */
+       if (flags & O_APPEND)
+               acc_mode |= MAY_APPEND;
+
+       op->acc_mode = acc_mode;
+
+       op->intent = flags & O_PATH ? 0 : LOOKUP_OPEN;
+
+       if (flags & O_CREAT) {
+               op->intent |= LOOKUP_CREATE;
+               if (flags & O_EXCL)
+                       op->intent |= LOOKUP_EXCL;
+       }
+
+       if (flags & O_DIRECTORY)
+               lookup_flags |= LOOKUP_DIRECTORY;
+       if (!(flags & O_NOFOLLOW))
+               lookup_flags |= LOOKUP_FOLLOW;
+       return lookup_flags;
+}
+
+/**
+ * filp_open - open file and return file pointer
+ *
+ * @filename:  path to open
+ * @flags:     open flags as per the open(2) second argument
+ * @mode:      mode for the new file if O_CREAT is set, else ignored
+ *
+ * This is the helper to open a file from kernelspace if you really
+ * have to.  But in general you should not do this, so please move
+ * along, nothing to see here...
+ */
+struct file *filp_open(const char *filename, int flags, int mode)
+{
+       struct open_flags op;
+       int lookup = build_open_flags(flags, mode, &op);
+       return do_filp_open(AT_FDCWD, filename, &op, lookup);
+}
+EXPORT_SYMBOL(filp_open);
+
+struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
+                           const char *filename, int flags)
+{
+       struct open_flags op;
+       int lookup = build_open_flags(flags, 0, &op);
+       if (flags & O_CREAT)
+               return ERR_PTR(-EINVAL);
+       if (!filename && (flags & O_DIRECTORY))
+               if (!dentry->d_inode->i_op->lookup)
+                       return ERR_PTR(-ENOTDIR);
+       return do_file_open_root(dentry, mnt, filename, &op, lookup);
+}
+EXPORT_SYMBOL(file_open_root);
+
 long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
 {
+       struct open_flags op;
+       int lookup = build_open_flags(flags, mode, &op);
        char *tmp = getname(filename);
        int fd = PTR_ERR(tmp);
 
        if (!IS_ERR(tmp)) {
                fd = get_unused_fd_flags(flags);
                if (fd >= 0) {
-                       struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
+                       struct file *f = do_filp_open(dfd, tmp, &op, lookup);
                        if (IS_ERR(f)) {
                                put_unused_fd(fd);
                                fd = PTR_ERR(f);
@@ -960,8 +1076,10 @@ int filp_close(struct file *filp, fl_owner_t id)
        if (filp->f_op && filp->f_op->flush)
                retval = filp->f_op->flush(filp, id);
 
-       dnotify_flush(filp, id);
-       locks_remove_posix(filp, id);
+       if (likely(!(filp->f_mode & FMODE_PATH))) {
+               dnotify_flush(filp, id);
+               locks_remove_posix(filp, id);
+       }
        fput(filp);
        return retval;
 }
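Taken together, the fs/open.c changes above wire up O_PATH: build_open_flags() masks everything except O_DIRECTORY | O_NOFOLLOW | O_PATH and clears the access mode, __dentry_open() installs an empty file_operations for FMODE_PATH files, and filp_close() skips the dnotify/locks teardown for them. A hedged userspace sketch of the visible behaviour (O_PATH may need defining by hand against older headers; error handling trimmed):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	#ifndef O_PATH
	#define O_PATH 010000000	/* octal, as in the fcntl.h hunk below */
	#endif

	int main(void)
	{
		int dfd = open("/etc", O_PATH | O_DIRECTORY);
		char buf[1];

		/* No ->read: FMODE_PATH files carry the empty file_operations. */
		if (read(dfd, buf, 1) < 0 && errno == EBADF)
			printf("read on an O_PATH fd fails with EBADF, as expected\n");

		/* But the fd still works as an anchor for *at() lookups. */
		int fd = openat(dfd, "hostname", O_RDONLY);
		if (fd >= 0)
			close(fd);
		close(dfd);
		return 0;
	}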
index 789c625c7aa56e1c4b64dd3d483aca22e2c8fb29..b10e3540d5b711e3077d44b9ad1a5b44f210c19a 100644 (file)
@@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm)
        }
 
        vm->vblk_size     = get_unaligned_be32(data + 0x08);
+       if (vm->vblk_size == 0) {
+               ldm_error ("Illegal VBLK size");
+               return false;
+       }
+
        vm->vblk_offset   = get_unaligned_be32(data + 0x0C);
        vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
 
index 48cec7cbca176f80d8bedcf8e99f41bc977c6d4a..be03a0b08b47af8bc0761f9bbcca30846ce8a2f4 100644 (file)
 #include "check.h"
 #include "osf.h"
 
+#define MAX_OSF_PARTITIONS 8
+
 int osf_partition(struct parsed_partitions *state)
 {
        int i;
        int slot = 1;
+       unsigned int npartitions;
        Sector sect;
        unsigned char *data;
        struct disklabel {
@@ -45,7 +48,7 @@ int osf_partition(struct parsed_partitions *state)
                        u8  p_fstype;
                        u8  p_frag;
                        __le16 p_cpg;
-               } d_partitions[8];
+               } d_partitions[MAX_OSF_PARTITIONS];
        } * label;
        struct d_partition * partition;
 
@@ -63,7 +66,12 @@ int osf_partition(struct parsed_partitions *state)
                put_dev_sector(sect);
                return 0;
        }
-       for (i = 0 ; i < le16_to_cpu(label->d_npartitions); i++, partition++) {
+       npartitions = le16_to_cpu(label->d_npartitions);
+       if (npartitions > MAX_OSF_PARTITIONS) {
+               put_dev_sector(sect);
+               return 0;
+       }
+       for (i = 0 ; i < npartitions; i++, partition++) {
                if (slot == state->limit)
                        break;
                if (le32_to_cpu(partition->p_size))
index 9d096e82b201090b5a1fdac646f811cc6fa8bc57..d49c4b5d2c3e92c1ed8a4be1ef979df91762c323 100644 (file)
@@ -2620,35 +2620,6 @@ static const struct pid_entry proc_base_stuff[] = {
                &proc_self_inode_operations, NULL, {}),
 };
 
-/*
- *     Exceptional case: normally we are not allowed to unhash a busy
- * directory. In this case, however, we can do it - no aliasing problems
- * due to the way we treat inodes.
- */
-static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
-{
-       struct inode *inode;
-       struct task_struct *task;
-
-       if (nd->flags & LOOKUP_RCU)
-               return -ECHILD;
-
-       inode = dentry->d_inode;
-       task = get_proc_task(inode);
-       if (task) {
-               put_task_struct(task);
-               return 1;
-       }
-       d_drop(dentry);
-       return 0;
-}
-
-static const struct dentry_operations proc_base_dentry_operations =
-{
-       .d_revalidate   = proc_base_revalidate,
-       .d_delete       = pid_delete_dentry,
-};
-
 static struct dentry *proc_base_instantiate(struct inode *dir,
        struct dentry *dentry, struct task_struct *task, const void *ptr)
 {
@@ -2685,7 +2656,6 @@ static struct dentry *proc_base_instantiate(struct inode *dir,
        if (p->fop)
                inode->i_fop = p->fop;
        ei->op = p->op;
-       d_set_d_op(dentry, &proc_base_dentry_operations);
        d_add(dentry, inode);
        error = NULL;
 out:
index 176ce4cda68a113d10e3107830730848dc0cd7f5..d6a7ca1fdac53dfdf47fb7109207c0ec7597a3e7 100644 (file)
@@ -27,6 +27,7 @@
 static void proc_evict_inode(struct inode *inode)
 {
        struct proc_dir_entry *de;
+       struct ctl_table_header *head;
 
        truncate_inode_pages(&inode->i_data, 0);
        end_writeback(inode);
@@ -38,8 +39,11 @@ static void proc_evict_inode(struct inode *inode)
        de = PROC_I(inode)->pde;
        if (de)
                pde_put(de);
-       if (PROC_I(inode)->sysctl)
-               sysctl_head_put(PROC_I(inode)->sysctl);
+       head = PROC_I(inode)->sysctl;
+       if (head) {
+               rcu_assign_pointer(PROC_I(inode)->sysctl, NULL);
+               sysctl_head_put(head);
+       }
 }
 
 struct vfsmount *proc_mnt;
index d9396a4fc7ff2dc209269e70fe7170d07eb1f5bb..927cbd115e532857936a0ddc4d51de14149a37b7 100644 (file)
@@ -233,7 +233,7 @@ void __init proc_device_tree_init(void)
                return;
        root = of_find_node_by_path("/");
        if (root == NULL) {
-               printk(KERN_ERR "/proc/device-tree: can't find root\n");
+               pr_debug("/proc/device-tree: can't find root\n");
                return;
        }
        proc_device_tree_add_node(root, proc_device_tree);
index 09a1f92a34ef2fce52b07cf394d040708e86a22b..8eb2522111c5d8033a404a0d366832ea66e2d3fc 100644 (file)
@@ -408,15 +408,18 @@ static int proc_sys_compare(const struct dentry *parent,
                const struct dentry *dentry, const struct inode *inode,
                unsigned int len, const char *str, const struct qstr *name)
 {
+       struct ctl_table_header *head;
        /* Although proc doesn't have negative dentries, rcu-walk means
         * that inode here can be NULL */
+       /* AV: can it, indeed? */
        if (!inode)
-               return 0;
+               return 1;
        if (name->len != len)
                return 1;
        if (memcmp(name->name, str, len))
                return 1;
-       return !sysctl_is_seen(PROC_I(inode)->sysctl);
+       head = rcu_dereference(PROC_I(inode)->sysctl);
+       return !head || !sysctl_is_seen(head);
 }
 
 static const struct dentry_operations proc_sys_dentry_operations = {
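For a d_compare method, non-zero means "no match", so returning 1 for a NULL inode now makes rcu-walk treat such a dentry as not matching (and fall back) rather than matching it blindly. The sysctl head is also re-read through rcu_dereference() here, pairing with the rcu_assign_pointer(..., NULL) added to proc_evict_inode() above, so a concurrent eviction is seen as "no match" instead of a use-after-free.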
index 0bae036831e2ca2aeeae94eb78d74f2aa769b54d..1bba24bad82080382e3007a10da67b85305d88db 100644 (file)
@@ -1593,8 +1593,13 @@ int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
        struct inode *inode = dentry->d_inode;
        int maxlen = *lenp;
 
-       if (maxlen < 3)
+       if (need_parent && (maxlen < 5)) {
+               *lenp = 5;
                return 255;
+       } else if (maxlen < 3) {
+               *lenp = 3;
+               return 255;
+       }
 
        data[0] = inode->i_ino;
        data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
index ba5f51ec345829499982f17196fc61ac444429a0..4b2eb564fdadf66a677a22c719e7dea8277b9ad3 100644 (file)
@@ -771,7 +771,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
                                        EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE,
                                        dentry, inode, &security);
        if (retval) {
-               dir->i_nlink--;
+               DEC_DIR_INODE_NLINK(dir)
                goto out_failed;
        }
 
@@ -1122,10 +1122,6 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir,
                reiserfs_write_unlock(dir->i_sb);
                return -EMLINK;
        }
-       if (inode->i_nlink == 0) {
-               reiserfs_write_unlock(dir->i_sb);
-               return -ENOENT;
-       }
 
        /* inc before scheduling so reiserfs_unlink knows we are here */
        inc_nlink(inode);
index 3cfb2e93364424886f94807a15031dc90362142d..5c11ca82b7821c55050f0032f55e81d8cdacdcdf 100644 (file)
@@ -978,8 +978,6 @@ int reiserfs_permission(struct inode *inode, int mask, unsigned int flags)
 
 static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-       if (nd->flags & LOOKUP_RCU)
-               return -ECHILD;
        return -EPERM;
 }
 
index d5c61cf2b7033cb459920b556b235d38b865596c..961039121cb8cbde185bf1b8399c6e4ccc71cdf5 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -75,13 +75,16 @@ int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
        int error = -EINVAL;
        int lookup_flags = 0;
 
-       if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT)) != 0)
+       if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
+                     AT_EMPTY_PATH)) != 0)
                goto out;
 
        if (!(flag & AT_SYMLINK_NOFOLLOW))
                lookup_flags |= LOOKUP_FOLLOW;
        if (flag & AT_NO_AUTOMOUNT)
                lookup_flags |= LOOKUP_NO_AUTOMOUNT;
+       if (flag & AT_EMPTY_PATH)
+               lookup_flags |= LOOKUP_EMPTY;
 
        error = user_path_at(dfd, filename, lookup_flags, &path);
        if (error)
@@ -297,7 +300,7 @@ SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
        if (bufsiz <= 0)
                return -EINVAL;
 
-       error = user_path_at(dfd, pathname, 0, &path);
+       error = user_path_at(dfd, pathname, LOOKUP_EMPTY, &path);
        if (!error) {
                struct inode *inode = path.dentry->d_inode;
 
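The new AT_EMPTY_PATH flag (defined as 0x1000 in the fcntl.h hunk further down) lets an empty pathname mean "operate on dfd itself", and readlinkat() now passes LOOKUP_EMPTY unconditionally. A hedged userspace sketch pairing it with an O_PATH descriptor (constants guarded for older headers):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/stat.h>

	#ifndef O_PATH
	#define O_PATH 010000000
	#endif
	#ifndef AT_EMPTY_PATH
	#define AT_EMPTY_PATH 0x1000
	#endif

	int main(void)
	{
		struct stat st;
		int fd = open("/etc/hostname", O_PATH);

		if (fd >= 0 && fstatat(fd, "", &st, AT_EMPTY_PATH) == 0)
			printf("size=%lld\n", (long long)st.st_size);
		return 0;
	}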
index 30ea8c8a996b1d16b568e021c79c05b993c746c7..8244924dec55fd863bc7e0649e1686f78b336c8d 100644 (file)
@@ -73,149 +73,135 @@ int vfs_statfs(struct path *path, struct kstatfs *buf)
 }
 EXPORT_SYMBOL(vfs_statfs);
 
-static int do_statfs_native(struct path *path, struct statfs *buf)
+int user_statfs(const char __user *pathname, struct kstatfs *st)
 {
-       struct kstatfs st;
-       int retval;
+       struct path path;
+       int error = user_path(pathname, &path);
+       if (!error) {
+               error = vfs_statfs(&path, st);
+               path_put(&path);
+       }
+       return error;
+}
 
-       retval = vfs_statfs(path, &st);
-       if (retval)
-               return retval;
+int fd_statfs(int fd, struct kstatfs *st)
+{
+       struct file *file = fget(fd);
+       int error = -EBADF;
+       if (file) {
+               error = vfs_statfs(&file->f_path, st);
+               fput(file);
+       }
+       return error;
+}
 
-       if (sizeof(*buf) == sizeof(st))
-               memcpy(buf, &st, sizeof(st));
+static int do_statfs_native(struct kstatfs *st, struct statfs __user *p)
+{
+       struct statfs buf;
+
+       if (sizeof(buf) == sizeof(*st))
+               memcpy(&buf, st, sizeof(*st));
        else {
-               if (sizeof buf->f_blocks == 4) {
-                       if ((st.f_blocks | st.f_bfree | st.f_bavail |
-                            st.f_bsize | st.f_frsize) &
+               if (sizeof buf.f_blocks == 4) {
+                       if ((st->f_blocks | st->f_bfree | st->f_bavail |
+                            st->f_bsize | st->f_frsize) &
                            0xffffffff00000000ULL)
                                return -EOVERFLOW;
                        /*
                         * f_files and f_ffree may be -1; it's okay to stuff
                         * that into 32 bits
                         */
-                       if (st.f_files != -1 &&
-                           (st.f_files & 0xffffffff00000000ULL))
+                       if (st->f_files != -1 &&
+                           (st->f_files & 0xffffffff00000000ULL))
                                return -EOVERFLOW;
-                       if (st.f_ffree != -1 &&
-                           (st.f_ffree & 0xffffffff00000000ULL))
+                       if (st->f_ffree != -1 &&
+                           (st->f_ffree & 0xffffffff00000000ULL))
                                return -EOVERFLOW;
                }
 
-               buf->f_type = st.f_type;
-               buf->f_bsize = st.f_bsize;
-               buf->f_blocks = st.f_blocks;
-               buf->f_bfree = st.f_bfree;
-               buf->f_bavail = st.f_bavail;
-               buf->f_files = st.f_files;
-               buf->f_ffree = st.f_ffree;
-               buf->f_fsid = st.f_fsid;
-               buf->f_namelen = st.f_namelen;
-               buf->f_frsize = st.f_frsize;
-               buf->f_flags = st.f_flags;
-               memset(buf->f_spare, 0, sizeof(buf->f_spare));
+               buf.f_type = st->f_type;
+               buf.f_bsize = st->f_bsize;
+               buf.f_blocks = st->f_blocks;
+               buf.f_bfree = st->f_bfree;
+               buf.f_bavail = st->f_bavail;
+               buf.f_files = st->f_files;
+               buf.f_ffree = st->f_ffree;
+               buf.f_fsid = st->f_fsid;
+               buf.f_namelen = st->f_namelen;
+               buf.f_frsize = st->f_frsize;
+               buf.f_flags = st->f_flags;
+               memset(buf.f_spare, 0, sizeof(buf.f_spare));
        }
+       if (copy_to_user(p, &buf, sizeof(buf)))
+               return -EFAULT;
        return 0;
 }
 
-static int do_statfs64(struct path *path, struct statfs64 *buf)
+static int do_statfs64(struct kstatfs *st, struct statfs64 __user *p)
 {
-       struct kstatfs st;
-       int retval;
-
-       retval = vfs_statfs(path, &st);
-       if (retval)
-               return retval;
-
-       if (sizeof(*buf) == sizeof(st))
-               memcpy(buf, &st, sizeof(st));
+       struct statfs64 buf;
+       if (sizeof(buf) == sizeof(*st))
+               memcpy(&buf, st, sizeof(*st));
        else {
-               buf->f_type = st.f_type;
-               buf->f_bsize = st.f_bsize;
-               buf->f_blocks = st.f_blocks;
-               buf->f_bfree = st.f_bfree;
-               buf->f_bavail = st.f_bavail;
-               buf->f_files = st.f_files;
-               buf->f_ffree = st.f_ffree;
-               buf->f_fsid = st.f_fsid;
-               buf->f_namelen = st.f_namelen;
-               buf->f_frsize = st.f_frsize;
-               buf->f_flags = st.f_flags;
-               memset(buf->f_spare, 0, sizeof(buf->f_spare));
+               buf.f_type = st->f_type;
+               buf.f_bsize = st->f_bsize;
+               buf.f_blocks = st->f_blocks;
+               buf.f_bfree = st->f_bfree;
+               buf.f_bavail = st->f_bavail;
+               buf.f_files = st->f_files;
+               buf.f_ffree = st->f_ffree;
+               buf.f_fsid = st->f_fsid;
+               buf.f_namelen = st->f_namelen;
+               buf.f_frsize = st->f_frsize;
+               buf.f_flags = st->f_flags;
+               memset(buf.f_spare, 0, sizeof(buf.f_spare));
        }
+       if (copy_to_user(p, &buf, sizeof(buf)))
+               return -EFAULT;
        return 0;
 }
 
 SYSCALL_DEFINE2(statfs, const char __user *, pathname, struct statfs __user *, buf)
 {
-       struct path path;
-       int error;
-
-       error = user_path(pathname, &path);
-       if (!error) {
-               struct statfs tmp;
-               error = do_statfs_native(&path, &tmp);
-               if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
-                       error = -EFAULT;
-               path_put(&path);
-       }
+       struct kstatfs st;
+       int error = user_statfs(pathname, &st);
+       if (!error)
+               error = do_statfs_native(&st, buf);
        return error;
 }
 
 SYSCALL_DEFINE3(statfs64, const char __user *, pathname, size_t, sz, struct statfs64 __user *, buf)
 {
-       struct path path;
-       long error;
-
+       struct kstatfs st;
+       int error;
        if (sz != sizeof(*buf))
                return -EINVAL;
-       error = user_path(pathname, &path);
-       if (!error) {
-               struct statfs64 tmp;
-               error = do_statfs64(&path, &tmp);
-               if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
-                       error = -EFAULT;
-               path_put(&path);
-       }
+       error = user_statfs(pathname, &st);
+       if (!error)
+               error = do_statfs64(&st, buf);
        return error;
 }
 
 SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct statfs __user *, buf)
 {
-       struct file *file;
-       struct statfs tmp;
-       int error;
-
-       error = -EBADF;
-       file = fget(fd);
-       if (!file)
-               goto out;
-       error = do_statfs_native(&file->f_path, &tmp);
-       if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
-               error = -EFAULT;
-       fput(file);
-out:
+       struct kstatfs st;
+       int error = fd_statfs(fd, &st);
+       if (!error)
+               error = do_statfs_native(&st, buf);
        return error;
 }
 
 SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, size_t, sz, struct statfs64 __user *, buf)
 {
-       struct file *file;
-       struct statfs64 tmp;
+       struct kstatfs st;
        int error;
 
        if (sz != sizeof(*buf))
                return -EINVAL;
 
-       error = -EBADF;
-       file = fget(fd);
-       if (!file)
-               goto out;
-       error = do_statfs64(&file->f_path, &tmp);
-       if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
-               error = -EFAULT;
-       fput(file);
-out:
+       error = fd_statfs(fd, &st);
+       if (!error)
+               error = do_statfs64(&st, buf);
        return error;
 }
 
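Two things are worth spelling out in the statfs rework above. First, the f_files != -1 and f_ffree != -1 exemptions exist because filesystems report "unknown" as all-ones, which would otherwise trip the 0xffffffff00000000ULL high-bits mask and turn a legal value into EOVERFLOW. Second, every syscall body now composes the same two steps, fetch then convert-and-copy, which the new user_statfs()/fd_statfs() helpers (exported via fs.h below) make reusable elsewhere in the kernel; mirroring the syscall bodies above:

	struct kstatfs st;
	int error = user_statfs(pathname, &st);		/* or fd_statfs(fd, &st) */
	if (!error)
		error = do_statfs_native(&st, buf);	/* convert + copy_to_user() */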
index b427b1208c26323eff8b734a84b6feb272332d4c..e474fbcf8bde991924da62e321d2be7eae8ac5f6 100644 (file)
@@ -245,7 +245,6 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
                new_de = sysv_find_entry(new_dentry, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                sysv_set_link(new_de, new_page, old_inode);
                new_inode->i_ctime = CURRENT_TIME_SEC;
                if (dir_de)
@@ -257,18 +256,15 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
                        if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = sysv_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
 
        sysv_delete_entry(old_de, old_page);
-       inode_dec_link_count(old_inode);
+       mark_inode_dirty(old_inode);
 
        if (dir_de) {
                sysv_set_link(dir_de, dir_page, new_dir);
index 14f64b689d7f58edef4be03d58a8712c442f62ef..7217d67a80a691a8114e4d7f04d2f98037d03997 100644 (file)
@@ -522,24 +522,6 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
        ubifs_assert(mutex_is_locked(&dir->i_mutex));
        ubifs_assert(mutex_is_locked(&inode->i_mutex));
 
-       /*
-        * Return -ENOENT if we've raced with unlink and i_nlink is 0.  Doing
-        * otherwise has the potential to corrupt the orphan inode list.
-        *
-        * Indeed, consider a scenario when 'vfs_link(dirA/fileA)' and
-        * 'vfs_unlink(dirA/fileA, dirB/fileB)' race. 'vfs_link()' does not
-        * lock 'dirA->i_mutex', so this is possible. Both of the functions
-        * lock 'fileA->i_mutex' though. Suppose 'vfs_unlink()' wins, and takes
-        * 'fileA->i_mutex' mutex first. Suppose 'fileA->i_nlink' is 1. In this
-        * case 'ubifs_unlink()' will drop the last reference, and put 'inodeA'
-        * to the list of orphans. After this, 'vfs_link()' will link
-        * 'dirB/fileB' to 'inodeA'. This is a problem because, for example,
-        * the subsequent 'vfs_unlink(dirB/fileB)' will add the same inode
-        * to the list of orphans.
-        */
-        if (inode->i_nlink == 0)
-                return -ENOENT;
-
        err = dbg_check_synced_i_size(inode);
        if (err)
                return err;
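The guard removed here, like the matching i_nlink == 0 check deleted from reiserfs_link() above, appears redundant once the VFS performs the same check before calling ->link(); that is a hedged reading, since the VFS-side change is not part of this excerpt.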
index 2be0f9eb86d2d6878943ad4030667ec2a8a2ef0b..f1dce848ef966ea1853c9f50ce2d9cc997114a18 100644 (file)
@@ -32,6 +32,8 @@
 #include <linux/crc-itu-t.h>
 #include <linux/exportfs.h>
 
+enum { UDF_MAX_LINKS = 0xffff };
+
 static inline int udf_match(int len1, const unsigned char *name1, int len2,
                            const unsigned char *name2)
 {
@@ -650,7 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        struct udf_inode_info *iinfo;
 
        err = -EMLINK;
-       if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1)
+       if (dir->i_nlink >= UDF_MAX_LINKS)
                goto out;
 
        err = -EIO;
@@ -1034,9 +1036,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
        struct fileIdentDesc cfi, *fi;
        int err;
 
-       if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) {
+       if (inode->i_nlink >= UDF_MAX_LINKS)
                return -EMLINK;
-       }
 
        fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
        if (!fi) {
@@ -1131,9 +1132,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
                        goto end_rename;
 
                retval = -EMLINK;
-               if (!new_inode &&
-                       new_dir->i_nlink >=
-                               (256 << sizeof(new_dir->i_nlink)) - 1)
+               if (!new_inode && new_dir->i_nlink >= UDF_MAX_LINKS)
                        goto end_rename;
        }
        if (!nfi) {
@@ -1287,8 +1286,13 @@ static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp,
        struct fid *fid = (struct fid *)fh;
        int type = FILEID_UDF_WITHOUT_PARENT;
 
-       if (len < 3 || (connectable && len < 5))
+       if (connectable && (len < 5)) {
+               *lenp = 5;
                return 255;
+       } else if (len < 3) {
+               *lenp = 3;
+               return 255;
+       }
 
        *lenp = 3;
        fid->udf.block = location.logicalBlockNum;
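Why the macro: 256 << sizeof(i_nlink) shifts by a size in bytes where a size in bits was meant, so with a 4-byte i_nlink (an assumption about struct inode of this era) the old guard evaluated to 256 << 4 = 4096, allowing at most 4095 links, well short of the 16-bit on-disk link count whose real ceiling is UDF_MAX_LINKS = 0xffff = 65535.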
index 12f39b9e4437db73784cd77b8e7f898457d157db..d6f681535eb83f32d91a681d49044fca00de25f4 100644 (file)
@@ -306,7 +306,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
                new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                ufs_set_link(new_dir, new_de, new_page, old_inode);
                new_inode->i_ctime = CURRENT_TIME_SEC;
                if (dir_de)
@@ -318,12 +317,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        if (new_dir->i_nlink >= UFS_LINK_MAX)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = ufs_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
@@ -331,12 +327,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
        /*
         * Like most other Unix systems, set the ctime for inodes on a
         * rename.
-        * inode_dec_link_count() will mark the inode dirty.
         */
        old_inode->i_ctime = CURRENT_TIME_SEC;
 
        ufs_delete_entry(old_dir, old_de, old_page);
-       inode_dec_link_count(old_inode);
+       mark_inode_dirty(old_inode);
 
        if (dir_de) {
                ufs_set_link(old_inode, dir_de, dir_page, new_dir);
index 05201ae719e5f2054bb3527e30ead2e8a41f12ff..d61611c88012c8daaab1840f9534983fac62bd2d 100644 (file)
@@ -152,6 +152,8 @@ xfs_ioc_trim(
 
        if (!capable(CAP_SYS_ADMIN))
                return -XFS_ERROR(EPERM);
+       if (!blk_queue_discard(q))
+               return -XFS_ERROR(EOPNOTSUPP);
        if (copy_from_user(&range, urange, sizeof(range)))
                return -XFS_ERROR(EFAULT);
 
index fc0114da7fdd07b86440b2326c9b76a97c96b229..f4f878fc008316e816c24de3cdb1b86908ce9b52 100644 (file)
@@ -89,8 +89,10 @@ xfs_fs_encode_fh(
         * seven combinations work.  The real answer is "don't use v2".
         */
        len = xfs_fileid_length(fileid_type);
-       if (*max_len < len)
+       if (*max_len < len) {
+               *max_len = len;
                return 255;
+       }
        *max_len = len;
 
        switch (fileid_type) {
index f5e2a19e0f8eb25114fa8de3478b289d4972b8a9..0ca0e3c024d7bac3993707dd7309b3fda9664a07 100644 (file)
@@ -695,14 +695,19 @@ xfs_ioc_fsgeometry_v1(
        xfs_mount_t             *mp,
        void                    __user *arg)
 {
-       xfs_fsop_geom_v1_t      fsgeo;
+       xfs_fsop_geom_t         fsgeo;
        int                     error;
 
-       error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
+       error = xfs_fs_geometry(mp, &fsgeo, 3);
        if (error)
                return -error;
 
-       if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
+       /*
+        * Caller should have passed an argument of type
+        * xfs_fsop_geom_v1_t.  This is a proper subset of the
+        * xfs_fsop_geom_t that xfs_fs_geometry() fills in.
+        */
+       if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t)))
                return -XFS_ERROR(EFAULT);
        return 0;
 }
index cec89dd5d7d28262ae064e257ec941a815cbb09f..85668efb3e3e03221b5e6dd3f517ad2e38b16f94 100644 (file)
@@ -53,6 +53,9 @@ xfs_fs_geometry(
        xfs_fsop_geom_t         *geo,
        int                     new_version)
 {
+
+       memset(geo, 0, sizeof(*geo));
+
        geo->blocksize = mp->m_sb.sb_blocksize;
        geo->rtextsize = mp->m_sb.sb_rextsize;
        geo->agblocks = mp->m_sb.sb_agblocks;
index 2bcc5c7c22a6329752c666f409a1a2abefbbc430..61e03dd7939e68482189dfa063e66259abb965a3 100644 (file)
@@ -30,6 +30,9 @@ typedef u64 cputime64_t;
 #define cputime64_to_jiffies64(__ct)   (__ct)
 #define jiffies64_to_cputime64(__jif)  (__jif)
 #define cputime_to_cputime64(__ct)     ((u64) __ct)
+#define cputime64_gt(__a, __b)         ((__a) >  (__b))
+
+#define nsecs_to_cputime64(__ct)       nsecs_to_jiffies64(__ct)
 
 
 /*
index 0fc16e3f0bfcc01e0f4e9016f82b0b5387d5317a..84793c7025e2604f08e354d09f3db5aff5467248 100644 (file)
 #define O_SYNC         (__O_SYNC|O_DSYNC)
 #endif
 
+#ifndef O_PATH
+#define O_PATH         010000000
+#endif
+
 #ifndef O_NDELAY
 #define O_NDELAY       O_NONBLOCK
 #endif
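Note the fallback value is octal, like the other O_* bits in this header: 010000000 is 1 << 21 (0x200000), keeping O_PATH clear of the existing flag bits.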
index 3c2344f48136d98351f92b6e9d678e29c052658c..01f227e14254f21b86e8c4b44d84ed3f81a55850 100644 (file)
@@ -6,7 +6,7 @@
 #include <asm/errno.h>
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -16,7 +16,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
        pagefault_disable();
@@ -48,7 +48,8 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
 {
        return -ENOSYS;
 }
index 31b6188df221f6114eb5c54bdc83673e8130e525..b4bfe338ea0e568258bc66ffffa4b03c6e51ca6d 100644 (file)
@@ -4,6 +4,8 @@
 #ifndef __ASSEMBLY__
 #ifdef CONFIG_MMU
 
+#include <linux/mm_types.h>
+
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
index b3bfabc258f369b576d3660c41955521dbeb21ac..c1a1216e29ced8ad92dac7c089991e7bf97bd2c3 100644 (file)
@@ -11,6 +11,7 @@ extern char _sinittext[], _einittext[];
 extern char _end[];
 extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
+extern char __entry_text_start[], __entry_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __start_rodata[], __end_rodata[];
 
index b969770196c2194ae5f259228b2edce69418b3dd..57af0338d2709972d7dfee827f67da05e2efdb7d 100644 (file)
@@ -646,9 +646,13 @@ __SYSCALL(__NR_prlimit64, sys_prlimit64)
 __SYSCALL(__NR_fanotify_init, sys_fanotify_init)
 #define __NR_fanotify_mark 263
 __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
+#define __NR_name_to_handle_at         264
+__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
+#define __NR_open_by_handle_at         265
+__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
 
 #undef __NR_syscalls
-#define __NR_syscalls 264
+#define __NR_syscalls 266
 
 /*
  * All syscalls below here should go away really,
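The new syscall pair works with the struct file_handle added to linux/fs.h further down (handle_bytes is an in/out field) and MAX_HANDLE_SZ from exportfs.h. A hedged userspace sketch via raw syscall(2), since libc wrappers do not exist yet; the numbers 264/265 are the asm-generic ones from this hunk and differ per architecture, and open_by_handle_at() requires CAP_DAC_READ_SEARCH:

	#include <fcntl.h>
	#include <stdlib.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	struct file_handle {
		unsigned int handle_bytes;	/* in: buffer size; out: bytes used */
		int handle_type;		/* opaque, filesystem-specific */
		unsigned char f_handle[0];
	};

	int main(void)
	{
		struct file_handle *fh = malloc(sizeof(*fh) + 128 /* MAX_HANDLE_SZ */);
		int mount_id;

		fh->handle_bytes = 128;
		if (syscall(264 /* name_to_handle_at */, AT_FDCWD,
			    "/etc/hostname", fh, &mount_id, 0))
			return 1;

		/* mount_fd: any open fd on the filesystem holding the handle. */
		int mount_fd = open("/etc", O_RDONLY);
		int fd = syscall(265 /* open_by_handle_at */, mount_fd, fh, O_RDONLY);
		return fd < 0;
	}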
index fe77e3395b40b0544ff9f939524ead2cb2263aba..906c3ceca9a2b245eeb8456e035f77a75b104af4 100644 (file)
                *(.kprobes.text)                                        \
                VMLINUX_SYMBOL(__kprobes_text_end) = .;
 
+#define ENTRY_TEXT                                                     \
+               ALIGN_FUNCTION();                                       \
+               VMLINUX_SYMBOL(__entry_text_start) = .;                 \
+               *(.entry.text)                                          \
+               VMLINUX_SYMBOL(__entry_text_end) = .;
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define IRQENTRY_TEXT                                                  \
                ALIGN_FUNCTION();                                       \
index fe29aadb129d9d7e8e6b67a5d98459039859dc7d..348843b80150b1aa4a365ac95b469c1acb7a6453 100644 (file)
@@ -1101,7 +1101,7 @@ struct drm_device {
        struct platform_device *platformdev; /**< Platform device structure */
 
        struct drm_sg_mem *sg;  /**< Scatter gather memory */
-       int num_crtcs;                  /**< Number of CRTCs on this device */
+       unsigned int num_crtcs;                  /**< Number of CRTCs on this device */
        void *dev_private;              /**< device private data */
        void *mm_private;
        struct address_space *dev_mapping;
index 5cb86c307f5d791298ae0add65bcd1f98dd781fb..fc487543381799c4c4eb421d6dd2f802b9543d99 100644 (file)
@@ -99,7 +99,6 @@ struct rxrpc_key_token {
  * structure of raw payloads passed to add_key() or instantiate key
  */
 struct rxrpc_key_data_v1 {
-       u32             kif_version;            /* 1 */
        u16             security_index;
        u16             ticket_length;
        u32             expiry;                 /* time_t */
index 4d18ff34670a4a882e5d08e83b1633ecd2973610..d5063e1b55559f0ecfa1e0e757d136510c99b4bf 100644 (file)
@@ -699,7 +699,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *);
+extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
 extern void blk_run_queue(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,
@@ -1088,7 +1088,6 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
 /*
@@ -1136,7 +1135,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
-extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
 extern void throtl_shutdown_timer_wq(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
@@ -1146,7 +1144,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
 
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
-static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
 static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 
index 3395cf7130f5dbdd4e29863c3c43e03fafe5e615..b22fb0d3db0f5fb0fa364bbae9e12ee46b8e918a 100644 (file)
@@ -245,7 +245,6 @@ static inline int blk_cmd_buf_len(struct request *rq)
 
 extern void blk_dump_cmd(char *buf, struct request *rq);
 extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
-extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq);
 
 #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
 
index c3011beac30d18023ba8a7a8d8305050cb6a37df..31d91a64838be0326ee3d8d76032f67181008dbc 100644 (file)
@@ -123,6 +123,7 @@ struct ceph_msg_pos {
 #define SOCK_CLOSED    11 /* socket state changed to closed */
 #define OPENING         13 /* open connection w/ (possibly new) peer */
 #define DEAD            14 /* dead, about to kfree */
+#define BACKOFF         15
 
 /*
  * A single connection with another host.
@@ -160,7 +161,6 @@ struct ceph_connection {
        struct list_head out_queue;
        struct list_head out_sent;   /* sending or sent but unacked */
        u64 out_seq;                 /* last message queued for send */
-       bool out_keepalive_pending;
 
        u64 in_seq, in_seq_acked;  /* last message received, acked */
 
index ce104e33cd22e393a15cd7b566381393804eeaf4..e654fa239916e691c24317d43fc88aea5fef571a 100644 (file)
@@ -474,7 +474,8 @@ struct cgroup_subsys {
                        struct cgroup *old_cgrp, struct task_struct *tsk,
                        bool threadgroup);
        void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
-       void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
+       void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp,
+                       struct cgroup *old_cgrp, struct task_struct *task);
        int (*populate)(struct cgroup_subsys *ss,
                        struct cgroup *cgrp);
        void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
@@ -626,6 +627,7 @@ bool css_is_ancestor(struct cgroup_subsys_state *cg,
 /* Get id and depth of css */
 unsigned short css_id(struct cgroup_subsys_state *css);
 unsigned short css_depth(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
 
 #else /* !CONFIG_CGROUPS */
 
index ccefff02b6cb005123c4b904e06116cb3a850474..cdbfcb8780ec94d4e69ea9ab0826e31d49ca8cfe 100644 (file)
@@ -65,4 +65,8 @@ SUBSYS(net_cls)
 SUBSYS(blkio)
 #endif
 
+#ifdef CONFIG_CGROUP_PERF
+SUBSYS(perf)
+#endif
+
 /* */
index 68cd248f6d3e20acba330fc7f9a92df1e3f278ab..66900e3c6eb1ba0c33d741725e7a532f2b555012 100644 (file)
@@ -101,8 +101,8 @@ struct ieee_pfc {
  */
 struct dcb_app {
        __u8    selector;
-       __u32   protocol;
        __u8    priority;
+       __u16   protocol;
 };
 
 struct dcbmsg {
index 597692f1fc8dfb05383914af2b2a254d944fbc48..65970b811e22359a7e94afbbf1e0e204ce4ad4dc 100644 (file)
@@ -34,7 +34,10 @@ struct debug_obj {
 
 /**
  * struct debug_obj_descr - object type specific debug description structure
+ *
 * @name:              name of the object type
+ * @debug_hint:                function returning an address which has an associated
+ *                     kernel symbol, to allow identifying the object
  * @fixup_init:                fixup function, which is called when the init check
  *                     fails
  * @fixup_activate:    fixup function, which is called when the activate check
@@ -46,7 +49,7 @@ struct debug_obj {
  */
 struct debug_obj_descr {
        const char              *name;
-
+       void *(*debug_hint)     (void *addr);
        int (*fixup_init)       (void *addr, enum debug_obj_state state);
        int (*fixup_activate)   (void *addr, enum debug_obj_state state);
        int (*fixup_destroy)    (void *addr, enum debug_obj_state state);
index 28028988c8627dbc6c065335ba6ca7d9c5bb64ee..33a42f24b2757a6da1b11a10cd50040bc583473f 100644 (file)
@@ -8,6 +8,9 @@ struct inode;
 struct super_block;
 struct vfsmount;
 
+/* limit the handle size to the NFSv4 handle size for now */
+#define MAX_HANDLE_SZ 128
+
 /*
  * The fileid_type identifies how the file within the filesystem is encoded.
  * In theory this is freely set and parsed by the filesystem, but we try to
@@ -121,8 +124,10 @@ struct fid {
  *    set, the encode_fh() should store sufficient information so that a good
 *    attempt can be made to find not only the file but also its place in the
  *    filesystem.   This typically means storing a reference to de->d_parent in
- *    the filehandle fragment.  encode_fh() should return the number of bytes
- *    stored or a negative error code such as %-ENOSPC
+ *    the filehandle fragment.  encode_fh() should return the fileid_type on
+ *    success, and 255 on error (i.e. if the space needed to encode the fh is
+ *    greater than @max_len*4 bytes).  On error, @max_len contains the minimum
+ *    size (in 4-byte units) needed to encode the file handle.
  *
  * fh_to_dentry:
  *    @fh_to_dentry is given a &struct super_block (@sb) and a file handle
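Under the updated contract documented above, an ->encode_fh() that cannot fit the handle reports the size it needs through @max_len and returns 255 rather than failing opaquely; the reiserfs, udf and xfs conversions earlier in this series all follow the same shape. A hedged sketch (lengths and fileid type are illustrative only):

	#include <linux/exportfs.h>

	static int example_encode_fh(struct dentry *de, __u32 *fh, int *max_len,
				     int connectable)
	{
		int len = connectable ? 5 : 3;	/* illustrative, in 4-byte words */

		if (*max_len < len) {
			*max_len = len;		/* tell the caller what it needs */
			return 255;		/* "buffer too small" */
		}
		*max_len = len;
		/* ... fill fh[0 .. len-1] with inode/parent identifiers ... */
		return FILEID_INO32_GEN;	/* illustrative fileid_type */
	}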
index a562fa5fb4e3ca2d879c6e7cb359b969e1d1a2e6..f550f894ba15edabd684bd7aa5bdcac3f4df257e 100644 (file)
@@ -46,6 +46,7 @@
                                            unlinking file.  */
 #define AT_SYMLINK_FOLLOW      0x400   /* Follow symbolic links.  */
 #define AT_NO_AUTOMOUNT                0x800   /* Suppress terminal automount traversal */
+#define AT_EMPTY_PATH          0x1000  /* Allow empty relative pathname */
 
 #ifdef __KERNEL__
 
index e85baebf62798b6d17e5e9376656230695a434e6..21a79958541cba4c7f664063edb92ee5d6845d2e 100644 (file)
@@ -29,6 +29,8 @@ static inline void fput_light(struct file *file, int fput_needed)
 
 extern struct file *fget(unsigned int fd);
 extern struct file *fget_light(unsigned int fd, int *fput_needed);
+extern struct file *fget_raw(unsigned int fd);
+extern struct file *fget_raw_light(unsigned int fd, int *fput_needed);
 extern void set_close_on_exec(unsigned int fd, int flag);
 extern void put_filp(struct file *);
 extern int alloc_fd(unsigned start, unsigned flags);
index bd3215940c3746ec22d8351854649f9b5b485f67..13df14e2c42e6e2a76a96d509c91f691f67e3858 100644 (file)
@@ -102,6 +102,9 @@ struct inodes_stat_t {
 /* File is huge (eg. /dev/kmem): treat loff_t as unsigned */
 #define FMODE_UNSIGNED_OFFSET  ((__force fmode_t)0x2000)
 
+/* File is opened with O_PATH; almost nothing can be done with it */
+#define FMODE_PATH             ((__force fmode_t)0x4000)
+
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY         ((__force fmode_t)0x1000000)
 
@@ -649,6 +652,7 @@ struct address_space {
        spinlock_t              private_lock;   /* for use by the address_space */
        struct list_head        private_list;   /* ditto */
        struct address_space    *assoc_mapping; /* ditto */
+       struct mutex            unmap_mutex;    /* to protect unmapping */
 } __attribute__((aligned(sizeof(long))));
        /*
         * On most architectures that alignment is already the case; but
@@ -977,6 +981,13 @@ struct file {
 #endif
 };
 
+struct file_handle {
+       __u32 handle_bytes;
+       int handle_type;
+       /* file identifier */
+       unsigned char f_handle[0];
+};
+
 #define get_file(x)    atomic_long_inc(&(x)->f_count)
 #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
 #define file_count(x)  atomic_long_read(&(x)->f_count)
@@ -1400,6 +1411,7 @@ struct super_block {
        wait_queue_head_t       s_wait_unfrozen;
 
        char s_id[32];                          /* Informational name */
+       u8 s_uuid[16];                          /* UUID */
 
        void                    *s_fs_info;     /* Filesystem private info */
        fmode_t                 s_mode;
@@ -1873,6 +1885,8 @@ extern void drop_collected_mounts(struct vfsmount *);
 extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
                          struct vfsmount *);
 extern int vfs_statfs(struct path *, struct kstatfs *);
+extern int user_statfs(const char __user *, struct kstatfs *);
+extern int fd_statfs(int, struct kstatfs *);
 extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
 extern int freeze_super(struct super_block *super);
 extern int thaw_super(struct super_block *super);
@@ -1989,6 +2003,8 @@ extern int do_fallocate(struct file *file, int mode, loff_t offset,
 extern long do_sys_open(int dfd, const char __user *filename, int flags,
                        int mode);
 extern struct file *filp_open(const char *, int, int);
+extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+                                  const char *, int);
 extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
                                 const struct cred *);
 extern int filp_close(struct file *, fl_owner_t id);
@@ -2139,7 +2155,7 @@ extern void check_disk_size_change(struct gendisk *disk,
                                   struct block_device *bdev);
 extern int revalidate_disk(struct gendisk *);
 extern int check_disk_change(struct block_device *);
-extern int __invalidate_device(struct block_device *);
+extern int __invalidate_device(struct block_device *, bool);
 extern int invalidate_partition(struct gendisk *, int);
 #endif
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
@@ -2204,10 +2220,6 @@ extern struct file *create_read_pipe(struct file *f, int flags);
 extern struct file *create_write_pipe(int flags);
 extern void free_write_pipe(struct file *);
 
-extern struct file *do_filp_open(int dfd, const char *pathname,
-               int open_flag, int mode, int acc_mode);
-extern int may_open(struct path *, int, int);
-
 extern int kernel_read(struct file *, loff_t, char *, unsigned long);
 extern struct file * open_exec(const char *);
  
@@ -2225,6 +2237,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
 
 extern int inode_init_always(struct super_block *, struct inode *);
 extern void inode_init_once(struct inode *);
+extern void address_space_init_once(struct address_space *mapping);
 extern void ihold(struct inode * inode);
 extern void iput(struct inode *);
 extern struct inode * igrab(struct inode *);
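struct file_handle is the user-visible half of the name_to_handle_at()/open_by_handle_at() pair merged in the same window. A hedged userspace sketch, assuming a libc that exposes the wrappers and MAX_HANDLE_SZ (otherwise the raw syscall is needed):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            int mount_id;
            struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);

            fh->handle_bytes = MAX_HANDLE_SZ;  /* in: buffer size; out: bytes used */
            if (name_to_handle_at(AT_FDCWD, "/etc/passwd", fh, &mount_id, 0) == 0)
                    printf("type %d, %u bytes\n", fh->handle_type, fh->handle_bytes);
            free(fh);
            return 0;
    }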
index dcd6a7c3a4358b310b430a16dc52f43b547e02f2..ca29e03c1fac0c1d461814405b5c0724dc61bc99 100644 (file)
@@ -428,6 +428,7 @@ extern void unregister_ftrace_graph(void);
 
 extern void ftrace_graph_init_task(struct task_struct *t);
 extern void ftrace_graph_exit_task(struct task_struct *t);
+extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
 
 static inline int task_curr_ret_stack(struct task_struct *t)
 {
@@ -451,6 +452,7 @@ static inline void unpause_graph_tracing(void)
 
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
 
 static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                          trace_func_graph_ent_t entryfunc)
index 47e3997f7b5cf39233283ff43d84937daa502c2f..22b32af1b5ec3d1aa81ddebbc2de1ace29c17cc6 100644 (file)
@@ -37,7 +37,6 @@ struct trace_entry {
        unsigned char           flags;
        unsigned char           preempt_count;
        int                     pid;
-       int                     lock_depth;
 };
 
 #define FTRACE_MAX_EVENT                                               \
@@ -208,7 +207,6 @@ struct ftrace_event_call {
 
 #define PERF_MAX_TRACE_SIZE    2048
 
-#define MAX_FILTER_PRED                32
 #define MAX_FILTER_STR_VAL     256     /* Should handle KSYM_SYMBOL_LEN */
 
 extern void destroy_preds(struct ftrace_event_call *call);
index 0b84c61607e8ce808dbb6b2e611a01a6ade11646..dca31761b3110e92f5a0e28e47cef0c60d910cef 100644 (file)
@@ -332,16 +332,19 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
        return alloc_pages_current(gfp_mask, order);
 }
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
-                       struct vm_area_struct *vma, unsigned long addr);
+                       struct vm_area_struct *vma, unsigned long addr,
+                       int node);
 #else
 #define alloc_pages(gfp_mask, order) \
                alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr)    \
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node)      \
        alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-#define alloc_page_vma(gfp_mask, vma, addr)    \
-       alloc_pages_vma(gfp_mask, 0, vma, addr)
+#define alloc_page_vma(gfp_mask, vma, addr)                    \
+       alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
+#define alloc_page_vma_node(gfp_mask, vma, addr, node)         \
+       alloc_pages_vma(gfp_mask, 0, vma, addr, node)
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
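The extra node argument lets a caller that already knows the right NUMA target override the default per-task policy lookup. A hedged kernel-side fragment; vma, address and node are assumed to come from the caller:

            struct page *page;

            /* old form, implicitly allocates on numa_node_id(): */
            page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);

            /* new form, explicit target node: */
            page = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma, address, node);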
index f376ddc64c4dd164a6d1304e017242ffe0f356a8..62f500c724f9490ac30804157fed08f41a359e6b 100644 (file)
@@ -54,11 +54,13 @@ enum hrtimer_restart {
  * 0x00                inactive
  * 0x01                enqueued into rbtree
  * 0x02                callback function running
+ * 0x04                timer is migrated to another cpu
  *
  * Special cases:
  * 0x03                callback function running and enqueued
  *             (was requeued on another CPU)
- * 0x09                timer was migrated on CPU hotunplug
+ * 0x05                timer was migrated on CPU hotunplug
+ *
  * The "callback function running and enqueued" status is only possible on
  * SMP. It happens for example when a posix timer expired and the callback
  * queued a signal. Between dropping the lock which protects the posix timer
@@ -67,8 +69,11 @@ enum hrtimer_restart {
 * as otherwise the timer could be removed before the softirq code finishes
  * the handling of the timer.
  *
- * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state to
- * preserve the HRTIMER_STATE_CALLBACK bit in the above scenario.
+ * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state
+ * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This
+ * also affects HRTIMER_STATE_MIGRATE where the preservation is not
+ * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is
+ * enqueued on the new cpu.
  *
  * All state transitions are protected by cpu_base->lock.
  */
@@ -148,7 +153,12 @@ struct hrtimer_clock_base {
 #endif
 };
 
-#define HRTIMER_MAX_CLOCK_BASES 2
+enum  hrtimer_base_type {
+       HRTIMER_BASE_REALTIME,
+       HRTIMER_BASE_MONOTONIC,
+       HRTIMER_BASE_BOOTTIME,
+       HRTIMER_MAX_CLOCK_BASES,
+};
 
 /*
  * struct hrtimer_cpu_base - the per cpu clock bases
@@ -308,6 +318,7 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 
 extern ktime_t ktime_get(void);
 extern ktime_t ktime_get_real(void);
+extern ktime_t ktime_get_boottime(void);
 
 
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
@@ -370,8 +381,9 @@ extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
 extern ktime_t hrtimer_get_next_event(void);
 
 /*
- * A timer is active, when it is enqueued into the rbtree or the callback
- * function is running.
+ * A timer is active when it is enqueued into the rbtree or the
+ * callback function is running or it's in the state of being migrated
+ * to another cpu.
  */
 static inline int hrtimer_active(const struct hrtimer *timer)
 {
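With HRTIMER_BASE_BOOTTIME and ktime_get_boottime() in place, a timer can follow CLOCK_BOOTTIME (monotonic time plus time spent suspended). A hedged kernel-side sketch, assuming hrtimer_init() accepts the new clock id as the added base suggests; my_timer_fn and the 500 ms delay are illustrative only:

    static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
    {
            return HRTIMER_NORESTART;       /* one-shot */
    }

    static struct hrtimer boot_timer;

    /* in some init path: */
    hrtimer_init(&boot_timer, CLOCK_BOOTTIME, HRTIMER_MODE_REL);
    boot_timer.function = my_timer_fn;
    hrtimer_start(&boot_timer, ktime_set(0, 500 * NSEC_PER_MSEC),
                  HRTIMER_MODE_REL);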
index 55e0d4253e4927eb67254f38137b2a9e787afa9d..59b72ca1c5d1140d2f035eeb4ad0220fff69950d 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/smp.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
@@ -55,7 +57,8 @@
  *                Used by threaded interrupts which need to keep the
  *                irq line disabled until the threaded handler has been run.
  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
- *
+ * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
+ * IRQF_NO_THREAD - Interrupt cannot be threaded
  */
 #define IRQF_DISABLED          0x00000020
 #define IRQF_SAMPLE_RANDOM     0x00000040
 #define IRQF_IRQPOLL           0x00001000
 #define IRQF_ONESHOT           0x00002000
 #define IRQF_NO_SUSPEND                0x00004000
+#define IRQF_FORCE_RESUME      0x00008000
+#define IRQF_NO_THREAD         0x00010000
 
-#define IRQF_TIMER             (__IRQF_TIMER | IRQF_NO_SUSPEND)
-
-/*
- * Bits used by threaded handlers:
- * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
- * IRQTF_DIED      - handler thread died
- * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
- * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
- */
-enum {
-       IRQTF_RUNTHREAD,
-       IRQTF_DIED,
-       IRQTF_WARNED,
-       IRQTF_AFFINITY,
-};
+#define IRQF_TIMER             (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
 /*
  * These values can be returned by request_any_context_irq() and
@@ -110,6 +101,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
 * @thread_fn: interrupt handler function for threaded interrupts
  * @thread:    thread pointer for threaded interrupts
  * @thread_flags:      flags related to @thread
+ * @thread_mask:       bitmask for keeping track of @thread activity
  */
 struct irqaction {
        irq_handler_t handler;
@@ -120,6 +112,7 @@ struct irqaction {
        irq_handler_t thread_fn;
        struct task_struct *thread;
        unsigned long thread_flags;
+       unsigned long thread_mask;
        const char *name;
        struct proc_dir_entry *dir;
 } ____cacheline_internodealigned_in_smp;
@@ -240,6 +233,35 @@ extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+
+/**
+ * struct irq_affinity_notify - context for notification of IRQ affinity changes
+ * @irq:               Interrupt to which notification applies
+ * @kref:              Reference count, for internal use
+ * @work:              Work item, for internal use
+ * @notify:            Function to be called on change.  This will be
+ *                     called in process context.
+ * @release:           Function to be called on release.  This will be
+ *                     called in process context.  Once registered, the
+ *                     structure must only be freed when this function is
+ *                     called or later.
+ */
+struct irq_affinity_notify {
+       unsigned int irq;
+       struct kref kref;
+       struct work_struct work;
+       void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+       void (*release)(struct kref *ref);
+};
+
+extern int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+
+static inline void irq_run_affinity_notifiers(void)
+{
+       flush_scheduled_work();
+}
+
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -255,7 +277,7 @@ static inline int irq_can_set_affinity(unsigned int irq)
 static inline int irq_select_affinity(unsigned int irq)  { return 0; }
 
 static inline int irq_set_affinity_hint(unsigned int irq,
-                                        const struct cpumask *m)
+                                       const struct cpumask *m)
 {
        return -EINVAL;
 }
@@ -314,16 +336,24 @@ static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long
 }
 
 /* IRQ wakeup (PM) control: */
-extern int set_irq_wake(unsigned int irq, unsigned int on);
+extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+/* Please do not use: Use the replacement functions instead */
+static inline int set_irq_wake(unsigned int irq, unsigned int on)
+{
+       return irq_set_irq_wake(irq, on);
+}
+#endif
 
 static inline int enable_irq_wake(unsigned int irq)
 {
-       return set_irq_wake(irq, 1);
+       return irq_set_irq_wake(irq, 1);
 }
 
 static inline int disable_irq_wake(unsigned int irq)
 {
-       return set_irq_wake(irq, 0);
+       return irq_set_irq_wake(irq, 0);
 }
 
 #else /* !CONFIG_GENERIC_HARDIRQS */
@@ -353,6 +383,13 @@ static inline int disable_irq_wake(unsigned int irq)
 }
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
+
+#ifdef CONFIG_IRQ_FORCED_THREADING
+extern bool force_irqthreads;
+#else
+#define force_irqthreads       (0)
+#endif
+
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
 #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
@@ -426,6 +463,13 @@ extern void raise_softirq(unsigned int nr);
  */
 DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
 
+DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+
+static inline struct task_struct *this_cpu_ksoftirqd(void)
+{
+       return this_cpu_read(ksoftirqd);
+}
+
 /* Try to send a softirq to a remote cpu.  If this cannot be done, the
  * work will be queued to the local cpu.
  */
@@ -645,6 +689,7 @@ static inline void init_irq_proc(void)
 
 struct seq_file;
 int show_interrupts(struct seq_file *p, void *v);
+int arch_show_interrupts(struct seq_file *p, int prec);
 
 extern int early_irq_init(void);
 extern int arch_probe_nr_irqs(void);
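A hedged sketch of how a driver might consume the new affinity notification hook; the callbacks and dev_irq are hypothetical, and the core is assumed to drop the kref via @release once the notifier is replaced or the irq is freed:

    static void my_notify(struct irq_affinity_notify *notify,
                          const cpumask_t *mask)
    {
            /* runs from a work item, in process context, after the change */
    }

    static void my_release(struct kref *ref)
    {
            /* last reference gone: safe to free the containing structure */
    }

    static struct irq_affinity_notify my_notifier = {
            .notify  = my_notify,
            .release = my_release,
    };

    /* driver init; passing NULL later presumably unregisters (assumption) */
    irq_set_affinity_notifier(dev_irq, &my_notifier);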
index 80fcb53057bcad28ce50a0fca50b4070d3a61ec7..1d3577f30d45f62c55ef18f12eb6204b7f0dd1e9 100644 (file)
 #include <asm/irq_regs.h>
 
 struct irq_desc;
+struct irq_data;
 typedef        void (*irq_flow_handler_t)(unsigned int irq,
                                            struct irq_desc *desc);
-
+typedef        void (*irq_preflow_handler_t)(struct irq_data *data);
 
 /*
  * IRQ line status.
  *
- * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h
+ * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h
+ *
+ * IRQ_TYPE_NONE               - default, unspecified type
+ * IRQ_TYPE_EDGE_RISING                - rising edge triggered
+ * IRQ_TYPE_EDGE_FALLING       - falling edge triggered
+ * IRQ_TYPE_EDGE_BOTH          - rising and falling edge triggered
+ * IRQ_TYPE_LEVEL_HIGH         - high level triggered
+ * IRQ_TYPE_LEVEL_LOW          - low level triggered
+ * IRQ_TYPE_LEVEL_MASK         - Mask to filter out the level bits
+ * IRQ_TYPE_SENSE_MASK         - Mask for all the above bits
+ * IRQ_TYPE_PROBE              - Special flag for probing in progress
+ *
+ * Bits which can be modified via irq_set/clear/modify_status_flags()
+ * IRQ_LEVEL                   - Interrupt is level type. Will also be
+ *                               updated in the code when the above trigger
+ *                               bits are modified via set_irq_type()
+ * IRQ_PER_CPU                 - Mark an interrupt PER_CPU. Will protect
+ *                               it from affinity setting
+ * IRQ_NOPROBE                 - Interrupt cannot be probed by autoprobing
+ * IRQ_NOREQUEST               - Interrupt cannot be requested via
+ *                               request_irq()
+ * IRQ_NOAUTOEN                        - Interrupt is not automatically enabled in
+ *                               request/setup_irq()
+ * IRQ_NO_BALANCING            - Interrupt cannot be balanced (affinity set)
+ * IRQ_MOVE_PCNTXT             - Interrupt can be migrated from process context
+ * IRQ_NESTED_THREAD           - Interrupt nests into another thread
+ *
+ * Deprecated bits. They are kept updated as long as
+ * CONFIG_GENERIC_HARDIRQS_NO_COMPAT is not set. Will go away soon. These bits
+ * are internal state of the core code and if you really need to access
+ * them then talk to the genirq maintainer instead of hacking
+ * something weird.
  *
- * IRQ types
  */
-#define IRQ_TYPE_NONE          0x00000000      /* Default, unspecified type */
-#define IRQ_TYPE_EDGE_RISING   0x00000001      /* Edge rising type */
-#define IRQ_TYPE_EDGE_FALLING  0x00000002      /* Edge falling type */
-#define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
-#define IRQ_TYPE_LEVEL_HIGH    0x00000004      /* Level high type */
-#define IRQ_TYPE_LEVEL_LOW     0x00000008      /* Level low type */
-#define IRQ_TYPE_SENSE_MASK    0x0000000f      /* Mask of the above */
-#define IRQ_TYPE_PROBE         0x00000010      /* Probing in progress */
-
-/* Internal flags */
-#define IRQ_INPROGRESS         0x00000100      /* IRQ handler active - do not enter! */
-#define IRQ_DISABLED           0x00000200      /* IRQ disabled - do not enter! */
-#define IRQ_PENDING            0x00000400      /* IRQ pending - replay on enable */
-#define IRQ_REPLAY             0x00000800      /* IRQ has been replayed but not acked yet */
-#define IRQ_AUTODETECT         0x00001000      /* IRQ is being autodetected */
-#define IRQ_WAITING            0x00002000      /* IRQ not yet seen - for autodetection */
-#define IRQ_LEVEL              0x00004000      /* IRQ level triggered */
-#define IRQ_MASKED             0x00008000      /* IRQ masked - shouldn't be seen again */
-#define IRQ_PER_CPU            0x00010000      /* IRQ is per CPU */
-#define IRQ_NOPROBE            0x00020000      /* IRQ is not valid for probing */
-#define IRQ_NOREQUEST          0x00040000      /* IRQ cannot be requested */
-#define IRQ_NOAUTOEN           0x00080000      /* IRQ will not be enabled on request irq */
-#define IRQ_WAKEUP             0x00100000      /* IRQ triggers system wakeup */
-#define IRQ_MOVE_PENDING       0x00200000      /* need to re-target IRQ destination */
-#define IRQ_NO_BALANCING       0x00400000      /* IRQ is excluded from balancing */
-#define IRQ_SPURIOUS_DISABLED  0x00800000      /* IRQ was disabled by the spurious trap */
-#define IRQ_MOVE_PCNTXT                0x01000000      /* IRQ migration from process context */
-#define IRQ_AFFINITY_SET       0x02000000      /* IRQ affinity was set from userspace*/
-#define IRQ_SUSPENDED          0x04000000      /* IRQ has gone through suspend sequence */
-#define IRQ_ONESHOT            0x08000000      /* IRQ is not unmasked after hardirq */
-#define IRQ_NESTED_THREAD      0x10000000      /* IRQ is nested into another, no own handler thread */
+enum {
+       IRQ_TYPE_NONE           = 0x00000000,
+       IRQ_TYPE_EDGE_RISING    = 0x00000001,
+       IRQ_TYPE_EDGE_FALLING   = 0x00000002,
+       IRQ_TYPE_EDGE_BOTH      = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
+       IRQ_TYPE_LEVEL_HIGH     = 0x00000004,
+       IRQ_TYPE_LEVEL_LOW      = 0x00000008,
+       IRQ_TYPE_LEVEL_MASK     = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
+       IRQ_TYPE_SENSE_MASK     = 0x0000000f,
+
+       IRQ_TYPE_PROBE          = 0x00000010,
+
+       IRQ_LEVEL               = (1 <<  8),
+       IRQ_PER_CPU             = (1 <<  9),
+       IRQ_NOPROBE             = (1 << 10),
+       IRQ_NOREQUEST           = (1 << 11),
+       IRQ_NOAUTOEN            = (1 << 12),
+       IRQ_NO_BALANCING        = (1 << 13),
+       IRQ_MOVE_PCNTXT         = (1 << 14),
+       IRQ_NESTED_THREAD       = (1 << 15),
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+       IRQ_INPROGRESS          = (1 << 16),
+       IRQ_REPLAY              = (1 << 17),
+       IRQ_WAITING             = (1 << 18),
+       IRQ_DISABLED            = (1 << 19),
+       IRQ_PENDING             = (1 << 20),
+       IRQ_MASKED              = (1 << 21),
+       IRQ_MOVE_PENDING        = (1 << 22),
+       IRQ_AFFINITY_SET        = (1 << 23),
+       IRQ_WAKEUP              = (1 << 24),
+#endif
+};
 
 #define IRQF_MODIFY_MASK       \
        (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
         IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
-        IRQ_PER_CPU)
+        IRQ_PER_CPU | IRQ_NESTED_THREAD)
 
-#ifdef CONFIG_IRQ_PER_CPU
-# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
-# define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
-#else
-# define CHECK_IRQ_PER_CPU(var) 0
-# define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING
-#endif
+#define IRQ_NO_BALANCING_MASK  (IRQ_PER_CPU | IRQ_NO_BALANCING)
+
+static inline __deprecated bool CHECK_IRQ_PER_CPU(unsigned int status)
+{
+       return status & IRQ_PER_CPU;
+}
+
+/*
+ * Return value for chip->irq_set_affinity()
+ *
+ * IRQ_SET_MASK_OK     - OK, core updates irq_data.affinity
+ * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity
+ */
+enum {
+       IRQ_SET_MASK_OK = 0,
+       IRQ_SET_MASK_OK_NOCOPY,
+};
 
 struct msi_desc;
 
@@ -91,6 +134,8 @@ struct msi_desc;
  * struct irq_data - per irq and irq chip data passed down to chip functions
  * @irq:               interrupt number
  * @node:              node index useful for balancing
+ * @state_use_accessors: status information for irq chip functions.
+ *                     Use accessor functions to deal with it
  * @chip:              low level interrupt hardware access
  * @handler_data:      per-IRQ data for the irq_chip methods
  * @chip_data:         platform-specific per-chip private data for the chip
@@ -105,6 +150,7 @@ struct msi_desc;
 struct irq_data {
        unsigned int            irq;
        unsigned int            node;
+       unsigned int            state_use_accessors;
        struct irq_chip         *chip;
        void                    *handler_data;
        void                    *chip_data;
@@ -114,6 +160,80 @@ struct irq_data {
 #endif
 };
 
+/*
+ * Bit masks for irq_data.state
+ *
+ * IRQD_TRIGGER_MASK           - Mask for the trigger type bits
+ * IRQD_SETAFFINITY_PENDING    - Affinity setting is pending
+ * IRQD_NO_BALANCING           - Balancing disabled for this IRQ
+ * IRQD_PER_CPU                        - Interrupt is per cpu
+ * IRQD_AFFINITY_SET           - Interrupt affinity was set
+ * IRQD_LEVEL                  - Interrupt is level triggered
+ * IRQD_WAKEUP_STATE           - Interrupt is configured for wakeup
+ *                               from suspend
+ * IRQD_MOVE_PCNTXT            - Interrupt can be moved in process
+ *                               context
+ */
+enum {
+       IRQD_TRIGGER_MASK               = 0xf,
+       IRQD_SETAFFINITY_PENDING        = (1 <<  8),
+       IRQD_NO_BALANCING               = (1 << 10),
+       IRQD_PER_CPU                    = (1 << 11),
+       IRQD_AFFINITY_SET               = (1 << 12),
+       IRQD_LEVEL                      = (1 << 13),
+       IRQD_WAKEUP_STATE               = (1 << 14),
+       IRQD_MOVE_PCNTXT                = (1 << 15),
+};
+
+static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
+{
+       return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
+}
+
+static inline bool irqd_is_per_cpu(struct irq_data *d)
+{
+       return d->state_use_accessors & IRQD_PER_CPU;
+}
+
+static inline bool irqd_can_balance(struct irq_data *d)
+{
+       return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
+}
+
+static inline bool irqd_affinity_was_set(struct irq_data *d)
+{
+       return d->state_use_accessors & IRQD_AFFINITY_SET;
+}
+
+static inline u32 irqd_get_trigger_type(struct irq_data *d)
+{
+       return d->state_use_accessors & IRQD_TRIGGER_MASK;
+}
+
+/*
+ * Must only be called inside irq_chip.irq_set_type() functions.
+ */
+static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
+{
+       d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
+       d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
+}
+
+static inline bool irqd_is_level_type(struct irq_data *d)
+{
+       return d->state_use_accessors & IRQD_LEVEL;
+}
+
+static inline bool irqd_is_wakeup_set(struct irq_data *d)
+{
+       return d->state_use_accessors & IRQD_WAKEUP_STATE;
+}
+
+static inline bool irqd_can_move_in_process_context(struct irq_data *d)
+{
+       return d->state_use_accessors & IRQD_MOVE_PCNTXT;
+}
+
 /**
  * struct irq_chip - hardware interrupt chip descriptor
  *
@@ -150,6 +270,7 @@ struct irq_data {
  * @irq_set_wake:      enable/disable power-management wake-on of an IRQ
  * @irq_bus_lock:      function to lock access to slow bus (i2c) chips
  * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
+ * @flags:             chip specific flags
  *
  * @release:           release function solely used by UML
  */
@@ -196,12 +317,27 @@ struct irq_chip {
        void            (*irq_bus_lock)(struct irq_data *data);
        void            (*irq_bus_sync_unlock)(struct irq_data *data);
 
+       unsigned long   flags;
+
        /* Currently used only by UML, might disappear one day.*/
 #ifdef CONFIG_IRQ_RELEASE_METHOD
        void            (*release)(unsigned int irq, void *dev_id);
 #endif
 };
 
+/*
+ * irq_chip specific flags
+ *
+ * IRQCHIP_SET_TYPE_MASKED:    Mask before calling chip.irq_set_type()
+ * IRQCHIP_EOI_IF_HANDLED:     Only issue irq_eoi() when irq was handled
+ * IRQCHIP_MASK_ON_SUSPEND:    Mask non wake irqs in the suspend path
+ */
+enum {
+       IRQCHIP_SET_TYPE_MASKED         = (1 <<  0),
+       IRQCHIP_EOI_IF_HANDLED          = (1 <<  1),
+       IRQCHIP_MASK_ON_SUSPEND         = (1 <<  2),
+};
+
 /* This include will go away once we isolated irq_desc usage to core code */
 #include <linux/irqdesc.h>
 
@@ -218,7 +354,7 @@ struct irq_chip {
 # define ARCH_IRQ_INIT_FLAGS   0
 #endif
 
-#define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS)
+#define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
 
 struct irqaction;
 extern int setup_irq(unsigned int irq, struct irqaction *new);
@@ -229,9 +365,13 @@ extern void remove_irq(unsigned int irq, struct irqaction *act);
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void move_native_irq(int irq);
 void move_masked_irq(int irq);
+void irq_move_irq(struct irq_data *data);
+void irq_move_masked_irq(struct irq_data *data);
 #else
 static inline void move_native_irq(int irq) { }
 static inline void move_masked_irq(int irq) { }
+static inline void irq_move_irq(struct irq_data *data) { }
+static inline void irq_move_masked_irq(struct irq_data *data) { }
 #endif
 
 extern int no_irq_affinity;
@@ -267,23 +407,23 @@ extern struct irq_chip no_irq_chip;
 extern struct irq_chip dummy_irq_chip;
 
 extern void
-set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
-                        irq_flow_handler_t handle);
-extern void
-set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
                              irq_flow_handler_t handle, const char *name);
 
+static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+                                           irq_flow_handler_t handle)
+{
+       irq_set_chip_and_handler_name(irq, chip, handle, NULL);
+}
+
 extern void
-__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                  const char *name);
 
-/*
- * Set a highlevel flow handler for a given IRQ:
- */
 static inline void
-set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
+irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
 {
-       __set_irq_handler(irq, handle, 0, NULL);
+       __irq_set_handler(irq, handle, 0, NULL);
 }
 
 /*
@@ -292,14 +432,11 @@ set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
  *  IRQ_NOREQUEST and IRQ_NOPROBE)
  */
 static inline void
-set_irq_chained_handler(unsigned int irq,
-                       irq_flow_handler_t handle)
+irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
 {
-       __set_irq_handler(irq, handle, 1, NULL);
+       __irq_set_handler(irq, handle, 1, NULL);
 }
 
-extern void set_irq_nested_thread(unsigned int irq, int nest);
-
 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
 
 static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
@@ -312,16 +449,24 @@ static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
        irq_modify_status(irq, clr, 0);
 }
 
-static inline void set_irq_noprobe(unsigned int irq)
+static inline void irq_set_noprobe(unsigned int irq)
 {
        irq_modify_status(irq, 0, IRQ_NOPROBE);
 }
 
-static inline void set_irq_probe(unsigned int irq)
+static inline void irq_set_probe(unsigned int irq)
 {
        irq_modify_status(irq, IRQ_NOPROBE, 0);
 }
 
+static inline void irq_set_nested_thread(unsigned int irq, bool nest)
+{
+       if (nest)
+               irq_set_status_flags(irq, IRQ_NESTED_THREAD);
+       else
+               irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
+}
+
 /* Handle dynamic irq creation and destruction */
 extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
@@ -338,14 +483,14 @@ static inline void dynamic_irq_init(unsigned int irq)
 }
 
 /* Set/get chip/data for an IRQ: */
-extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
-extern int set_irq_data(unsigned int irq, void *data);
-extern int set_irq_chip_data(unsigned int irq, void *data);
-extern int set_irq_type(unsigned int irq, unsigned int type);
-extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
+extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
+extern int irq_set_handler_data(unsigned int irq, void *data);
+extern int irq_set_chip_data(unsigned int irq, void *data);
+extern int irq_set_irq_type(unsigned int irq, unsigned int type);
+extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
 extern struct irq_data *irq_get_irq_data(unsigned int irq);
 
-static inline struct irq_chip *get_irq_chip(unsigned int irq)
+static inline struct irq_chip *irq_get_chip(unsigned int irq)
 {
        struct irq_data *d = irq_get_irq_data(irq);
        return d ? d->chip : NULL;
@@ -356,7 +501,7 @@ static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
        return d->chip;
 }
 
-static inline void *get_irq_chip_data(unsigned int irq)
+static inline void *irq_get_chip_data(unsigned int irq)
 {
        struct irq_data *d = irq_get_irq_data(irq);
        return d ? d->chip_data : NULL;
@@ -367,18 +512,18 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
        return d->chip_data;
 }
 
-static inline void *get_irq_data(unsigned int irq)
+static inline void *irq_get_handler_data(unsigned int irq)
 {
        struct irq_data *d = irq_get_irq_data(irq);
        return d ? d->handler_data : NULL;
 }
 
-static inline void *irq_data_get_irq_data(struct irq_data *d)
+static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
 {
        return d->handler_data;
 }
 
-static inline struct msi_desc *get_irq_msi(unsigned int irq)
+static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
 {
        struct irq_data *d = irq_get_irq_data(irq);
        return d ? d->msi_desc : NULL;
@@ -389,6 +534,89 @@ static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
        return d->msi_desc;
 }
 
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+/* Please do not use: Use the replacement functions instead */
+static inline int set_irq_chip(unsigned int irq, struct irq_chip *chip)
+{
+       return irq_set_chip(irq, chip);
+}
+static inline int set_irq_data(unsigned int irq, void *data)
+{
+       return irq_set_handler_data(irq, data);
+}
+static inline int set_irq_chip_data(unsigned int irq, void *data)
+{
+       return irq_set_chip_data(irq, data);
+}
+static inline int set_irq_type(unsigned int irq, unsigned int type)
+{
+       return irq_set_irq_type(irq, type);
+}
+static inline int set_irq_msi(unsigned int irq, struct msi_desc *entry)
+{
+       return irq_set_msi_desc(irq, entry);
+}
+static inline struct irq_chip *get_irq_chip(unsigned int irq)
+{
+       return irq_get_chip(irq);
+}
+static inline void *get_irq_chip_data(unsigned int irq)
+{
+       return irq_get_chip_data(irq);
+}
+static inline void *get_irq_data(unsigned int irq)
+{
+       return irq_get_handler_data(irq);
+}
+static inline void *irq_data_get_irq_data(struct irq_data *d)
+{
+       return irq_data_get_irq_handler_data(d);
+}
+static inline struct msi_desc *get_irq_msi(unsigned int irq)
+{
+       return irq_get_msi_desc(irq);
+}
+static inline void set_irq_noprobe(unsigned int irq)
+{
+       irq_set_noprobe(irq);
+}
+static inline void set_irq_probe(unsigned int irq)
+{
+       irq_set_probe(irq);
+}
+static inline void set_irq_nested_thread(unsigned int irq, int nest)
+{
+       irq_set_nested_thread(irq, nest);
+}
+static inline void
+set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+                             irq_flow_handler_t handle, const char *name)
+{
+       irq_set_chip_and_handler_name(irq, chip, handle, name);
+}
+static inline void
+set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+                        irq_flow_handler_t handle)
+{
+       irq_set_chip_and_handler(irq, chip, handle);
+}
+static inline void
+__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+                 const char *name)
+{
+       __irq_set_handler(irq, handle, is_chained, name);
+}
+static inline void set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
+{
+       irq_set_handler(irq, handle);
+}
+static inline void
+set_irq_chained_handler(unsigned int irq, irq_flow_handler_t handle)
+{
+       irq_set_chained_handler(irq, handle);
+}
+#endif
+
 int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node);
 void irq_free_descs(unsigned int irq, unsigned int cnt);
 int irq_reserve_irqs(unsigned int from, unsigned int cnt);
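The compat wrappers above exist only while CONFIG_GENERIC_HARDIRQS_NO_COMPAT is unset, so arch and driver code is expected to migrate to the irq_-prefixed names. A hedged before/after sketch for a hypothetical my_chip/priv pair:

    /* old names (compat layer, scheduled for removal) */
    set_irq_chip_and_handler(irq, &my_chip, handle_level_irq);
    set_irq_chip_data(irq, priv);
    set_irq_type(irq, IRQ_TYPE_EDGE_RISING);

    /* new names */
    irq_set_chip_and_handler(irq, &my_chip, handle_level_irq);
    irq_set_chip_data(irq, priv);
    irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);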
index c1a95b7b58de736f228cfeef9bfe1f118e02bf29..00218371518b0830328f6c089b3d5df0df471d4c 100644 (file)
@@ -8,6 +8,7 @@
  * For now it's included from <linux/irq.h>
  */
 
+struct irq_affinity_notify;
 struct proc_dir_entry;
 struct timer_rand_state;
 /**
@@ -18,13 +19,16 @@ struct timer_rand_state;
  * @handle_irq:                highlevel irq-events handler [if NULL, __do_IRQ()]
  * @action:            the irq action chain
  * @status:            status information
+ * @core_internal_state__do_not_mess_with_it: core internal status information
  * @depth:             disable-depth, for nested irq_disable() calls
  * @wake_depth:                enable depth, for multiple set_irq_wake() callers
  * @irq_count:         stats field to detect stalled irqs
  * @last_unhandled:    aging timer for unhandled count
  * @irqs_unhandled:    stats field for spurious unhandled interrupts
  * @lock:              locking for SMP
+ * @affinity_notify:   context for notification of affinity changes
  * @pending_mask:      pending rebalanced interrupts
+ * @threads_oneshot:   bitfield to handle shared oneshot threads
  * @threads_active:    number of irqaction threads currently running
  * @wait_for_threads:  wait queue for sync_irq to wait for threaded handlers
  * @dir:               /proc/irq/ procfs entry
@@ -45,6 +49,7 @@ struct irq_desc {
                struct {
                        unsigned int            irq;
                        unsigned int            node;
+                       unsigned int            pad_do_not_even_think_about_it;
                        struct irq_chip         *chip;
                        void                    *handler_data;
                        void                    *chip_data;
@@ -59,9 +64,16 @@ struct irq_desc {
        struct timer_rand_state *timer_rand_state;
        unsigned int __percpu   *kstat_irqs;
        irq_flow_handler_t      handle_irq;
+#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+       irq_preflow_handler_t   preflow_handler;
+#endif
        struct irqaction        *action;        /* IRQ action list */
+#ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+       unsigned int            status_use_accessors;
+#else
        unsigned int            status;         /* IRQ status */
-
+#endif
+       unsigned int            core_internal_state__do_not_mess_with_it;
        unsigned int            depth;          /* nested irq disables */
        unsigned int            wake_depth;     /* nested wake enables */
        unsigned int            irq_count;      /* For detecting broken IRQs */
@@ -70,10 +82,12 @@ struct irq_desc {
        raw_spinlock_t          lock;
 #ifdef CONFIG_SMP
        const struct cpumask    *affinity_hint;
+       struct irq_affinity_notify *affinity_notify;
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_var_t           pending_mask;
 #endif
 #endif
+       unsigned long           threads_oneshot;
        atomic_t                threads_active;
        wait_queue_head_t       wait_for_threads;
 #ifdef CONFIG_PROC_FS
@@ -95,10 +109,51 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 
-#define get_irq_desc_chip(desc)                ((desc)->irq_data.chip)
-#define get_irq_desc_chip_data(desc)   ((desc)->irq_data.chip_data)
-#define get_irq_desc_data(desc)                ((desc)->irq_data.handler_data)
-#define get_irq_desc_msi(desc)         ((desc)->irq_data.msi_desc)
+static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
+{
+       return &desc->irq_data;
+}
+
+static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
+{
+       return desc->irq_data.chip;
+}
+
+static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
+{
+       return desc->irq_data.chip_data;
+}
+
+static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
+{
+       return desc->irq_data.handler_data;
+}
+
+static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
+{
+       return desc->irq_data.msi_desc;
+}
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+static inline struct irq_chip *get_irq_desc_chip(struct irq_desc *desc)
+{
+       return irq_desc_get_chip(desc);
+}
+static inline void *get_irq_desc_data(struct irq_desc *desc)
+{
+       return irq_desc_get_handler_data(desc);
+}
+
+static inline void *get_irq_desc_chip_data(struct irq_desc *desc)
+{
+       return irq_desc_get_chip_data(desc);
+}
+
+static inline struct msi_desc *get_irq_desc_msi(struct irq_desc *desc)
+{
+       return irq_desc_get_msi_desc(desc);
+}
+#endif
 
 /*
  * Architectures call this to let the generic IRQ layer
@@ -123,6 +178,7 @@ static inline int irq_has_action(unsigned int irq)
        return desc->action != NULL;
 }
 
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
 static inline int irq_balancing_disabled(unsigned int irq)
 {
        struct irq_desc *desc;
@@ -130,6 +186,7 @@ static inline int irq_balancing_disabled(unsigned int irq)
        desc = irq_to_desc(irq);
        return desc->status & IRQ_NO_BALANCING_MASK;
 }
+#endif
 
 /* caller has locked the irq_desc and both params are valid */
 static inline void __set_irq_handler_unlocked(int irq,
@@ -140,6 +197,17 @@ static inline void __set_irq_handler_unlocked(int irq,
        desc = irq_to_desc(irq);
        desc->handle_irq = handler;
 }
+
+#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+static inline void
+__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
+{
+       struct irq_desc *desc;
+
+       desc = irq_to_desc(irq);
+       desc->preflow_handler = handler;
+}
+#endif
 #endif
 
 #endif
index 6811f4bfc6e7a3ea8c8ab3c59d7f3e9606f07844..922aa313c9f93ce4b55e65ebdc7690376f03052e 100644 (file)
@@ -307,6 +307,7 @@ extern clock_t jiffies_to_clock_t(long x);
 extern unsigned long clock_t_to_jiffies(unsigned long x);
 extern u64 jiffies_64_to_clock_t(u64 x);
 extern u64 nsec_to_clock_t(u64 x);
+extern u64 nsecs_to_jiffies64(u64 n);
 extern unsigned long nsecs_to_jiffies(u64 n);
 
 #define TIMESTAMP_SIZE 30
index ce0775aa64c376b8980a6af70ef59f14d19f1ba0..7ff16f7d3ed41530a9c67cdaa2bc5bbf2c609f87 100644 (file)
@@ -64,7 +64,7 @@ struct kthread_work {
 };
 
 #define KTHREAD_WORKER_INIT(worker)    {                               \
-       .lock = SPIN_LOCK_UNLOCKED,                                     \
+       .lock = __SPIN_LOCK_UNLOCKED((worker).lock),                    \
        .work_list = LIST_HEAD_INIT((worker).work_list),                \
        }
 
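The old initializer used the long-deprecated SPIN_LOCK_UNLOCKED, which gives every such lock the same lockdep class; __SPIN_LOCK_UNLOCKED(name) keys the class to the individual lock instead. Users normally reach this initializer through the wrapper macro:

    static DEFINE_KTHREAD_WORKER(my_worker);    /* expands to KTHREAD_WORKER_INIT */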
index 3fd36845ca4520cede1dad03e6a6e5576cd1cbeb..ef4f0b6083a39ae55450a289cc70269df6937e62 100644 (file)
@@ -71,6 +71,7 @@ struct wm8994 {
        u16 irq_masks_cache[WM8994_NUM_IRQ_REGS];
 
        /* Used over suspend/resume */
+       bool suspended;
        u16 ldo_regs[WM8994_NUM_LDO_REGS];
        u16 gpio_regs[WM8994_NUM_GPIO_REGS];
 
index f6385fc17ad46a6137bb7fdd387f8f81bb91e492..679300c050f5720f07597d5ee4ac3ec3e564f19f 100644 (file)
@@ -1309,8 +1309,6 @@ int add_from_early_node_map(struct range *range, int az,
                                   int nr_range, int nid);
 u64 __init find_memory_core_early(int nid, u64 size, u64 align,
                                        u64 goal, u64 limit);
-void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
-                                u64 goal, u64 limit);
 typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
 extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
index f276d4fa01fc886fe7a3dbff57733501d00eaece..9c8603872c36e9a04e350a930abef359a6c90c86 100644 (file)
@@ -19,7 +19,6 @@ struct nameidata {
        struct path     path;
        struct qstr     last;
        struct path     root;
-       struct file     *file;
        struct inode    *inode; /* path.dentry.d_inode */
        unsigned int    flags;
        unsigned        seq;
@@ -63,6 +62,10 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
 #define LOOKUP_EXCL            0x0400
 #define LOOKUP_RENAME_TARGET   0x0800
 
+#define LOOKUP_JUMPED          0x1000
+#define LOOKUP_ROOT            0x2000
+#define LOOKUP_EMPTY           0x4000
+
 extern int user_path_at(int, const char __user *, unsigned, struct path *);
 
 #define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
@@ -72,7 +75,7 @@ extern int user_path_at(int, const char __user *, unsigned, struct path *);
 
 extern int kern_path(const char *, unsigned, struct path *);
 
-extern int path_lookup(const char *, unsigned, struct nameidata *);
+extern int kern_path_parent(const char *, struct nameidata *);
 extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
                           const char *, unsigned int, struct nameidata *);
 
index d971346b0340da50ae229f0dda8244f66c0eab2b..71caf7a5e6c6cc59fd975f06a0ce6bb304288d52 100644 (file)
@@ -2392,6 +2392,9 @@ extern int netdev_notice(const struct net_device *dev, const char *format, ...)
 extern int netdev_info(const struct net_device *dev, const char *format, ...)
        __attribute__ ((format (printf, 2, 3)));
 
+#define MODULE_ALIAS_NETDEV(device) \
+       MODULE_ALIAS("netdev-" device)
+
 #if defined(DEBUG)
 #define netdev_dbg(__dev, format, args...)                     \
        netdev_printk(KERN_DEBUG, __dev, format, ##args)
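The new helper builds the "netdev-" module alias that the network core's dev_load() path is understood to request when an unknown interface name is first used. A hedged driver-side example with a hypothetical device name:

    /* lets a lookup of interface "ip6tnl0" autoload this module */
    MODULE_ALIAS_NETDEV("ip6tnl0");     /* == MODULE_ALIAS("netdev-ip6tnl0") */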
index b197563913bf94a86af7d69f4892083f62a4b3ae..3e112de12d8d61fcc7fd4700c671a369ca15f86a 100644 (file)
@@ -68,11 +68,7 @@ struct nfs_client {
        unsigned char           cl_id_uniquifier;
        u32                     cl_cb_ident;    /* v4.0 callback identifier */
        const struct nfs4_minor_version_ops *cl_mvops;
-#endif /* CONFIG_NFS_V4 */
 
-#ifdef CONFIG_NFS_V4_1
-       /* clientid returned from EXCHANGE_ID, used by session operations */
-       u64                     cl_ex_clid;
        /* The sequence id to use for the next CREATE_SESSION */
        u32                     cl_seqid;
        /* The flags used for obtaining the clientid during EXCHANGE_ID */
@@ -80,7 +76,7 @@ struct nfs_client {
        struct nfs4_session     *cl_session;    /* shared session */
        struct list_head        cl_layouts;
        struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_NFS_V4 */
 
 #ifdef CONFIG_NFS_FSCACHE
        struct fscache_cookie   *fscache;       /* client index cache cookie */
@@ -185,7 +181,7 @@ struct nfs_server {
 /* maximum number of slots to use */
 #define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE
 
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_NFS_V4)
 
 /* Sessions */
 #define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long)))
@@ -225,5 +221,5 @@ struct nfs4_session {
        struct nfs_client               *clp;
 };
 
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_NFS_V4 */
 #endif
index 3adb06ebf8418aa0a886e474a9e3460ddb4cbe09..580de67f318b4216900c04bea1f4b5c522a2da43 100644 (file)
 #define PCI_DEVICE_ID_AMD_11H_NB_MISC  0x1303
 #define PCI_DEVICE_ID_AMD_11H_NB_LINK  0x1304
 #define PCI_DEVICE_ID_AMD_15H_NB_MISC  0x1603
+#define PCI_DEVICE_ID_AMD_15H_NB_LINK  0x1604
 #define PCI_DEVICE_ID_AMD_CNB17H_F3    0x1703
 #define PCI_DEVICE_ID_AMD_LANCE                0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME   0x2001
index dda5b0a3ff6014b8a0741a186ed0e3968b63d298..614615b8d42b851481a624aeabcb110e7e27078e 100644 (file)
@@ -225,8 +225,14 @@ struct perf_event_attr {
        };
 
        __u32                   bp_type;
-       __u64                   bp_addr;
-       __u64                   bp_len;
+       union {
+               __u64           bp_addr;
+               __u64           config1; /* extension of config */
+       };
+       union {
+               __u64           bp_len;
+               __u64           config2; /* extension of config1 */
+       };
 };
 
 /*
@@ -464,6 +470,7 @@ enum perf_callchain_context {
 
 #define PERF_FLAG_FD_NO_GROUP  (1U << 0)
 #define PERF_FLAG_FD_OUTPUT    (1U << 1)
+#define PERF_FLAG_PID_CGROUP   (1U << 2) /* pid=cgroup id, per-cpu mode only */
 
 #ifdef __KERNEL__
 /*
@@ -471,6 +478,7 @@ enum perf_callchain_context {
  */
 
 #ifdef CONFIG_PERF_EVENTS
+# include <linux/cgroup.h>
 # include <asm/perf_event.h>
 # include <asm/local64.h>
 #endif
@@ -539,6 +547,9 @@ struct hw_perf_event {
                        unsigned long   event_base;
                        int             idx;
                        int             last_cpu;
+                       unsigned int    extra_reg;
+                       u64             extra_config;
+                       int             extra_alloc;
                };
                struct { /* software */
                        struct hrtimer  hrtimer;
@@ -716,6 +727,22 @@ struct swevent_hlist {
 #define PERF_ATTACH_GROUP      0x02
 #define PERF_ATTACH_TASK       0x04
 
+#ifdef CONFIG_CGROUP_PERF
+/*
+ * perf_cgroup_info keeps track of time_enabled for a cgroup.
+ * This is a per-cpu dynamically allocated data structure.
+ */
+struct perf_cgroup_info {
+       u64 time;
+       u64 timestamp;
+};
+
+struct perf_cgroup {
+       struct cgroup_subsys_state css;
+       struct perf_cgroup_info *info;  /* timing info, one per cpu */
+};
+#endif
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -832,6 +859,11 @@ struct perf_event {
        struct event_filter             *filter;
 #endif
 
+#ifdef CONFIG_CGROUP_PERF
+       struct perf_cgroup              *cgrp; /* cgroup event is attached to */
+       int                             cgrp_defer_enabled;
+#endif
+
 #endif /* CONFIG_PERF_EVENTS */
 };
 
@@ -886,6 +918,7 @@ struct perf_event_context {
        u64                             generation;
        int                             pin_count;
        struct rcu_head                 rcu_head;
+       int                             nr_cgroups; /* cgroup events present */
 };
 
 /*
@@ -905,6 +938,9 @@ struct perf_cpu_context {
        struct list_head                rotation_list;
        int                             jiffies_interval;
        struct pmu                      *active_pmu;
+#ifdef CONFIG_CGROUP_PERF
+       struct perf_cgroup              *cgrp;
+#endif
 };
 
 struct perf_output_handle {
@@ -1040,11 +1076,11 @@ have_event:
        __perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
-extern atomic_t perf_task_events;
+extern atomic_t perf_sched_events;
 
 static inline void perf_event_task_sched_in(struct task_struct *task)
 {
-       COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+       COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task));
 }
 
 static inline
@@ -1052,7 +1088,7 @@ void perf_event_task_sched_out(struct task_struct *task, struct task_struct *nex
 {
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
-       COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+       COND_STMT(&perf_sched_events, __perf_event_task_sched_out(task, next));
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1083,6 +1119,10 @@ extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 
+extern int perf_proc_update_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos);
+
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
        return sysctl_perf_event_paranoid > -1;
index 7254eda078e5ad33231e5284ecb9d893d452631a..c9b9f322c8d885ae20982dab7588739820715840 100644 (file)
  *
  * Simple ASCII art explanation:
  *
- * |HEAD          |
- * |              |
- * |prio_list.prev|<------------------------------------|
- * |prio_list.next|<->|pl|<->|pl|<--------------->|pl|<-|
- * |10            |   |10|   |21|   |21|   |21|   |40|   (prio)
- * |              |   |  |   |  |   |  |   |  |   |  |
- * |              |   |  |   |  |   |  |   |  |   |  |
- * |node_list.next|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-|
- * |node_list.prev|<------------------------------------|
+ * pl:prio_list (only for plist_node)
+ * nl:node_list
+ *   HEAD|             NODE(S)
+ *       |
+ *       ||------------------------------------|
+ *       ||->|pl|<->|pl|<--------------->|pl|<-|
+ *       |   |10|   |21|   |21|   |21|   |40|   (prio)
+ *       |   |  |   |  |   |  |   |  |   |  |
+ *       |   |  |   |  |   |  |   |  |   |  |
+ * |->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-|
+ * |-------------------------------------------|
  *
  * The nodes on the prio_list list are sorted by priority to simplify
  * the insertion of new nodes. There are no nodes with duplicate
@@ -78,7 +80,6 @@
 #include <linux/spinlock_types.h>
 
 struct plist_head {
-       struct list_head prio_list;
        struct list_head node_list;
 #ifdef CONFIG_DEBUG_PI_LIST
        raw_spinlock_t *rawlock;
@@ -88,7 +89,8 @@ struct plist_head {
 
 struct plist_node {
        int                     prio;
-       struct plist_head       plist;
+       struct list_head        prio_list;
+       struct list_head        node_list;
 };
 
 #ifdef CONFIG_DEBUG_PI_LIST
@@ -100,7 +102,6 @@ struct plist_node {
 #endif
 
 #define _PLIST_HEAD_INIT(head)                         \
-       .prio_list = LIST_HEAD_INIT((head).prio_list),  \
        .node_list = LIST_HEAD_INIT((head).node_list)
 
 /**
@@ -133,7 +134,8 @@ struct plist_node {
 #define PLIST_NODE_INIT(node, __prio)                  \
 {                                                      \
        .prio  = (__prio),                              \
-       .plist = { _PLIST_HEAD_INIT((node).plist) },    \
+       .prio_list = LIST_HEAD_INIT((node).prio_list),  \
+       .node_list = LIST_HEAD_INIT((node).node_list),  \
 }
 
 /**
@@ -144,7 +146,6 @@ struct plist_node {
 static inline void
 plist_head_init(struct plist_head *head, spinlock_t *lock)
 {
-       INIT_LIST_HEAD(&head->prio_list);
        INIT_LIST_HEAD(&head->node_list);
 #ifdef CONFIG_DEBUG_PI_LIST
        head->spinlock = lock;
@@ -160,7 +161,6 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
 static inline void
 plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
 {
-       INIT_LIST_HEAD(&head->prio_list);
        INIT_LIST_HEAD(&head->node_list);
 #ifdef CONFIG_DEBUG_PI_LIST
        head->rawlock = lock;
@@ -176,7 +176,8 @@ plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
 static inline void plist_node_init(struct plist_node *node, int prio)
 {
        node->prio = prio;
-       plist_head_init(&node->plist, NULL);
+       INIT_LIST_HEAD(&node->prio_list);
+       INIT_LIST_HEAD(&node->node_list);
 }
 
 extern void plist_add(struct plist_node *node, struct plist_head *head);
@@ -188,7 +189,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
  * @head:      the head for your list
  */
 #define plist_for_each(pos, head)      \
-        list_for_each_entry(pos, &(head)->node_list, plist.node_list)
+        list_for_each_entry(pos, &(head)->node_list, node_list)
 
 /**
  * plist_for_each_safe - iterate safely over a plist of given type
@@ -199,7 +200,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
  * Iterate over a plist of given type, safe against removal of list entry.
  */
 #define plist_for_each_safe(pos, n, head)      \
-        list_for_each_entry_safe(pos, n, &(head)->node_list, plist.node_list)
+        list_for_each_entry_safe(pos, n, &(head)->node_list, node_list)
 
 /**
  * plist_for_each_entry        - iterate over list of given type
@@ -208,7 +209,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
  * @mem:       the name of the list_struct within the struct
  */
 #define plist_for_each_entry(pos, head, mem)   \
-        list_for_each_entry(pos, &(head)->node_list, mem.plist.node_list)
+        list_for_each_entry(pos, &(head)->node_list, mem.node_list)
 
 /**
  * plist_for_each_entry_safe - iterate safely over list of given type
@@ -220,7 +221,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
  * Iterate over list of given type, safe against removal of list entry.
  */
 #define plist_for_each_entry_safe(pos, n, head, m)     \
-       list_for_each_entry_safe(pos, n, &(head)->node_list, m.plist.node_list)
+       list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list)
 
 /**
  * plist_head_empty - return !0 if a plist_head is empty
@@ -237,7 +238,7 @@ static inline int plist_head_empty(const struct plist_head *head)
  */
 static inline int plist_node_empty(const struct plist_node *node)
 {
-       return plist_head_empty(&node->plist);
+       return list_empty(&node->node_list);
 }
 
 /* All functions below assume the plist_head is not empty. */
@@ -285,7 +286,7 @@ static inline int plist_node_empty(const struct plist_node *node)
 static inline struct plist_node *plist_first(const struct plist_head *head)
 {
        return list_entry(head->node_list.next,
-                         struct plist_node, plist.node_list);
+                         struct plist_node, node_list);
 }
 
 /**
@@ -297,7 +298,7 @@ static inline struct plist_node *plist_first(const struct plist_head *head)
 static inline struct plist_node *plist_last(const struct plist_head *head)
 {
        return list_entry(head->node_list.prev,
-                         struct plist_node, plist.node_list);
+                         struct plist_node, node_list);
 }
 
 #endif
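The per-node plist_head is gone, but the external API is unchanged. A hedged kernel-side sketch of typical use under the lock that plist_head_init() records:

    static DEFINE_SPINLOCK(my_lock);
    static struct plist_head my_head;
    static struct plist_node my_node;

    /* init path */
    plist_head_init(&my_head, &my_lock);    /* lock only checked by DEBUG_PI_LIST */
    plist_node_init(&my_node, 10);          /* priority 10 */

    /* under the lock */
    spin_lock(&my_lock);
    plist_add(&my_node, &my_head);
    spin_unlock(&my_lock);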
index dd9c7ab38270059a147090ff87d37b45e591bc75..21415cc91cbb440492f06091b8237c9bed42d00d 100644 (file)
@@ -431,6 +431,8 @@ struct dev_pm_info {
        struct list_head        entry;
        struct completion       completion;
        struct wakeup_source    *wakeup;
+#else
+       unsigned int            should_wakeup:1;
 #endif
 #ifdef CONFIG_PM_RUNTIME
        struct timer_list       suspend_timer;
index 9cff00dd6b63a031d02e59b797fcf2c5fe94f082..03a67db03d01e4133e027d22871929e5c150956e 100644 (file)
@@ -109,11 +109,6 @@ static inline bool device_can_wakeup(struct device *dev)
        return dev->power.can_wakeup;
 }
 
-static inline bool device_may_wakeup(struct device *dev)
-{
-       return false;
-}
-
 static inline struct wakeup_source *wakeup_source_create(const char *name)
 {
        return NULL;
@@ -134,24 +129,32 @@ static inline void wakeup_source_unregister(struct wakeup_source *ws) {}
 
 static inline int device_wakeup_enable(struct device *dev)
 {
-       return -EINVAL;
+       dev->power.should_wakeup = true;
+       return 0;
 }
 
 static inline int device_wakeup_disable(struct device *dev)
 {
+       dev->power.should_wakeup = false;
        return 0;
 }
 
-static inline int device_init_wakeup(struct device *dev, bool val)
+static inline int device_set_wakeup_enable(struct device *dev, bool enable)
 {
-       dev->power.can_wakeup = val;
-       return val ? -EINVAL : 0;
+       dev->power.should_wakeup = enable;
+       return 0;
 }
 
+static inline int device_init_wakeup(struct device *dev, bool val)
+{
+       device_set_wakeup_capable(dev, val);
+       device_set_wakeup_enable(dev, val);
+       return 0;
+}
 
-static inline int device_set_wakeup_enable(struct device *dev, bool enable)
+static inline bool device_may_wakeup(struct device *dev)
 {
-       return -EINVAL;
+       return dev->power.can_wakeup && dev->power.should_wakeup;
 }
 
 static inline void __pm_stay_awake(struct wakeup_source *ws) {}
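With the stubs above, should_wakeup is tracked even without CONFIG_PM_SLEEP, so the usual driver pattern now behaves consistently either way. A hedged sketch; my_irq is hypothetical:

    static int my_probe(struct device *dev)
    {
            device_init_wakeup(dev, true);          /* capable and enabled */
            return 0;
    }

    static int my_suspend(struct device *dev)
    {
            if (device_may_wakeup(dev))
                    enable_irq_wake(my_irq);        /* arm the wake source */
            return 0;
    }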
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
new file mode 100644 (file)
index 0000000..369e19d
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * posix-clock.h - support for dynamic clock devices
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef _LINUX_POSIX_CLOCK_H_
+#define _LINUX_POSIX_CLOCK_H_
+
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/posix-timers.h>
+
+struct posix_clock;
+
+/**
+ * struct posix_clock_operations - functional interface to the clock
+ *
+ * Every posix clock is represented by a character device. Drivers may
+ * optionally offer extended capabilities by implementing the
+ * character device methods. The character device file operations are
+ * first handled by the clock device layer, then passed on to the
+ * driver by calling these functions.
+ *
+ * @owner:          The clock driver should set this to THIS_MODULE
+ * @clock_adjtime:  Adjust the clock
+ * @clock_gettime:  Read the current time
+ * @clock_getres:   Get the clock resolution
+ * @clock_settime:  Set the current time value
+ * @timer_create:   Create a new timer
+ * @timer_delete:   Remove a previously created timer
+ * @timer_gettime:  Get remaining time and interval of a timer
+ * @timer_settime:  Set a timer's initial expiration and interval
+ * @fasync:         Optional character device fasync method
+ * @mmap:           Optional character device mmap method
+ * @open:           Optional character device open method
+ * @release:        Optional character device release method
+ * @ioctl:          Optional character device ioctl method
+ * @read:           Optional character device read method
+ * @poll:           Optional character device poll method
+ */
+struct posix_clock_operations {
+       struct module *owner;
+
+       int  (*clock_adjtime)(struct posix_clock *pc, struct timex *tx);
+
+       int  (*clock_gettime)(struct posix_clock *pc, struct timespec *ts);
+
+       int  (*clock_getres) (struct posix_clock *pc, struct timespec *ts);
+
+       int  (*clock_settime)(struct posix_clock *pc,
+                             const struct timespec *ts);
+
+       int  (*timer_create) (struct posix_clock *pc, struct k_itimer *kit);
+
+       int  (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit);
+
+       void (*timer_gettime)(struct posix_clock *pc,
+                             struct k_itimer *kit, struct itimerspec *tsp);
+
+       int  (*timer_settime)(struct posix_clock *pc,
+                             struct k_itimer *kit, int flags,
+                             struct itimerspec *tsp, struct itimerspec *old);
+       /*
+        * Optional character device methods:
+        */
+       int     (*fasync)  (struct posix_clock *pc,
+                           int fd, struct file *file, int on);
+
+       long    (*ioctl)   (struct posix_clock *pc,
+                           unsigned int cmd, unsigned long arg);
+
+       int     (*mmap)    (struct posix_clock *pc,
+                           struct vm_area_struct *vma);
+
+       int     (*open)    (struct posix_clock *pc, fmode_t f_mode);
+
+       uint    (*poll)    (struct posix_clock *pc,
+                           struct file *file, poll_table *wait);
+
+       int     (*release) (struct posix_clock *pc);
+
+       ssize_t (*read)    (struct posix_clock *pc,
+                           uint flags, char __user *buf, size_t cnt);
+};
+
+/**
+ * struct posix_clock - represents a dynamic posix clock
+ *
+ * @ops:     Functional interface to the clock
+ * @cdev:    Character device instance for this clock
+ * @kref:    Reference count.
+ * @mutex:   Protects the 'zombie' field from concurrent access.
+ * @zombie:  If 'zombie' is true, then the hardware has disappeared.
+ * @release: A function to free the structure when the reference count reaches
+ *           zero. May be NULL if the structure is statically allocated.
+ *
+ * Drivers should embed their struct posix_clock within a private
+ * structure, obtaining a reference to it during callbacks using
+ * container_of().
+ */
+struct posix_clock {
+       struct posix_clock_operations ops;
+       struct cdev cdev;
+       struct kref kref;
+       struct mutex mutex;
+       bool zombie;
+       void (*release)(struct posix_clock *clk);
+};
+
+/**
+ * posix_clock_register() - register a new clock
+ * @clk:   Pointer to the clock. Caller must provide 'ops' and 'release'.
+ * @devid: Allocated device id
+ *
+ * A clock driver calls this function to register itself with the
+ * clock device subsystem. If 'clk' points to dynamically allocated
+ * memory, then the caller must provide a 'release' function to free
+ * that memory.
+ *
+ * Returns zero on success, non-zero otherwise.
+ */
+int posix_clock_register(struct posix_clock *clk, dev_t devid);
+
+/**
+ * posix_clock_unregister() - unregister a clock
+ * @clk: Clock instance previously registered via posix_clock_register()
+ *
+ * A clock driver calls this function to remove itself from the clock
+ * device subsystem. The posix_clock itself will remain (in an
+ * inactive state) until its reference count drops to zero, at which
+ * point it will be deallocated with its 'release' method.
+ */
+void posix_clock_unregister(struct posix_clock *clk);
+
+#endif
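
As the comments above describe, a driver embeds struct posix_clock in its private data, recovers it in callbacks with container_of(), and must supply 'release' when the clock is dynamically allocated. A hedged sketch of that pattern; my_clock, my_read_hw_time(), and the other names are illustrative:

#include <linux/posix-clock.h>
#include <linux/slab.h>

struct my_clock {
	struct posix_clock clock;	/* embedded, never a pointer */
	/* ... device state ... */
};

/* illustrative helper standing in for a hardware register read */
int my_read_hw_time(struct my_clock *mc, struct timespec *ts);

static int my_gettime(struct posix_clock *pc, struct timespec *ts)
{
	struct my_clock *mc = container_of(pc, struct my_clock, clock);

	return my_read_hw_time(mc, ts);
}

static void my_release(struct posix_clock *pc)
{
	kfree(container_of(pc, struct my_clock, clock));
}

static int my_register(dev_t devid)
{
	struct my_clock *mc = kzalloc(sizeof(*mc), GFP_KERNEL);

	if (!mc)
		return -ENOMEM;
	mc->clock.ops.owner = THIS_MODULE;
	mc->clock.ops.clock_gettime = my_gettime;
	mc->clock.release = my_release;	/* required: mc is dynamic */
	return posix_clock_register(&mc->clock, devid);
}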
index 3e23844a6990ccb9ac4d3776b36c4e43bc0ab32a..d51243ae0726b321d3696eef4f3ff9bdc09a0403 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/sched.h>
+#include <linux/timex.h>
 
 union cpu_time_count {
        cputime_t cpu;
@@ -17,10 +18,21 @@ struct cpu_timer_list {
        int firing;
 };
 
+/*
+ * Bit fields within a clockid:
+ *
+ * The most significant 29 bits hold either a pid or a file descriptor.
+ *
+ * Bit 2 indicates whether a cpu clock refers to a thread or a process.
+ *
+ * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3.
+ *
+ * A clockid is invalid if bits 2, 1, and 0 are all set.
+ */
 #define CPUCLOCK_PID(clock)            ((pid_t) ~((clock) >> 3))
 #define CPUCLOCK_PERTHREAD(clock) \
        (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)
-#define CPUCLOCK_PID_MASK      7
+
 #define CPUCLOCK_PERTHREAD_MASK        4
 #define CPUCLOCK_WHICH(clock)  ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
 #define CPUCLOCK_CLOCK_MASK    3
@@ -28,12 +40,17 @@ struct cpu_timer_list {
 #define CPUCLOCK_VIRT          1
 #define CPUCLOCK_SCHED         2
 #define CPUCLOCK_MAX           3
+#define CLOCKFD                        CPUCLOCK_MAX
+#define CLOCKFD_MASK           (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
 
 #define MAKE_PROCESS_CPUCLOCK(pid, clock) \
        ((~(clockid_t) (pid) << 3) | (clockid_t) (clock))
 #define MAKE_THREAD_CPUCLOCK(tid, clock) \
        MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK)
 
+#define FD_TO_CLOCKID(fd)      ((~(clockid_t) (fd) << 3) | CLOCKFD)
+#define CLOCKID_TO_FD(clk)     ((unsigned int) ~((clk) >> 3))
+
 /* POSIX.1b interval timer structure. */
 struct k_itimer {
        struct list_head list;          /* free/ allocate list */
@@ -67,10 +84,11 @@ struct k_itimer {
 };
 
 struct k_clock {
-       int res;                /* in nanoseconds */
        int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
-       int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
+       int (*clock_set) (const clockid_t which_clock,
+                         const struct timespec *tp);
        int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
+       int (*clock_adj) (const clockid_t which_clock, struct timex *tx);
        int (*timer_create) (struct k_itimer *timer);
        int (*nsleep) (const clockid_t which_clock, int flags,
                       struct timespec *, struct timespec __user *);
@@ -84,28 +102,14 @@ struct k_clock {
                           struct itimerspec * cur_setting);
 };
 
-void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
+extern struct k_clock clock_posix_cpu;
+extern struct k_clock clock_posix_dynamic;
 
-/* error handlers for timer_create, nanosleep and settime */
-int do_posix_clock_nonanosleep(const clockid_t, int flags, struct timespec *,
-                              struct timespec __user *);
-int do_posix_clock_nosettime(const clockid_t, struct timespec *tp);
+void posix_timers_register_clock(const clockid_t clock_id, struct k_clock *new_clock);
 
 /* function to call to trigger timer event */
 int posix_timer_event(struct k_itimer *timr, int si_private);
 
-int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *ts);
-int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *ts);
-int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *ts);
-int posix_cpu_timer_create(struct k_itimer *timer);
-int posix_cpu_nsleep(const clockid_t which_clock, int flags,
-                    struct timespec *rqtp, struct timespec __user *rmtp);
-long posix_cpu_nsleep_restart(struct restart_block *restart_block);
-int posix_cpu_timer_set(struct k_itimer *timer, int flags,
-                       struct itimerspec *new, struct itimerspec *old);
-int posix_cpu_timer_del(struct k_itimer *timer);
-void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp);
-
 void posix_cpu_timer_schedule(struct k_itimer *timer);
 
 void run_posix_cpu_timers(struct task_struct *task);
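
The bit layout documented above makes the fd-based encoding mechanical. As a worked check with fd = 5 and a 32-bit clockid_t: FD_TO_CLOCKID(5) = (~5 << 3) | 3 = 0xffffffd3, and CLOCKID_TO_FD(0xffffffd3) = ~(0xffffffd3 >> 3) = 5. A short sketch of the round trip:

#include <linux/kernel.h>
#include <linux/posix-timers.h>

static void clockid_roundtrip(int fd)
{
	clockid_t id = FD_TO_CLOCKID(fd);

	/* type bits must read back as CLOCKFD, never the invalid 0x7 */
	WARN_ON((id & CLOCKFD_MASK) != CLOCKFD);
	WARN_ON(CLOCKID_TO_FD(id) != (unsigned int)fd);
}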
index 092a04f874a850ad66537aca4a2bfa7b7e973606..a1147e5dd245e8d7d262598b3c920ceb796efd00 100644 (file)
 
 extern long arch_ptrace(struct task_struct *child, long request,
                        unsigned long addr, unsigned long data);
-extern int ptrace_traceme(void);
 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
-extern int ptrace_attach(struct task_struct *tsk);
-extern int ptrace_detach(struct task_struct *, unsigned int);
 extern void ptrace_disable(struct task_struct *);
 extern int ptrace_check_attach(struct task_struct *task, int kill);
 extern int ptrace_request(struct task_struct *child, long request,
index 8d3a2486544d1d91583e703b45d9b90794d2cca1..ab38ac80b0f9c7c4979a30475626c9bf2fbffa36 100644 (file)
@@ -100,6 +100,8 @@ void ring_buffer_free(struct ring_buffer *buffer);
 
 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
 
+void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);
+
 struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
                                                   unsigned long length);
 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
index d63dcbaea169e857f1d9f62eabdf32dd69ec9413..9026b30238f32de96612bee16d8579022b8e8e10 100644 (file)
 #define LINUX_RIO_REGS_H
 
 /*
- * In RapidIO, each device has a 2MB configuration space that is
+ * In RapidIO, each device has a 16MB configuration space that is
  * accessed via maintenance transactions.  Portions of configuration
  * space are standardized and/or reserved.
  */
+#define RIO_MAINT_SPACE_SZ     0x1000000 /* 16MB of RapidIO maintenance space */
+
 #define RIO_DEV_ID_CAR         0x00    /* [I] Device Identity CAR */
 #define RIO_DEV_INFO_CAR       0x04    /* [I] Device Information CAR */
 #define RIO_ASM_ID_CAR         0x08    /* [I] Assembly Identity CAR */
index 89c3e5182991137e2a8d83c2bf06a61a2dd2e6cb..2ca7e8a78060f24057d1c2290e05289393092e27 100644 (file)
@@ -133,7 +133,6 @@ extern struct class *rtc_class;
  * The (current) exceptions are mostly filesystem hooks:
  *   - the proc() hook for procfs
  *   - non-ioctl() chardev hooks:  open(), release(), read_callback()
- *   - periodic irq calls:  irq_set_state(), irq_set_freq()
  *
  * REVISIT those periodic irq calls *do* have ops_lock when they're
  * issued through ioctl() ...
@@ -148,11 +147,8 @@ struct rtc_class_ops {
        int (*set_alarm)(struct device *, struct rtc_wkalrm *);
        int (*proc)(struct device *, struct seq_file *);
        int (*set_mmss)(struct device *, unsigned long secs);
-       int (*irq_set_state)(struct device *, int enabled);
-       int (*irq_set_freq)(struct device *, int freq);
        int (*read_callback)(struct device *, int data);
        int (*alarm_irq_enable)(struct device *, unsigned int enabled);
-       int (*update_irq_enable)(struct device *, unsigned int enabled);
 };
 
 #define RTC_DEVICE_NAME_SIZE 20
@@ -227,6 +223,7 @@ extern void rtc_device_unregister(struct rtc_device *rtc);
 extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
 extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
 extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs);
+int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
 extern int rtc_read_alarm(struct rtc_device *rtc,
                        struct rtc_wkalrm *alrm);
 extern int rtc_set_alarm(struct rtc_device *rtc,
index bd31808c7d8e5333f35d565fb746af5927b9cf2e..cc0072e93e360722f40a19928c001a1feab272bb 100644 (file)
@@ -43,14 +43,6 @@ typedef struct {
                                RW_DEP_MAP_INIT(lockname) }
 #endif
 
-/*
- * RW_LOCK_UNLOCKED defeat lockdep state tracking and is hence
- * deprecated.
- *
- * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
- */
-#define RW_LOCK_UNLOCKED       __RW_LOCK_UNLOCKED(old_style_rw_init)
-
 #define DEFINE_RWLOCK(x)       rwlock_t x = __RW_LOCK_UNLOCKED(x)
 
 #endif /* __LINUX_RWLOCK_TYPES_H */
index bdfcc2527970d04ed2e3b3ee773519075d2da23a..34701241b67395bd58f7a4b262b6dcceb0e4f74f 100644 (file)
 #error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead"
 #endif
 
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
 #ifdef __KERNEL__
-
-#include <linux/types.h>
-
-struct rwsem_waiter;
-
 /*
  * the rw-semaphore definition
  * - if activity is 0 then there are no active readers or writers
@@ -37,28 +29,7 @@ struct rw_semaphore {
 #endif
 };
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
-{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
-  __RWSEM_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(name) \
-       struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-                        struct lock_class_key *key);
-
-#define init_rwsem(sem)                                                \
-do {                                                           \
-       static struct lock_class_key __key;                     \
-                                                               \
-       __init_rwsem((sem), #sem, &__key);                      \
-} while (0)
+#define RWSEM_UNLOCKED_VALUE           0x00000000
 
 extern void __down_read(struct rw_semaphore *sem);
 extern int __down_read_trylock(struct rw_semaphore *sem);
index efd348fe8ca75f6d9effcf8f05e87bd276e46d97..a8afe9cd000c08bf06b394eab36e84d424307eb7 100644 (file)
@@ -11,6 +11,9 @@
 
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
 #include <asm/system.h>
 #include <asm/atomic.h>
 
@@ -19,9 +22,57 @@ struct rw_semaphore;
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
 #include <linux/rwsem-spinlock.h> /* use a generic implementation */
 #else
-#include <asm/rwsem.h> /* use an arch-specific implementation */
+/* All arch-specific implementations share the same struct */
+struct rw_semaphore {
+       long                    count;
+       spinlock_t              wait_lock;
+       struct list_head        wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map      dep_map;
+#endif
+};
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+/* Include the arch-specific part */
+#include <asm/rwsem.h>
+
+/* In all implementations count != 0 means locked */
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+       return sem->count != 0;
+}
+
+#endif
+
+/* Common initializer macros and functions */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
+#define __RWSEM_INITIALIZER(name) \
+       { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock),   \
+         LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(name) \
+       struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+                        struct lock_class_key *key);
+
+#define init_rwsem(sem)                                                \
+do {                                                           \
+       static struct lock_class_key __key;                     \
+                                                               \
+       __init_rwsem((sem), #sem, &__key);                      \
+} while (0)
+
 /*
  * lock for reading
  */
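
Since the initializer macros and init_rwsem() are now shared by the spinlock-based and arch-specific variants, callers are unaffected. A minimal sketch:

#include <linux/rwsem.h>

static DECLARE_RWSEM(my_sem);	/* RWSEM_UNLOCKED_VALUE at build time */

static int my_read_side(void)
{
	int held;

	down_read(&my_sem);
	held = rwsem_is_locked(&my_sem);	/* count != 0 while held */
	up_read(&my_sem);
	return held;
}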
index 777d8a5ed06beab057a293d33d7a42d9c80ab133..c15936fe998b9672e634967fe03734e8da53aef2 100644 (file)
@@ -1058,6 +1058,7 @@ struct sched_class {
        void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
        void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
        void (*yield_task) (struct rq *rq);
+       bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
 
        void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
 
@@ -1084,12 +1085,10 @@ struct sched_class {
        void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
        void (*task_fork) (struct task_struct *p);
 
-       void (*switched_from) (struct rq *this_rq, struct task_struct *task,
-                              int running);
-       void (*switched_to) (struct rq *this_rq, struct task_struct *task,
-                            int running);
+       void (*switched_from) (struct rq *this_rq, struct task_struct *task);
+       void (*switched_to) (struct rq *this_rq, struct task_struct *task);
        void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
-                            int oldprio, int running);
+                            int oldprio);
 
        unsigned int (*get_rr_interval) (struct rq *rq,
                                         struct task_struct *task);
@@ -1715,7 +1714,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_KSOFTIRQD   0x00000001      /* I am ksoftirqd */
 #define PF_STARTING    0x00000002      /* being created */
 #define PF_EXITING     0x00000004      /* getting shut down */
 #define PF_EXITPIDONE  0x00000008      /* pi exit done on shut down */
@@ -1945,8 +1943,6 @@ int sched_rt_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos);
 
-extern unsigned int sysctl_sched_compat_yield;
-
 #ifdef CONFIG_SCHED_AUTOGROUP
 extern unsigned int sysctl_sched_autogroup_enabled;
 
@@ -1977,6 +1973,7 @@ static inline int rt_mutex_getprio(struct task_struct *p)
 # define rt_mutex_adjust_pi(p)         do { } while (0)
 #endif
 
+extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
 extern int task_nice(const struct task_struct *p);
@@ -2049,7 +2046,7 @@ extern void release_uids(struct user_namespace *ns);
 
 #include <asm/current.h>
 
-extern void do_timer(unsigned long ticks);
+extern void xtime_update(unsigned long ticks);
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
@@ -2578,13 +2575,6 @@ static inline void inc_syscw(struct task_struct *tsk)
 #define TASK_SIZE_OF(tsk)      TASK_SIZE
 #endif
 
-/*
- * Call the function if the target task is executing on a CPU right now:
- */
-extern void task_oncpu_function_call(struct task_struct *p,
-                                    void (*func) (void *info), void *info);
-
-
 #ifdef CONFIG_MM_OWNER
 extern void mm_update_next_owner(struct mm_struct *mm);
 extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
index b2b7f9749f5eb2da633264afd7453fe0a6263561..debbd97db7abc9ffb18db1600eed181338c03331 100644 (file)
@@ -53,7 +53,7 @@ struct audit_krule;
  */
 extern int cap_capable(struct task_struct *tsk, const struct cred *cred,
                       int cap, int audit);
-extern int cap_settime(struct timespec *ts, struct timezone *tz);
+extern int cap_settime(const struct timespec *ts, const struct timezone *tz);
 extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
 extern int cap_ptrace_traceme(struct task_struct *parent);
 extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
@@ -1387,7 +1387,7 @@ struct security_operations {
        int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
        int (*quota_on) (struct dentry *dentry);
        int (*syslog) (int type);
-       int (*settime) (struct timespec *ts, struct timezone *tz);
+       int (*settime) (const struct timespec *ts, const struct timezone *tz);
        int (*vm_enough_memory) (struct mm_struct *mm, long pages);
 
        int (*bprm_set_creds) (struct linux_binprm *bprm);
@@ -1669,7 +1669,7 @@ int security_sysctl(struct ctl_table *table, int op);
 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
 int security_quota_on(struct dentry *dentry);
 int security_syslog(int type);
-int security_settime(struct timespec *ts, struct timezone *tz);
+int security_settime(const struct timespec *ts, const struct timezone *tz);
 int security_vm_enough_memory(long pages);
 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
 int security_vm_enough_memory_kern(long pages);
@@ -1904,7 +1904,8 @@ static inline int security_syslog(int type)
        return 0;
 }
 
-static inline int security_settime(struct timespec *ts, struct timezone *tz)
+static inline int security_settime(const struct timespec *ts,
+                                  const struct timezone *tz)
 {
        return cap_settime(ts, tz);
 }
index 851b7783720d89685f7a0653f1efa43263118585..73548eb13a5ddc82ea16c6a292a8d704471eb7ac 100644 (file)
@@ -81,14 +81,6 @@ typedef struct spinlock {
 #define __SPIN_LOCK_UNLOCKED(lockname) \
        (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
 
-/*
- * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
- * deprecated.
- * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
- * appropriate.
- */
-#define SPIN_LOCK_UNLOCKED     __SPIN_LOCK_UNLOCKED(old_style_spin_init)
-
 #define DEFINE_SPINLOCK(x)     spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
 
 #include <linux/rwlock_types.h>
index 88513fd8e208cec3936d2af337e300325766e1ce..d81db8012c63b658158228469f89643426d9e435 100644 (file)
@@ -212,6 +212,7 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
                                const struct rpc_call_ops *ops);
 void           rpc_put_task(struct rpc_task *);
+void           rpc_put_task_async(struct rpc_task *);
 void           rpc_exit_task(struct rpc_task *);
 void           rpc_exit(struct rpc_task *, int);
 void           rpc_release_calldata(const struct rpc_call_ops *, void *);
index 98664db1be472f42183faba02175c61afe5cb049..1f5c18e6f4f17bedfbd38e64da3cc112d1cb90bb 100644 (file)
@@ -62,6 +62,7 @@ struct robust_list_head;
 struct getcpu_cache;
 struct old_linux_dirent;
 struct perf_event_attr;
+struct file_handle;
 
 #include <linux/types.h>
 #include <linux/aio_abi.h>
@@ -132,11 +133,11 @@ extern struct trace_event_functions exit_syscall_print_funcs;
                .class                  = &event_class_syscall_enter,   \
                .event.funcs            = &enter_syscall_print_funcs,   \
                .data                   = (void *)&__syscall_meta_##sname,\
+               .flags                  = TRACE_EVENT_FL_CAP_ANY,       \
        };                                                              \
        static struct ftrace_event_call __used                          \
          __attribute__((section("_ftrace_events")))                    \
-        *__event_enter_##sname = &event_enter_##sname;                 \
-       __TRACE_EVENT_FLAGS(enter_##sname, TRACE_EVENT_FL_CAP_ANY)
+        *__event_enter_##sname = &event_enter_##sname;
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)                                        \
        static struct syscall_metadata __syscall_meta_##sname;          \
@@ -146,11 +147,11 @@ extern struct trace_event_functions exit_syscall_print_funcs;
                .class                  = &event_class_syscall_exit,    \
                .event.funcs            = &exit_syscall_print_funcs,    \
                .data                   = (void *)&__syscall_meta_##sname,\
+               .flags                  = TRACE_EVENT_FL_CAP_ANY,       \
        };                                                              \
        static struct ftrace_event_call __used                          \
          __attribute__((section("_ftrace_events")))                    \
-       *__event_exit_##sname = &event_exit_##sname;                    \
-       __TRACE_EVENT_FLAGS(exit_##sname, TRACE_EVENT_FL_CAP_ANY)
+       *__event_exit_##sname = &event_exit_##sname;
 
 #define SYSCALL_METADATA(sname, nb)                            \
        SYSCALL_TRACE_ENTER_EVENT(sname);                       \
@@ -158,6 +159,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
        static struct syscall_metadata __used                   \
          __syscall_meta_##sname = {                            \
                .name           = "sys"#sname,                  \
+               .syscall_nr     = -1,   /* Filled in at boot */ \
                .nb_args        = nb,                           \
                .types          = types_##sname,                \
                .args           = args_##sname,                 \
@@ -175,6 +177,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
        static struct syscall_metadata __used                   \
          __syscall_meta__##sname = {                           \
                .name           = "sys_"#sname,                 \
+               .syscall_nr     = -1,   /* Filled in at boot */ \
                .nb_args        = 0,                            \
                .enter_event    = &event_enter__##sname,        \
                .exit_event     = &event_exit__##sname,         \
@@ -313,6 +316,8 @@ asmlinkage long sys_clock_settime(clockid_t which_clock,
                                const struct timespec __user *tp);
 asmlinkage long sys_clock_gettime(clockid_t which_clock,
                                struct timespec __user *tp);
+asmlinkage long sys_clock_adjtime(clockid_t which_clock,
+                               struct timex __user *tx);
 asmlinkage long sys_clock_getres(clockid_t which_clock,
                                struct timespec __user *tp);
 asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags,
@@ -832,5 +837,10 @@ asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len,
                        unsigned long prot, unsigned long flags,
                        unsigned long fd, unsigned long pgoff);
 asmlinkage long sys_old_mmap(struct mmap_arg_struct __user *arg);
-
+asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name,
+                                     struct file_handle __user *handle,
+                                     int __user *mnt_id, int flag);
+asmlinkage long sys_open_by_handle_at(int mountdirfd,
+                                     struct file_handle __user *handle,
+                                     int flags);
 #endif
index 7bb5cb64f3b84798db8023be585755d94af785a0..11684d9e6bd2391374f2cc3fd9ff7d4f9df4e11d 100644 (file)
@@ -930,6 +930,7 @@ enum
 
 #ifdef __KERNEL__
 #include <linux/list.h>
+#include <linux/rcupdate.h>
 
 /* For the /proc/sys support */
 struct ctl_table;
@@ -1037,10 +1038,15 @@ struct ctl_table_root {
    struct ctl_table trees. */
 struct ctl_table_header
 {
-       struct ctl_table *ctl_table;
-       struct list_head ctl_entry;
-       int used;
-       int count;
+       union {
+               struct {
+                       struct ctl_table *ctl_table;
+                       struct list_head ctl_entry;
+                       int used;
+                       int count;
+               };
+               struct rcu_head rcu;
+       };
        struct completion *unregistering;
        struct ctl_table *ctl_table_arg;
        struct ctl_table_root *root;
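
The union above overlays the live-list bookkeeping with an rcu_head, so a header can be handed to call_rcu() for deferred freeing once it is unregistered, without growing the structure. A hedged sketch of the same pattern with illustrative types:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_hdr {
	union {
		struct {
			struct list_head entry;	/* valid while registered */
			int used;
		};
		struct rcu_head rcu;		/* valid once unregistered */
	};
};

static void my_hdr_free(struct rcu_head *head)
{
	kfree(container_of(head, struct my_hdr, rcu));
}

static void my_hdr_unregister(struct my_hdr *h)
{
	list_del_rcu(&h->entry);	/* unlink under the writer's lock */
	call_rcu(&h->rcu, my_hdr_free);	/* free after a grace period */
}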
index 8651556dbd52aa7fe12d7d7b57b861b270544cfa..d3ec89fb4122610c26dfe0d86f499d50539280ff 100644 (file)
@@ -172,6 +172,14 @@ void thermal_zone_device_update(struct thermal_zone_device *);
 struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
                const struct thermal_cooling_device_ops *);
 void thermal_cooling_device_unregister(struct thermal_cooling_device *);
+
+#ifdef CONFIG_NET
 extern int generate_netlink_event(u32 orig, enum events event);
+#else
+static inline int generate_netlink_event(u32 orig, enum events event)
+{
+       return 0;
+}
+#endif
 
 #endif /* __THERMAL_H__ */
index c90696544176902262699186b32dc1c515311a15..20fc303947d37029a9e10456ddc8491a802f3fa1 100644 (file)
@@ -18,9 +18,6 @@ struct compat_timespec;
 struct restart_block {
        long (*fn)(struct restart_block *);
        union {
-               struct {
-                       unsigned long arg0, arg1, arg2, arg3;
-               };
                /* For futex_wait and futex_wait_requeue_pi */
                struct {
                        u32 __user *uaddr;
index 1e6d3b59238d3d69f8b1963b2c457451fc60513a..454a262057878235c98e25db1de03bd94ab4c45a 100644 (file)
@@ -113,8 +113,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
 #define timespec_valid(ts) \
        (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
 
-extern seqlock_t xtime_lock;
-
 extern void read_persistent_clock(struct timespec *ts);
 extern void read_boot_clock(struct timespec *ts);
 extern int update_persistent_clock(struct timespec now);
@@ -125,8 +123,9 @@ extern int timekeeping_suspended;
 unsigned long get_seconds(void);
 struct timespec current_kernel_time(void);
 struct timespec __current_kernel_time(void); /* does not take xtime_lock */
-struct timespec __get_wall_to_monotonic(void); /* does not take xtime_lock */
 struct timespec get_monotonic_coarse(void);
+void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
+                               struct timespec *wtom, struct timespec *sleep);
 
 #define CURRENT_TIME           (current_kernel_time())
 #define CURRENT_TIME_SEC       ((struct timespec) { get_seconds(), 0 })
@@ -147,8 +146,9 @@ static inline u32 arch_gettimeoffset(void) { return 0; }
 #endif
 
 extern void do_gettimeofday(struct timeval *tv);
-extern int do_settimeofday(struct timespec *tv);
-extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
+extern int do_settimeofday(const struct timespec *tv);
+extern int do_sys_settimeofday(const struct timespec *tv,
+                              const struct timezone *tz);
 #define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
 extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
 struct itimerval;
@@ -162,12 +162,13 @@ extern void getnstime_raw_and_real(struct timespec *ts_raw,
                struct timespec *ts_real);
 extern void getboottime(struct timespec *ts);
 extern void monotonic_to_bootbased(struct timespec *ts);
+extern void get_monotonic_boottime(struct timespec *ts);
 
 extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
 extern int timekeeping_valid_for_hres(void);
 extern u64 timekeeping_max_deferment(void);
-extern void update_wall_time(void);
 extern void timekeeping_leap_insert(int leapsecond);
+extern int timekeeping_inject_offset(struct timespec *ts);
 
 struct tms;
 extern void do_sys_times(struct tms *);
@@ -292,6 +293,7 @@ struct itimerval {
 #define CLOCK_MONOTONIC_RAW            4
 #define CLOCK_REALTIME_COARSE          5
 #define CLOCK_MONOTONIC_COARSE         6
+#define CLOCK_BOOTTIME                 7
 
 /*
  * The IDs of various hardware clocks:
index d23999f9499d3798a8d5776da0cab3c7bc0e18df..aa60fe7b6ed646f623253c24403e233654d71025 100644 (file)
@@ -73,7 +73,7 @@ struct timex {
        long tolerance;         /* clock frequency tolerance (ppm)
                                 * (read only)
                                 */
-       struct timeval time;    /* (read only) */
+       struct timeval time;    /* (read only, except for ADJ_SETOFFSET) */
        long tick;              /* (modified) usecs between clock ticks */
 
        long ppsfreq;           /* pps frequency (scaled ppm) (ro) */
@@ -102,6 +102,7 @@ struct timex {
 #define ADJ_STATUS             0x0010  /* clock status */
 #define ADJ_TIMECONST          0x0020  /* pll time constant */
 #define ADJ_TAI                        0x0080  /* set TAI offset */
+#define ADJ_SETOFFSET          0x0100  /* add 'time' to current time */
 #define ADJ_MICRO              0x1000  /* select microsecond resolution */
 #define ADJ_NANO               0x2000  /* select nanosecond resolution */
 #define ADJ_TICK               0x4000  /* tick value */
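
ADJ_SETOFFSET makes the otherwise read-only 'time' field writable for a single call: the kernel adds the supplied value to the current time as a one-shot step. A hedged userspace sketch (CAP_SYS_TIME required):

#include <string.h>
#include <sys/timex.h>

int step_clock_500ms(void)
{
	struct timex tx;

	memset(&tx, 0, sizeof(tx));
	tx.modes = ADJ_SETOFFSET;	/* add 'time' to the current time */
	tx.time.tv_sec = 0;
	tx.time.tv_usec = 500000;	/* microseconds unless ADJ_NANO is set */
	return adjtimex(&tx);
}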
index 4a3cd2cd2f5e1c54184391efef96dfd7608e4400..96e50e0ce3ca396700cd9797b932e391338a3b32 100644 (file)
 #define IPV6_ADDR_SCOPE_ORGLOCAL       0x08
 #define IPV6_ADDR_SCOPE_GLOBAL         0x0e
 
+/*
+ *     Addr flags
+ */
+#ifdef __KERNEL__
+#define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \
+       ((a)->s6_addr[1] & 0x10)
+#define IPV6_ADDR_MC_FLAG_PREFIX(a)    \
+       ((a)->s6_addr[1] & 0x20)
+#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a)        \
+       ((a)->s6_addr[1] & 0x40)
+#endif
+
 /*
  *     fragmentation header
  */
index cd85b3bc8327219f1e036698c253034c27765d66..e505358d89993c44720529a9890162a25518248f 100644 (file)
@@ -201,18 +201,8 @@ nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
 }
 #endif
 
-static inline void
-nf_tproxy_put_sock(struct sock *sk)
-{
-       /* TIME_WAIT inet sockets have to be handled differently */
-       if ((sk->sk_protocol == IPPROTO_TCP) && (sk->sk_state == TCP_TIME_WAIT))
-               inet_twsk_put(inet_twsk(sk));
-       else
-               sock_put(sk);
-}
-
 /* assign a socket to the skb -- consumes sk */
-int
+void
 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk);
 
 #endif
index 160a407c19632a2220fa6fc5efc120086fd076ee..04f8556313d5f52c37ff9722ab7c1f38a2f955af 100644 (file)
@@ -199,7 +199,7 @@ struct tcf_proto {
 
 struct qdisc_skb_cb {
        unsigned int            pkt_len;
-       char                    data[];
+       long                    data[];
 };
 
 static inline int qdisc_qlen(struct Qdisc *q)
index 8479b66c067bbab89374f55551e775ff61d47f29..3fd5064dd43a7805c900f28a20a218179836ebbe 100644 (file)
@@ -261,6 +261,7 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev);
 #define CONF_ENABLE_ESR         0x0008
 #define CONF_ENABLE_IOCARD     0x0010 /* auto-enabled if IO resources or IRQ
                                        * (CONF_ENABLE_IRQ) in use */
+#define CONF_ENABLE_ZVCARD     0x0020
 
 /* flags used by pcmcia_loop_config() autoconfiguration */
 #define CONF_AUTO_CHECK_VCC    0x0100 /* check for matching Vcc? */
index b4a0db2307ef439737b805c817a23e4a3b9b3b16..1eeebd534f7e7feea81e1bac43843847fc49e337 100644 (file)
 /*
  * R6 (0x06) - Mic Bias Control 0
  */
-#define WM8903_MICDET_HYST_ENA                  0x0080  /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_MASK             0x0080  /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_SHIFT                 7  /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_WIDTH                 1  /* MICDET_HYST_ENA */
-#define WM8903_MICDET_THR_MASK                  0x0070  /* MICDET_THR - [6:4] */
-#define WM8903_MICDET_THR_SHIFT                      4  /* MICDET_THR - [6:4] */
-#define WM8903_MICDET_THR_WIDTH                      3  /* MICDET_THR - [6:4] */
+#define WM8903_MICDET_THR_MASK                  0x0030  /* MICDET_THR - [5:4] */
+#define WM8903_MICDET_THR_SHIFT                      4  /* MICDET_THR - [5:4] */
+#define WM8903_MICDET_THR_WIDTH                      2  /* MICDET_THR - [5:4] */
 #define WM8903_MICSHORT_THR_MASK                0x000C  /* MICSHORT_THR - [3:2] */
 #define WM8903_MICSHORT_THR_SHIFT                    2  /* MICSHORT_THR - [3:2] */
 #define WM8903_MICSHORT_THR_WIDTH                    2  /* MICSHORT_THR - [3:2] */
index 246940511579fc077bcca9ec4f1a0a358dcd53e1..2e8ec51f061558e5668fb3c8203996407652e334 100644 (file)
@@ -135,6 +135,8 @@ extern void transport_complete_task(struct se_task *, int);
 extern void transport_add_task_to_execute_queue(struct se_task *,
                                                struct se_task *,
                                                struct se_device *);
+extern void transport_remove_task_from_execute_queue(struct se_task *,
+                                               struct se_device *);
 unsigned char *transport_dump_cmd_direction(struct se_cmd *);
 extern void transport_dump_dev_state(struct se_device *, char *, int *);
 extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
index aba421d68f6f56a6054b6b1bcf0d04b73ef83277..78f18adb49c88fee50ef7a6e2bf3dabf23f851a2 100644 (file)
@@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
                                        0 : blk_rq_sectors(rq);
                __entry->errors    = rq->errors;
 
-               blk_fill_rwbs_rq(__entry->rwbs, rq);
+               blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
                blk_dump_cmd(__get_str(cmd), rq);
        ),
 
@@ -118,7 +118,7 @@ DECLARE_EVENT_CLASS(block_rq,
                __entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
                                        blk_rq_bytes(rq) : 0;
 
-               blk_fill_rwbs_rq(__entry->rwbs, rq);
+               blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
                blk_dump_cmd(__get_str(cmd), rq);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
@@ -563,7 +563,7 @@ TRACE_EVENT(block_rq_remap,
                __entry->nr_sector      = blk_rq_sectors(rq);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
-               blk_fill_rwbs_rq(__entry->rwbs, rq);
+               blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
        ),
 
        TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
index 7eee77895cb37475837c546a44aac2358f18e687..4cbbcef6baa89ffc3b53ad724f3500ba93c73fff 100644 (file)
@@ -17,36 +17,36 @@ TRACE_EVENT(mce_record,
        TP_STRUCT__entry(
                __field(        u64,            mcgcap          )
                __field(        u64,            mcgstatus       )
-               __field(        u8,             bank            )
                __field(        u64,            status          )
                __field(        u64,            addr            )
                __field(        u64,            misc            )
                __field(        u64,            ip              )
-               __field(        u8,             cs              )
                __field(        u64,            tsc             )
                __field(        u64,            walltime        )
                __field(        u32,            cpu             )
                __field(        u32,            cpuid           )
                __field(        u32,            apicid          )
                __field(        u32,            socketid        )
+               __field(        u8,             cs              )
+               __field(        u8,             bank            )
                __field(        u8,             cpuvendor       )
        ),
 
        TP_fast_assign(
                __entry->mcgcap         = m->mcgcap;
                __entry->mcgstatus      = m->mcgstatus;
-               __entry->bank           = m->bank;
                __entry->status         = m->status;
                __entry->addr           = m->addr;
                __entry->misc           = m->misc;
                __entry->ip             = m->ip;
-               __entry->cs             = m->cs;
                __entry->tsc            = m->tsc;
                __entry->walltime       = m->time;
                __entry->cpu            = m->extcpu;
                __entry->cpuid          = m->cpuid;
                __entry->apicid         = m->apicid;
                __entry->socketid       = m->socketid;
+               __entry->cs             = m->cs;
+               __entry->bank           = m->bank;
                __entry->cpuvendor      = m->cpuvendor;
        ),
 
index c6bae36547e53ffb3c77b6ab6a0b0d6085beec8c..21a546d27c0c499e371852b1023be7c67a160140 100644 (file)
@@ -108,14 +108,14 @@ TRACE_EVENT(module_request,
        TP_ARGS(name, wait, ip),
 
        TP_STRUCT__entry(
-               __field(        bool,           wait            )
                __field(        unsigned long,  ip              )
+               __field(        bool,           wait            )
                __string(       name,           name            )
        ),
 
        TP_fast_assign(
-               __entry->wait   = wait;
                __entry->ip     = ip;
+               __entry->wait   = wait;
                __assign_str(name, name);
        ),
 
@@ -129,4 +129,3 @@ TRACE_EVENT(module_request,
 
 /* This part must be outside protection */
 #include <trace/define_trace.h>
-
index f10293c41b1e8e690706941d12f654550f61b65b..0c68ae22da22f6acefae7ef346d11e0bec42c533 100644 (file)
@@ -19,14 +19,14 @@ TRACE_EVENT(kfree_skb,
 
        TP_STRUCT__entry(
                __field(        void *,         skbaddr         )
-               __field(        unsigned short, protocol        )
                __field(        void *,         location        )
+               __field(        unsigned short, protocol        )
        ),
 
        TP_fast_assign(
                __entry->skbaddr = skb;
-               __entry->protocol = ntohs(skb->protocol);
                __entry->location = location;
+               __entry->protocol = ntohs(skb->protocol);
        ),
 
        TP_printk("skbaddr=%p protocol=%u location=%p",
index 00f53ddcc06284658d33ff80d66908e2b394d4ca..962da2ced5b4dc358e5980f1429f3fb659ddd054 100644 (file)
@@ -75,11 +75,9 @@ int xen_allocate_pirq(unsigned gsi, int shareable, char *name);
 int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name);
 
 #ifdef CONFIG_PCI_MSI
-/* Allocate an irq and a pirq to be used with MSIs. */
-#define XEN_ALLOC_PIRQ (1 << 0)
-#define XEN_ALLOC_IRQ  (1 << 1)
-void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc_mask);
-int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type);
+int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc);
+int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+                            int pirq, int vector, const char *name);
 #endif
 
 /* De-allocates the above mentioned physical interrupt. */
index c2d1fa4dc1eea34cc845836370cd99f0e77e9eb3..61e523af3c46c8311c354a4fb22e36048ebf2a0a 100644 (file)
@@ -51,11 +51,7 @@ typedef uint64_t blkif_sector_t;
  */
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
-struct blkif_request {
-       uint8_t        operation;    /* BLKIF_OP_???                         */
-       uint8_t        nr_segments;  /* number of segments                   */
-       blkif_vdev_t   handle;       /* only for read/write requests         */
-       uint64_t       id;           /* private guest value, echoed in resp  */
+struct blkif_request_rw {
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment {
                grant_ref_t gref;        /* reference to I/O buffer frame        */
@@ -65,6 +61,16 @@ struct blkif_request {
        } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
+struct blkif_request {
+       uint8_t        operation;    /* BLKIF_OP_???                         */
+       uint8_t        nr_segments;  /* number of segments                   */
+       blkif_vdev_t   handle;       /* only for read/write requests         */
+       uint64_t       id;           /* private guest value, echoed in resp  */
+       union {
+               struct blkif_request_rw rw;
+       } u;
+};
+
 struct blkif_response {
        uint64_t        id;              /* copied from request */
        uint8_t         operation;       /* copied from request */
@@ -91,4 +97,25 @@ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
 #define VDISK_REMOVABLE    0x2
 #define VDISK_READONLY     0x4
 
+/* Xen-defined major numbers for virtual disks; they look strangely
+ * familiar */
+#define XEN_IDE0_MAJOR 3
+#define XEN_IDE1_MAJOR 22
+#define XEN_SCSI_DISK0_MAJOR   8
+#define XEN_SCSI_DISK1_MAJOR   65
+#define XEN_SCSI_DISK2_MAJOR   66
+#define XEN_SCSI_DISK3_MAJOR   67
+#define XEN_SCSI_DISK4_MAJOR   68
+#define XEN_SCSI_DISK5_MAJOR   69
+#define XEN_SCSI_DISK6_MAJOR   70
+#define XEN_SCSI_DISK7_MAJOR   71
+#define XEN_SCSI_DISK8_MAJOR   128
+#define XEN_SCSI_DISK9_MAJOR   129
+#define XEN_SCSI_DISK10_MAJOR  130
+#define XEN_SCSI_DISK11_MAJOR  131
+#define XEN_SCSI_DISK12_MAJOR  132
+#define XEN_SCSI_DISK13_MAJOR  133
+#define XEN_SCSI_DISK14_MAJOR  134
+#define XEN_SCSI_DISK15_MAJOR  135
+
 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
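
With the read/write payload moved into blkif_request_rw inside a union, frontends now reach the sector and segment fields through 'u.rw'. A hedged sketch of filling a one-segment read; ring and grant setup are omitted and the values are illustrative:

#include <xen/interface/io/blkif.h>

static void fill_read_req(struct blkif_request *req, blkif_vdev_t handle,
			  uint64_t id, blkif_sector_t sector, grant_ref_t gref)
{
	req->operation = BLKIF_OP_READ;
	req->nr_segments = 1;
	req->handle = handle;
	req->id = id;
	req->u.rw.sector_number = sector;	/* now behind the union */
	req->u.rw.seg[0].gref = gref;
	req->u.rw.seg[0].first_sect = 0;
	req->u.rw.seg[0].last_sect = 7;		/* 8 sectors = one 4 KiB page */
}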
index 2befa3e2f1bce59edaddfb25ce1b301aa6e554e3..b33257bc7e83b58562dd5abc9f12cd89eadbc813 100644 (file)
@@ -30,7 +30,7 @@
 #define __HYPERVISOR_stack_switch          3
 #define __HYPERVISOR_set_callbacks         4
 #define __HYPERVISOR_fpu_taskswitch        5
-#define __HYPERVISOR_sched_op              6
+#define __HYPERVISOR_sched_op_compat       6
 #define __HYPERVISOR_dom0_op               7
 #define __HYPERVISOR_set_debugreg          8
 #define __HYPERVISOR_get_debugreg          9
@@ -52,7 +52,7 @@
 #define __HYPERVISOR_mmuext_op            26
 #define __HYPERVISOR_acm_op               27
 #define __HYPERVISOR_nmi_op               28
-#define __HYPERVISOR_sched_op_new         29
+#define __HYPERVISOR_sched_op             29
 #define __HYPERVISOR_callback_op          30
 #define __HYPERVISOR_xenoprof_op          31
 #define __HYPERVISOR_event_channel_op     32
index 98b92154a2645407808c74ba9326e735f84b75af..03c85d7387fb7be265dad680dd50ba13b5425341 100644 (file)
@@ -5,9 +5,9 @@
 
 DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
 
-void xen_pre_suspend(void);
-void xen_post_suspend(int suspend_cancelled);
-void xen_hvm_post_suspend(int suspend_cancelled);
+void xen_arch_pre_suspend(void);
+void xen_arch_post_suspend(int suspend_cancelled);
+void xen_arch_hvm_post_suspend(int suspend_cancelled);
 
 void xen_mm_pin_all(void);
 void xen_mm_unpin_all(void);
index be788c0957d4abac813eef18abe7822177542969..5721d27af626adb6974c92bd1065adb27fa5748c 100644 (file)
@@ -287,6 +287,18 @@ config BSD_PROCESS_ACCT_V3
          for processing it. A preliminary version of these tools is available
          at <http://www.gnu.org/software/acct/>.
 
+config FHANDLE
+       bool "open by fhandle syscalls"
+       select EXPORTFS
+       help
+         If you say Y here, a user-level program will be able to map
+         file names to handles and then later use a handle for
+         different file system operations. This is useful for
+         userspace file servers, which can then track files by handle
+         instead of by name; a handle remains valid even if the file
+         is renamed. Enables the open_by_handle_at(2) and
+         name_to_handle_at(2) syscalls.
+
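
A hedged userspace sketch of the pair of syscalls this option enables, written against the libc wrappers that later appeared for them. Here AT_FDCWD serves as both the lookup directory and the mount reference, and open_by_handle_at() requires CAP_DAC_READ_SEARCH:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>

int reopen_by_handle(const char *name)
{
	struct file_handle *fh;
	int mount_id, fd;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return -1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	if (name_to_handle_at(AT_FDCWD, name, fh, &mount_id, 0) < 0) {
		free(fh);
		return -1;
	}
	/* the handle stays valid even if 'name' is renamed */
	fd = open_by_handle_at(AT_FDCWD, fh, O_RDONLY);
	free(fh);
	return fd;
}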
 config TASKSTATS
        bool "Export task/process statistics through netlink (EXPERIMENTAL)"
        depends on NET
@@ -683,6 +695,16 @@ config CGROUP_MEM_RES_CTLR_SWAP_ENABLED
          select this option (if, for some reason, they need to disable it
          then noswapaccount does the trick).
 
+config CGROUP_PERF
+       bool "Enable perf_event per-cpu per-container group (cgroup) monitoring"
+       depends on PERF_EVENTS && CGROUPS
+       help
+         This option extends the per-cpu mode to restrict monitoring to
+         threads which belong to the cgroup specified and run on the
+         designated cpu.
+
+         Say N if unsure.
+
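
A hedged userspace sketch of what this enables: perf_event_open() accepts a cgroupfs directory fd in place of a pid when PERF_FLAG_PID_CGROUP is passed (the flag is assumed to come from the perf cgroup support added in this merge window; the cgroup path is illustrative):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <fcntl.h>
#include <unistd.h>

static int count_cgroup_cycles(const char *cgrp_dir, int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.size	= sizeof(attr),
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	int cgrp_fd = open(cgrp_dir, O_RDONLY);	/* perf cgroupfs dir */

	if (cgrp_fd < 0)
		return -1;
	/* the cgroup fd takes the place of a pid */
	return syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu,
		       -1, PERF_FLAG_PID_CGROUP);
}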
 menuconfig CGROUP_SCHED
        bool "Group CPU scheduler"
        depends on EXPERIMENTAL
index d2e3c786646055e1bbf5442b7d68090dc02b5681..e683869365d9bdaf60abcb44e37da05506ab5672 100644 (file)
@@ -144,9 +144,9 @@ int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev)
 }
 
 /* Initialize a parent watch entry. */
-static struct audit_parent *audit_init_parent(struct nameidata *ndp)
+static struct audit_parent *audit_init_parent(struct path *path)
 {
-       struct inode *inode = ndp->path.dentry->d_inode;
+       struct inode *inode = path->dentry->d_inode;
        struct audit_parent *parent;
        int ret;
 
@@ -353,53 +353,40 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
 }
 
 /* Get path information necessary for adding watches. */
-static int audit_get_nd(char *path, struct nameidata **ndp, struct nameidata **ndw)
+static int audit_get_nd(struct audit_watch *watch, struct path *parent)
 {
-       struct nameidata *ndparent, *ndwatch;
+       struct nameidata nd;
+       struct dentry *d;
        int err;
 
-       ndparent = kmalloc(sizeof(*ndparent), GFP_KERNEL);
-       if (unlikely(!ndparent))
-               return -ENOMEM;
+       err = kern_path_parent(watch->path, &nd);
+       if (err)
+               return err;
 
-       ndwatch = kmalloc(sizeof(*ndwatch), GFP_KERNEL);
-       if (unlikely(!ndwatch)) {
-               kfree(ndparent);
-               return -ENOMEM;
+       if (nd.last_type != LAST_NORM) {
+               path_put(&nd.path);
+               return -EINVAL;
        }
 
-       err = path_lookup(path, LOOKUP_PARENT, ndparent);
-       if (err) {
-               kfree(ndparent);
-               kfree(ndwatch);
-               return err;
+       mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+       d = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
+       if (IS_ERR(d)) {
+               mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+               path_put(&nd.path);
+               return PTR_ERR(d);
        }
-
-       err = path_lookup(path, 0, ndwatch);
-       if (err) {
-               kfree(ndwatch);
-               ndwatch = NULL;
+       if (d->d_inode) {
+               /* update watch filter fields */
+               watch->dev = d->d_inode->i_sb->s_dev;
+               watch->ino = d->d_inode->i_ino;
        }
+       mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
 
-       *ndp = ndparent;
-       *ndw = ndwatch;
-
+       *parent = nd.path;
+       dput(d);
        return 0;
 }
 
-/* Release resources used for watch path information. */
-static void audit_put_nd(struct nameidata *ndp, struct nameidata *ndw)
-{
-       if (ndp) {
-               path_put(&ndp->path);
-               kfree(ndp);
-       }
-       if (ndw) {
-               path_put(&ndw->path);
-               kfree(ndw);
-       }
-}
-
 /* Associate the given rule with an existing parent.
  * Caller must hold audit_filter_mutex. */
 static void audit_add_to_parent(struct audit_krule *krule,
@@ -440,31 +427,24 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list)
 {
        struct audit_watch *watch = krule->watch;
        struct audit_parent *parent;
-       struct nameidata *ndp = NULL, *ndw = NULL;
+       struct path parent_path;
        int h, ret = 0;
 
        mutex_unlock(&audit_filter_mutex);
 
        /* Avoid calling path_lookup under audit_filter_mutex. */
-       ret = audit_get_nd(watch->path, &ndp, &ndw);
-       if (ret) {
-               /* caller expects mutex locked */
-               mutex_lock(&audit_filter_mutex);
-               goto error;
-       }
+       ret = audit_get_nd(watch, &parent_path);
 
+       /* caller expects mutex locked */
        mutex_lock(&audit_filter_mutex);
 
-       /* update watch filter fields */
-       if (ndw) {
-               watch->dev = ndw->path.dentry->d_inode->i_sb->s_dev;
-               watch->ino = ndw->path.dentry->d_inode->i_ino;
-       }
+       if (ret)
+               return ret;
 
        /* either find an old parent or attach a new one */
-       parent = audit_find_parent(ndp->path.dentry->d_inode);
+       parent = audit_find_parent(parent_path.dentry->d_inode);
        if (!parent) {
-               parent = audit_init_parent(ndp);
+               parent = audit_init_parent(&parent_path);
                if (IS_ERR(parent)) {
                        ret = PTR_ERR(parent);
                        goto error;
@@ -479,9 +459,8 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list)
        h = audit_hash_ino((u32)watch->ino);
        *list = &audit_inode_hash[h];
 error:
-       audit_put_nd(ndp, ndw);         /* NULL args OK */
+       path_put(&parent_path);
        return ret;
-
 }
 
 void audit_remove_watch_rule(struct audit_krule *krule)
index b24d7027b83c29b067bdc56f27ae28bf2d16c464..95362d15128cb7b40ad2bddc35add5adb76c00cb 100644 (file)
@@ -4230,20 +4230,8 @@ void cgroup_post_fork(struct task_struct *child)
  */
 void cgroup_exit(struct task_struct *tsk, int run_callbacks)
 {
-       int i;
        struct css_set *cg;
-
-       if (run_callbacks && need_forkexit_callback) {
-               /*
-                * modular subsystems can't use callbacks, so no need to lock
-                * the subsys array
-                */
-               for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
-                       struct cgroup_subsys *ss = subsys[i];
-                       if (ss->exit)
-                               ss->exit(ss, tsk);
-               }
-       }
+       int i;
 
        /*
         * Unlink from the css_set task list if necessary.
@@ -4261,7 +4249,24 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
        task_lock(tsk);
        cg = tsk->cgroups;
        tsk->cgroups = &init_css_set;
+
+       if (run_callbacks && need_forkexit_callback) {
+               /*
+                * modular subsystems can't use callbacks, so no need to lock
+                * the subsys array
+                */
+               for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
+                       struct cgroup_subsys *ss = subsys[i];
+                       if (ss->exit) {
+                               struct cgroup *old_cgrp =
+                                       rcu_dereference_raw(cg->subsys[i])->cgroup;
+                               struct cgroup *cgrp = task_cgroup(tsk, i);
+                               ss->exit(ss, cgrp, old_cgrp, tsk);
+                       }
+               }
+       }
        task_unlock(tsk);
+
        if (cg)
                put_css_set_taskexit(cg);
 }
@@ -4813,6 +4818,29 @@ css_get_next(struct cgroup_subsys *ss, int id,
        return ret;
 }
 
+/*
+ * get corresponding css from file open on cgroupfs directory
+ */
+struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
+{
+       struct cgroup *cgrp;
+       struct inode *inode;
+       struct cgroup_subsys_state *css;
+
+       inode = f->f_dentry->d_inode;
+       /* check in cgroup filesystem dir */
+       if (inode->i_op != &cgroup_dir_inode_operations)
+               return ERR_PTR(-EBADF);
+
+       if (id < 0 || id >= CGROUP_SUBSYS_COUNT)
+               return ERR_PTR(-EINVAL);
+
+       /* get cgroup */
+       cgrp = __d_cgrp(f->f_dentry);
+       css = cgrp->subsys[id];
+       return css ? css : ERR_PTR(-ENOENT);
+}
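
A sketch of how a caller might consume the new helper, following the ERR_PTR() convention established above (file and subsys_id are assumed to come from the caller):

	struct cgroup_subsys_state *css;

	css = cgroup_css_from_dir(file, subsys_id);
	if (IS_ERR(css))
		return PTR_ERR(css);	/* -EBADF, -EINVAL or -ENOENT, as above */
	/* css stays valid while 'file' pins the cgroup directory */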
+
 #ifdef CONFIG_CGROUP_DEBUG
 static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
                                                   struct cgroup *cont)
index c9e2ec0b34a8cc38cda604273618a22c8d2d928c..38b1d2c1cbe80bd88371a09412b5e52abe86afc3 100644
@@ -52,6 +52,64 @@ static int compat_put_timeval(struct compat_timeval __user *o,
                put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0;
 }
 
+static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp)
+{
+       memset(txc, 0, sizeof(struct timex));
+
+       if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
+                       __get_user(txc->modes, &utp->modes) ||
+                       __get_user(txc->offset, &utp->offset) ||
+                       __get_user(txc->freq, &utp->freq) ||
+                       __get_user(txc->maxerror, &utp->maxerror) ||
+                       __get_user(txc->esterror, &utp->esterror) ||
+                       __get_user(txc->status, &utp->status) ||
+                       __get_user(txc->constant, &utp->constant) ||
+                       __get_user(txc->precision, &utp->precision) ||
+                       __get_user(txc->tolerance, &utp->tolerance) ||
+                       __get_user(txc->time.tv_sec, &utp->time.tv_sec) ||
+                       __get_user(txc->time.tv_usec, &utp->time.tv_usec) ||
+                       __get_user(txc->tick, &utp->tick) ||
+                       __get_user(txc->ppsfreq, &utp->ppsfreq) ||
+                       __get_user(txc->jitter, &utp->jitter) ||
+                       __get_user(txc->shift, &utp->shift) ||
+                       __get_user(txc->stabil, &utp->stabil) ||
+                       __get_user(txc->jitcnt, &utp->jitcnt) ||
+                       __get_user(txc->calcnt, &utp->calcnt) ||
+                       __get_user(txc->errcnt, &utp->errcnt) ||
+                       __get_user(txc->stbcnt, &utp->stbcnt))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int compat_put_timex(struct compat_timex __user *utp, struct timex *txc)
+{
+       if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
+                       __put_user(txc->modes, &utp->modes) ||
+                       __put_user(txc->offset, &utp->offset) ||
+                       __put_user(txc->freq, &utp->freq) ||
+                       __put_user(txc->maxerror, &utp->maxerror) ||
+                       __put_user(txc->esterror, &utp->esterror) ||
+                       __put_user(txc->status, &utp->status) ||
+                       __put_user(txc->constant, &utp->constant) ||
+                       __put_user(txc->precision, &utp->precision) ||
+                       __put_user(txc->tolerance, &utp->tolerance) ||
+                       __put_user(txc->time.tv_sec, &utp->time.tv_sec) ||
+                       __put_user(txc->time.tv_usec, &utp->time.tv_usec) ||
+                       __put_user(txc->tick, &utp->tick) ||
+                       __put_user(txc->ppsfreq, &utp->ppsfreq) ||
+                       __put_user(txc->jitter, &utp->jitter) ||
+                       __put_user(txc->shift, &utp->shift) ||
+                       __put_user(txc->stabil, &utp->stabil) ||
+                       __put_user(txc->jitcnt, &utp->jitcnt) ||
+                       __put_user(txc->calcnt, &utp->calcnt) ||
+                       __put_user(txc->errcnt, &utp->errcnt) ||
+                       __put_user(txc->stbcnt, &utp->stbcnt) ||
+                       __put_user(txc->tai, &utp->tai))
+               return -EFAULT;
+       return 0;
+}
+
 asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
                struct timezone __user *tz)
 {
@@ -617,6 +675,29 @@ long compat_sys_clock_gettime(clockid_t which_clock,
        return err;
 }
 
+long compat_sys_clock_adjtime(clockid_t which_clock,
+               struct compat_timex __user *utp)
+{
+       struct timex txc;
+       mm_segment_t oldfs;
+       int err, ret;
+
+       err = compat_get_timex(&txc, utp);
+       if (err)
+               return err;
+
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
+       set_fs(oldfs);
+
+       err = compat_put_timex(utp, &txc);
+       if (err)
+               return err;
+
+       return ret;
+}
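
With compat_get_timex()/compat_put_timex() factored out, any timex-based compat syscall reduces to the same three steps: copy in with the 32-bit layout, run the native operation, copy back out. A sketch for a hypothetical syscall (do_foo_adjtime() is invented for illustration):

	long compat_sys_foo_adjtime(struct compat_timex __user *utp)	/* hypothetical */
	{
		struct timex txc;
		int err, ret;

		err = compat_get_timex(&txc, utp);	/* 32-bit copy-in */
		if (err)
			return err;
		ret = do_foo_adjtime(&txc);		/* hypothetical native op */
		err = compat_put_timex(utp, &txc);	/* 32-bit copy-out */
		return err ? err : ret;
	}

compat_sys_clock_adjtime() above additionally needs the get_fs()/set_fs(KERNEL_DS) dance because it calls the real syscall entry point, whose __user-annotated argument would otherwise fail the access checks for a kernel pointer.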
+
 long compat_sys_clock_getres(clockid_t which_clock,
                struct compat_timespec __user *tp)
 {
@@ -951,58 +1032,17 @@ asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat
 asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
 {
        struct timex txc;
-       int ret;
-
-       memset(&txc, 0, sizeof(struct timex));
+       int err, ret;
 
-       if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
-                       __get_user(txc.modes, &utp->modes) ||
-                       __get_user(txc.offset, &utp->offset) ||
-                       __get_user(txc.freq, &utp->freq) ||
-                       __get_user(txc.maxerror, &utp->maxerror) ||
-                       __get_user(txc.esterror, &utp->esterror) ||
-                       __get_user(txc.status, &utp->status) ||
-                       __get_user(txc.constant, &utp->constant) ||
-                       __get_user(txc.precision, &utp->precision) ||
-                       __get_user(txc.tolerance, &utp->tolerance) ||
-                       __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
-                       __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
-                       __get_user(txc.tick, &utp->tick) ||
-                       __get_user(txc.ppsfreq, &utp->ppsfreq) ||
-                       __get_user(txc.jitter, &utp->jitter) ||
-                       __get_user(txc.shift, &utp->shift) ||
-                       __get_user(txc.stabil, &utp->stabil) ||
-                       __get_user(txc.jitcnt, &utp->jitcnt) ||
-                       __get_user(txc.calcnt, &utp->calcnt) ||
-                       __get_user(txc.errcnt, &utp->errcnt) ||
-                       __get_user(txc.stbcnt, &utp->stbcnt))
-               return -EFAULT;
+       err = compat_get_timex(&txc, utp);
+       if (err)
+               return err;
 
        ret = do_adjtimex(&txc);
 
-       if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
-                       __put_user(txc.modes, &utp->modes) ||
-                       __put_user(txc.offset, &utp->offset) ||
-                       __put_user(txc.freq, &utp->freq) ||
-                       __put_user(txc.maxerror, &utp->maxerror) ||
-                       __put_user(txc.esterror, &utp->esterror) ||
-                       __put_user(txc.status, &utp->status) ||
-                       __put_user(txc.constant, &utp->constant) ||
-                       __put_user(txc.precision, &utp->precision) ||
-                       __put_user(txc.tolerance, &utp->tolerance) ||
-                       __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
-                       __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
-                       __put_user(txc.tick, &utp->tick) ||
-                       __put_user(txc.ppsfreq, &utp->ppsfreq) ||
-                       __put_user(txc.jitter, &utp->jitter) ||
-                       __put_user(txc.shift, &utp->shift) ||
-                       __put_user(txc.stabil, &utp->stabil) ||
-                       __put_user(txc.jitcnt, &utp->jitcnt) ||
-                       __put_user(txc.calcnt, &utp->calcnt) ||
-                       __put_user(txc.errcnt, &utp->errcnt) ||
-                       __put_user(txc.stbcnt, &utp->stbcnt) ||
-                       __put_user(txc.tai, &utp->tai))
-               ret = -EFAULT;
+       err = compat_put_timex(utp, &txc);
+       if (err)
+               return err;
 
        return ret;
 }
index 4349935c2ad8b1a252e18ce18a2be8b638a6a075..e92e981890321a7cc73beb11a8b32edf19d6a17c 100644
@@ -1575,8 +1575,10 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
                return -ENODEV;
 
        trialcs = alloc_trial_cpuset(cs);
-       if (!trialcs)
-               return -ENOMEM;
+       if (!trialcs) {
+               retval = -ENOMEM;
+               goto out;
+       }
 
        switch (cft->private) {
        case FILE_CPULIST:
@@ -1591,6 +1593,7 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
        }
 
        free_trial_cpuset(trialcs);
+out:
        cgroup_unlock();
        return retval;
 }
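
The fix here routes the allocation failure through the unlock path: the cgroup lock is already held at this point (taken before the -ENODEV check visible at the top of the hunk, presumably via cgroup_lock_live_group(), which is outside the diff), so a bare return leaked the lock. The idiom in isolation, as a sketch with hypothetical names:

	int op(void)
	{
		int retval = 0;
		void *p;

		mutex_lock(&some_lock);		/* hypothetical lock */
		p = kmalloc(SZ, GFP_KERNEL);
		if (!p) {
			retval = -ENOMEM;
			goto out;		/* never return with the lock held */
		}
		/* ... do the work ... */
		kfree(p);
	out:
		mutex_unlock(&some_lock);
		return retval;
	}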
index 3a9d6dd53a6cd79696fe88784d27fe3c07c27aaf..2343c132c5a7f45556bf156388705ac921fa84e0 100644
@@ -35,7 +35,7 @@ static struct kmem_cache *cred_jar;
 static struct thread_group_cred init_tgcred = {
        .usage  = ATOMIC_INIT(2),
        .tgid   = 0,
-       .lock   = SPIN_LOCK_UNLOCKED,
+       .lock   = __SPIN_LOCK_UNLOCKED(init_cred.tgcred.lock),
 };
 #endif
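
SPIN_LOCK_UNLOCKED had to go because a shared static initializer gives every such lock the same lockdep class; __SPIN_LOCK_UNLOCKED(name) embeds a unique name instead. A sketch of the two idiomatic forms (my_lock/my_obj are invented names):

	/* standalone static lock: */
	static DEFINE_SPINLOCK(my_lock);

	/* lock embedded in a static object, as with init_tgcred above: */
	static struct my_obj obj = {
		.lock = __SPIN_LOCK_UNLOCKED(obj.lock),	/* name feeds lockdep */
	};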
 
index b766d28accd6be8dc2b735de11aa004514b8f91b..bda41571538263394eb468e486abc00e6e46ae96 100644
@@ -381,15 +381,16 @@ static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
        return NULL;
 }
 
-static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
+static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
+                                     u32 uval, u32 newval)
 {
-       u32 curval;
+       int ret;
 
        pagefault_disable();
-       curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
+       ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
        pagefault_enable();
 
-       return curval;
+       return ret;
 }
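
The new convention separates the fault status (the return value) from the witnessed futex value (*curval), so a legitimate stored value that happens to equal (u32)-EFAULT can no longer be mistaken for a fault. Every converted call site in this patch follows the same shape:

	u32 curval;

	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
		return -EFAULT;		/* the user access faulted */
	if (curval != uval)
		goto retry;		/* lost the race: value changed under us */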
 
 static int get_futex_value_locked(u32 *dest, u32 __user *from)
@@ -674,7 +675,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
                                struct task_struct *task, int set_waiters)
 {
        int lock_taken, ret, ownerdied = 0;
-       u32 uval, newval, curval;
+       u32 uval, newval, curval, vpid = task_pid_vnr(task);
 
 retry:
        ret = lock_taken = 0;
@@ -684,19 +685,17 @@ retry:
         * (by doing a 0 -> TID atomic cmpxchg), while holding all
         * the locks. It will most likely not succeed.
         */
-       newval = task_pid_vnr(task);
+       newval = vpid;
        if (set_waiters)
                newval |= FUTEX_WAITERS;
 
-       curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
-
-       if (unlikely(curval == -EFAULT))
+       if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
                return -EFAULT;
 
        /*
         * Detect deadlocks.
         */
-       if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task))))
+       if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
                return -EDEADLK;
 
        /*
@@ -723,14 +722,12 @@ retry:
         */
        if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
                /* Keep the OWNER_DIED bit */
-               newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
+               newval = (curval & ~FUTEX_TID_MASK) | vpid;
                ownerdied = 0;
                lock_taken = 1;
        }
 
-       curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
-
-       if (unlikely(curval == -EFAULT))
+       if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
                return -EFAULT;
        if (unlikely(curval != uval))
                goto retry;
@@ -775,6 +772,24 @@ retry:
        return ret;
 }
 
+/**
+ * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
+ * @q: The futex_q to unqueue
+ *
+ * The q->lock_ptr must not be NULL and must be held by the caller.
+ */
+static void __unqueue_futex(struct futex_q *q)
+{
+       struct futex_hash_bucket *hb;
+
+       if (WARN_ON(!q->lock_ptr || !spin_is_locked(q->lock_ptr)
+                       || plist_node_empty(&q->list)))
+               return;
+
+       hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
+       plist_del(&q->list, &hb->chain);
+}
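
__unqueue_futex() recovers the hash bucket from the lock pointer with container_of(), which is what lets the CONFIG_DEBUG_PI_LIST back-pointer assignments below be deleted. Schematically, with an abridged bucket layout:

	struct futex_hash_bucket {	/* abridged */
		spinlock_t lock;
		struct plist_head chain;
	};

	/* given lp == &hb->lock, recover the enclosing bucket: */
	struct futex_hash_bucket *hb =
		container_of(lp, struct futex_hash_bucket, lock);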
+
 /*
  * The hash bucket lock must be held when this is called.
  * Afterwards, the futex_q must not be accessed.
@@ -792,7 +807,7 @@ static void wake_futex(struct futex_q *q)
         */
        get_task_struct(p);
 
-       plist_del(&q->list, &q->list.plist);
+       __unqueue_futex(q);
        /*
         * The waiting task can free the futex_q as soon as
         * q->lock_ptr = NULL is written, without taking any locks. A
@@ -843,9 +858,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 
                newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
 
-               curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
-
-               if (curval == -EFAULT)
+               if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
                        ret = -EFAULT;
                else if (curval != uval)
                        ret = -EINVAL;
@@ -880,10 +893,8 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
         * There is no waiter, so we unlock the futex. The owner died
         * bit need not be preserved here. We are the owner:
         */
-       oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);
-
-       if (oldval == -EFAULT)
-               return oldval;
+       if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
+               return -EFAULT;
        if (oldval != uval)
                return -EAGAIN;
 
@@ -1071,9 +1082,6 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
                plist_del(&q->list, &hb1->chain);
                plist_add(&q->list, &hb2->chain);
                q->lock_ptr = &hb2->lock;
-#ifdef CONFIG_DEBUG_PI_LIST
-               q->list.plist.spinlock = &hb2->lock;
-#endif
        }
        get_futex_key_refs(key2);
        q->key = *key2;
@@ -1100,16 +1108,12 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
        get_futex_key_refs(key);
        q->key = *key;
 
-       WARN_ON(plist_node_empty(&q->list));
-       plist_del(&q->list, &q->list.plist);
+       __unqueue_futex(q);
 
        WARN_ON(!q->rt_waiter);
        q->rt_waiter = NULL;
 
        q->lock_ptr = &hb->lock;
-#ifdef CONFIG_DEBUG_PI_LIST
-       q->list.plist.spinlock = &hb->lock;
-#endif
 
        wake_up_state(q->task, TASK_NORMAL);
 }
@@ -1457,9 +1461,6 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
        prio = min(current->normal_prio, MAX_RT_PRIO);
 
        plist_node_init(&q->list, prio);
-#ifdef CONFIG_DEBUG_PI_LIST
-       q->list.plist.spinlock = &hb->lock;
-#endif
        plist_add(&q->list, &hb->chain);
        q->task = current;
        spin_unlock(&hb->lock);
@@ -1504,8 +1505,7 @@ retry:
                        spin_unlock(lock_ptr);
                        goto retry;
                }
-               WARN_ON(plist_node_empty(&q->list));
-               plist_del(&q->list, &q->list.plist);
+               __unqueue_futex(q);
 
                BUG_ON(q->pi_state);
 
@@ -1525,8 +1525,7 @@ retry:
 static void unqueue_me_pi(struct futex_q *q)
        __releases(q->lock_ptr)
 {
-       WARN_ON(plist_node_empty(&q->list));
-       plist_del(&q->list, &q->list.plist);
+       __unqueue_futex(q);
 
        BUG_ON(!q->pi_state);
        free_pi_state(q->pi_state);
@@ -1556,10 +1555,10 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 
        /*
         * We are here either because we stole the rtmutex from the
-        * pending owner or we are the pending owner which failed to
-        * get the rtmutex. We have to replace the pending owner TID
-        * in the user space variable. This must be atomic as we have
-        * to preserve the owner died bit here.
+        * previous highest priority waiter or we are the highest priority
+        * waiter but failed to get the rtmutex the first time.
+        * We have to replace the newowner TID in the user space variable.
+        * This must be atomic as we have to preserve the owner died bit here.
         *
         * Note: We write the user space value _before_ changing the pi_state
         * because we can fault here. Imagine swapped out pages or a fork
@@ -1578,9 +1577,7 @@ retry:
        while (1) {
                newval = (uval & FUTEX_OWNER_DIED) | newtid;
 
-               curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
-
-               if (curval == -EFAULT)
+               if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
                        goto handle_fault;
                if (curval == uval)
                        break;
@@ -1608,8 +1605,8 @@ retry:
 
        /*
         * To handle the page fault we need to drop the hash bucket
-        * lock here. That gives the other task (either the pending
-        * owner itself or the task which stole the rtmutex) the
+        * lock here. That gives the other task (either the highest priority
+        * waiter itself or the task which stole the rtmutex) the
         * chance to try the fixup of the pi_state. So once we are
         * back from handling the fault we need to check the pi_state
         * after reacquiring the hash bucket lock and before trying to
@@ -1685,18 +1682,20 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
                /*
                 * pi_state is incorrect, some other task did a lock steal and
                 * we returned due to timeout or signal without taking the
-                * rt_mutex. Too late. We can access the rt_mutex_owner without
-                * locking, as the other task is now blocked on the hash bucket
-                * lock. Fix the state up.
+                * rt_mutex. Too late.
                 */
+               raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
                owner = rt_mutex_owner(&q->pi_state->pi_mutex);
+               if (!owner)
+                       owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
+               raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
                ret = fixup_pi_state_owner(uaddr, q, owner);
                goto out;
        }
 
        /*
         * Paranoia check. If we did not take the lock, then we should not be
-        * the owner, nor the pending owner, of the rt_mutex.
+        * the owner of the rt_mutex.
         */
        if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
                printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
@@ -1781,13 +1780,14 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
         *
         * The basic logical guarantee of a futex is that it blocks ONLY
         * if cond(var) is known to be true at the time of blocking, for
-        * any cond.  If we queued after testing *uaddr, that would open
-        * a race condition where we could block indefinitely with
+        * any cond.  If we locked the hash-bucket after testing *uaddr, that
+        * would open a race condition where we could block indefinitely with
         * cond(var) false, which would violate the guarantee.
         *
-        * A consequence is that futex_wait() can return zero and absorb
-        * a wakeup when *uaddr != val on entry to the syscall.  This is
-        * rare, but normal.
+        * On the other hand, we insert q and release the hash-bucket only
+        * after testing *uaddr.  This guarantees that futex_wait() will NOT
+        * absorb a wakeup if *uaddr does not match the desired value
+        * while the syscall executes.
         */
 retry:
        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key);
@@ -2046,9 +2046,9 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
 {
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
-       u32 uval;
        struct plist_head *head;
        union futex_key key = FUTEX_KEY_INIT;
+       u32 uval, vpid = task_pid_vnr(current);
        int ret;
 
 retry:
@@ -2057,7 +2057,7 @@ retry:
        /*
         * We release only a lock we actually own:
         */
-       if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
+       if ((uval & FUTEX_TID_MASK) != vpid)
                return -EPERM;
 
        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
@@ -2072,17 +2072,14 @@ retry:
         * again. If it succeeds then we can return without waking
         * anyone else up:
         */
-       if (!(uval & FUTEX_OWNER_DIED))
-               uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
-
-
-       if (unlikely(uval == -EFAULT))
+       if (!(uval & FUTEX_OWNER_DIED) &&
+           cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
                goto pi_faulted;
        /*
         * Rare case: we managed to release the lock atomically,
         * no need to wake anyone else up:
         */
-       if (unlikely(uval == task_pid_vnr(current)))
+       if (unlikely(uval == vpid))
                goto out_unlock;
 
        /*
@@ -2167,7 +2164,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
                 * We were woken prior to requeue by a timeout or a signal.
                 * Unqueue the futex_q and determine which it was.
                 */
-               plist_del(&q->list, &q->list.plist);
+               plist_del(&q->list, &hb->chain);
 
                /* Handle spurious wakeups gracefully */
                ret = -EWOULDBLOCK;
@@ -2463,11 +2460,20 @@ retry:
                 * userspace.
                 */
                mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
-               nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
-
-               if (nval == -EFAULT)
-                       return -1;
-
+               /*
+                * We are not holding a lock here, but we want to have
+                * the pagefault_disable/enable() protection because
+                * we want to handle the fault gracefully. If the
+                * access fails we try to fault in the futex with R/W
+                * verification via get_user_pages. get_user() above
+                * does not guarantee R/W access. If that fails we
+                * give up and leave the futex locked.
+                */
+               if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
+                       if (fault_in_user_writeable(uaddr))
+                               return -1;
+                       goto retry;
+               }
                if (nval != uval)
                        goto retry;
 
@@ -2678,8 +2684,7 @@ static int __init futex_init(void)
         * implementation, the non-functional ones will return
         * -ENOSYS.
         */
-       curval = cmpxchg_futex_value_locked(NULL, 0, 0);
-       if (curval == -EFAULT)
+       if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
                futex_cmpxchg_enabled = 1;
 
        for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
index 0c8d7c04861542102960852c60150c6771180553..9017478c5d4c3f224cde995518976ed8d88d79d5 100644
 /*
  * The timer bases:
  *
- * Note: If we want to add new timer bases, we have to skip the two
- * clock ids captured by the cpu-timers. We do this by holding empty
- * entries rather than doing math adjustment of the clock ids.
- * This ensures that we capture erroneous accesses to these clock ids
- * rather than moving them into the range of valid clock id's.
+ * There are more clockids than hrtimer bases. Thus, we index
+ * into the timer bases by the hrtimer_base_type enum. When trying
+ * to reach a base using a clockid, hrtimer_clockid_to_base()
+ * is used to convert from clockid to the proper hrtimer_base_type.
  */
 DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 {
@@ -74,30 +73,39 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
                        .get_time = &ktime_get,
                        .resolution = KTIME_LOW_RES,
                },
+               {
+                       .index = CLOCK_BOOTTIME,
+                       .get_time = &ktime_get_boottime,
+                       .resolution = KTIME_LOW_RES,
+               },
        }
 };
 
+static int hrtimer_clock_to_base_table[MAX_CLOCKS];
+
+static inline int hrtimer_clockid_to_base(clockid_t clock_id)
+{
+       return hrtimer_clock_to_base_table[clock_id];
+}
+
+
 /*
  * Get the coarse grained time at the softirq based on xtime and
  * wall_to_monotonic.
  */
 static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
 {
-       ktime_t xtim, tomono;
-       struct timespec xts, tom;
-       unsigned long seq;
+       ktime_t xtim, mono, boot;
+       struct timespec xts, tom, slp;
 
-       do {
-               seq = read_seqbegin(&xtime_lock);
-               xts = __current_kernel_time();
-               tom = __get_wall_to_monotonic();
-       } while (read_seqretry(&xtime_lock, seq));
+       get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp);
 
        xtim = timespec_to_ktime(xts);
-       tomono = timespec_to_ktime(tom);
-       base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
-       base->clock_base[CLOCK_MONOTONIC].softirq_time =
-               ktime_add(xtim, tomono);
+       mono = ktime_add(xtim, timespec_to_ktime(tom));
+       boot = ktime_add(mono, timespec_to_ktime(slp));
+       base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
+       base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
+       base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
 }
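
For reference, the three softirq base times computed above differ only by fixed offsets; a sketch of the relations:

	/*
	 *   MONOTONIC = REALTIME  + wall_to_monotonic
	 *   BOOTTIME  = MONOTONIC + total_sleep_time
	 *
	 * so the boottime base keeps advancing across suspend,
	 * while the monotonic base does not.
	 */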
 
 /*
@@ -184,10 +192,11 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
        struct hrtimer_cpu_base *new_cpu_base;
        int this_cpu = smp_processor_id();
        int cpu = hrtimer_get_target(this_cpu, pinned);
+       int basenum = hrtimer_clockid_to_base(base->index);
 
 again:
        new_cpu_base = &per_cpu(hrtimer_bases, cpu);
-       new_base = &new_cpu_base->clock_base[base->index];
+       new_base = &new_cpu_base->clock_base[basenum];
 
        if (base != new_base) {
                /*
@@ -334,6 +343,11 @@ EXPORT_SYMBOL_GPL(ktime_add_safe);
 
 static struct debug_obj_descr hrtimer_debug_descr;
 
+static void *hrtimer_debug_hint(void *addr)
+{
+       return ((struct hrtimer *) addr)->function;
+}
+
 /*
  * fixup_init is called when:
  * - an active object is initialized
@@ -393,6 +407,7 @@ static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
 
 static struct debug_obj_descr hrtimer_debug_descr = {
        .name           = "hrtimer",
+       .debug_hint     = hrtimer_debug_hint,
        .fixup_init     = hrtimer_fixup_init,
        .fixup_activate = hrtimer_fixup_activate,
        .fixup_free     = hrtimer_fixup_free,
@@ -611,24 +626,23 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 static void retrigger_next_event(void *arg)
 {
        struct hrtimer_cpu_base *base;
-       struct timespec realtime_offset, wtm;
-       unsigned long seq;
+       struct timespec realtime_offset, wtm, sleep;
 
        if (!hrtimer_hres_active())
                return;
 
-       do {
-               seq = read_seqbegin(&xtime_lock);
-               wtm = __get_wall_to_monotonic();
-       } while (read_seqretry(&xtime_lock, seq));
+       get_xtime_and_monotonic_and_sleep_offset(&realtime_offset, &wtm,
+                                                       &sleep);
        set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
 
        base = &__get_cpu_var(hrtimer_bases);
 
        /* Adjust CLOCK_REALTIME offset */
        raw_spin_lock(&base->lock);
-       base->clock_base[CLOCK_REALTIME].offset =
+       base->clock_base[HRTIMER_BASE_REALTIME].offset =
                timespec_to_ktime(realtime_offset);
+       base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
+               timespec_to_ktime(sleep);
 
        hrtimer_force_reprogram(base, 0);
        raw_spin_unlock(&base->lock);
@@ -672,14 +686,6 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
        base->hres_active = 0;
 }
 
-/*
- * Initialize the high resolution related parts of a hrtimer
- */
-static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
-{
-}
-
-
 /*
  * When High resolution timers are active, try to reprogram. Note, that in case
  * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
@@ -725,8 +731,9 @@ static int hrtimer_switch_to_hres(void)
                return 0;
        }
        base->hres_active = 1;
-       base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
-       base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;
+       base->clock_base[HRTIMER_BASE_REALTIME].resolution = KTIME_HIGH_RES;
+       base->clock_base[HRTIMER_BASE_MONOTONIC].resolution = KTIME_HIGH_RES;
+       base->clock_base[HRTIMER_BASE_BOOTTIME].resolution = KTIME_HIGH_RES;
 
        tick_setup_sched_timer();
 
@@ -750,7 +757,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
        return 0;
 }
 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
-static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -1121,6 +1127,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
                           enum hrtimer_mode mode)
 {
        struct hrtimer_cpu_base *cpu_base;
+       int base;
 
        memset(timer, 0, sizeof(struct hrtimer));
 
@@ -1129,8 +1136,8 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
        if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
                clock_id = CLOCK_MONOTONIC;
 
-       timer->base = &cpu_base->clock_base[clock_id];
-       hrtimer_init_timer_hres(timer);
+       base = hrtimer_clockid_to_base(clock_id);
+       timer->base = &cpu_base->clock_base[base];
        timerqueue_init(&timer->node);
 
 #ifdef CONFIG_TIMER_STATS
@@ -1165,9 +1172,10 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
 int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 {
        struct hrtimer_cpu_base *cpu_base;
+       int base = hrtimer_clockid_to_base(which_clock);
 
        cpu_base = &__raw_get_cpu_var(hrtimer_bases);
-       *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);
+       *tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);
 
        return 0;
 }
@@ -1714,6 +1722,10 @@ static struct notifier_block __cpuinitdata hrtimers_nb = {
 
 void __init hrtimers_init(void)
 {
+       hrtimer_clock_to_base_table[CLOCK_REALTIME] = HRTIMER_BASE_REALTIME;
+       hrtimer_clock_to_base_table[CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC;
+       hrtimer_clock_to_base_table[CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME;
+
        hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                          (void *)(long)smp_processor_id());
        register_cpu_notifier(&hrtimers_nb);
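
The clockid-to-base table pattern in miniature (hypothetical names, outside the kernel tree): the static array zero-initializes, so any clockid never assigned in the init function quietly maps to base index 0 rather than being rejected.

	enum base_type { BASE_REALTIME, BASE_MONOTONIC, BASE_BOOTTIME, MAX_BASES };

	static int clock_to_base[MAX_CLOCKS];	/* zero-initialized: unset ids -> 0 */

	static inline int clockid_to_base(int clock_id)
	{
		return clock_to_base[clock_id];	/* O(1), no arithmetic on clock ids */
	}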
index 8e42fec7686d97815c3539d3da260984db63e90f..09bef82d74cb9f25517d781a5cc1070c72b482c1 100644
@@ -1,5 +1,6 @@
+# Select this to activate the generic irq options below
 config HAVE_GENERIC_HARDIRQS
-       def_bool n
+       bool
 
 if HAVE_GENERIC_HARDIRQS
 menu "IRQ subsystem"
@@ -11,26 +12,44 @@ config GENERIC_HARDIRQS
 
 # Select this to disable the deprecated stuff
 config GENERIC_HARDIRQS_NO_DEPRECATED
-       def_bool n
+       bool
+
+config GENERIC_HARDIRQS_NO_COMPAT
+       bool
 
 # Options selectable by the architecture code
+
+# Make sparse irq Kconfig switch below available
 config HAVE_SPARSE_IRQ
-       def_bool n
+       bool
 
+# Enable the generic irq autoprobe mechanism
 config GENERIC_IRQ_PROBE
-       def_bool n
+       bool
+
+# Use the generic /proc/interrupts implementation
+config GENERIC_IRQ_SHOW
+       bool
 
+# Support for delayed migration from interrupt context
 config GENERIC_PENDING_IRQ
-       def_bool n
+       bool
 
+# Alpha specific irq affinity mechanism
 config AUTO_IRQ_AFFINITY
-       def_bool n
-
-config IRQ_PER_CPU
-       def_bool n
+       bool
 
+# Tasklet based software resend for pending interrupts on enable_irq()
 config HARDIRQS_SW_RESEND
-       def_bool n
+       bool
+
+# Preflow handler support for fasteoi (sparc64)
+config IRQ_PREFLOW_FASTEOI
+       bool
+
+# Support forced irq threading
+config IRQ_FORCED_THREADING
+       bool
 
 config SPARSE_IRQ
        bool "Support sparse irq numbering"
index 505798f86c36d774afaa4938f2097ac32cd21e0a..394784c57060257e77666fdb9173697cf1a35092 100644
@@ -17,7 +17,7 @@
 /*
  * Autodetection depends on the fact that any interrupt that
  * comes in on to an unassigned handler will get stuck with
- * "IRQ_WAITING" cleared and the interrupt disabled.
+ * "IRQS_WAITING" cleared and the interrupt disabled.
  */
 static DEFINE_MUTEX(probing_active);
 
@@ -32,7 +32,6 @@ unsigned long probe_irq_on(void)
 {
        struct irq_desc *desc;
        unsigned long mask = 0;
-       unsigned int status;
        int i;
 
        /*
@@ -46,13 +45,7 @@ unsigned long probe_irq_on(void)
         */
        for_each_irq_desc_reverse(i, desc) {
                raw_spin_lock_irq(&desc->lock);
-               if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
-                       /*
-                        * An old-style architecture might still have
-                        * the handle_bad_irq handler there:
-                        */
-                       compat_irq_chip_set_default_handler(desc);
-
+               if (!desc->action && irq_settings_can_probe(desc)) {
                        /*
                         * Some chips need to know about probing in
                         * progress:
@@ -60,7 +53,7 @@ unsigned long probe_irq_on(void)
                        if (desc->irq_data.chip->irq_set_type)
                                desc->irq_data.chip->irq_set_type(&desc->irq_data,
                                                         IRQ_TYPE_PROBE);
-                       desc->irq_data.chip->irq_startup(&desc->irq_data);
+                       irq_startup(desc);
                }
                raw_spin_unlock_irq(&desc->lock);
        }
@@ -75,10 +68,12 @@ unsigned long probe_irq_on(void)
         */
        for_each_irq_desc_reverse(i, desc) {
                raw_spin_lock_irq(&desc->lock);
-               if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
-                       desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
-                       if (desc->irq_data.chip->irq_startup(&desc->irq_data))
-                               desc->status |= IRQ_PENDING;
+               if (!desc->action && irq_settings_can_probe(desc)) {
+                       desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
+                       if (irq_startup(desc)) {
+                               irq_compat_set_pending(desc);
+                               desc->istate |= IRQS_PENDING;
+                       }
                }
                raw_spin_unlock_irq(&desc->lock);
        }
@@ -93,13 +88,12 @@ unsigned long probe_irq_on(void)
         */
        for_each_irq_desc(i, desc) {
                raw_spin_lock_irq(&desc->lock);
-               status = desc->status;
 
-               if (status & IRQ_AUTODETECT) {
+               if (desc->istate & IRQS_AUTODETECT) {
                        /* It triggered already - consider it spurious. */
-                       if (!(status & IRQ_WAITING)) {
-                               desc->status = status & ~IRQ_AUTODETECT;
-                               desc->irq_data.chip->irq_shutdown(&desc->irq_data);
+                       if (!(desc->istate & IRQS_WAITING)) {
+                               desc->istate &= ~IRQS_AUTODETECT;
+                               irq_shutdown(desc);
                        } else
                                if (i < 32)
                                        mask |= 1 << i;
@@ -125,20 +119,18 @@ EXPORT_SYMBOL(probe_irq_on);
  */
 unsigned int probe_irq_mask(unsigned long val)
 {
-       unsigned int status, mask = 0;
+       unsigned int mask = 0;
        struct irq_desc *desc;
        int i;
 
        for_each_irq_desc(i, desc) {
                raw_spin_lock_irq(&desc->lock);
-               status = desc->status;
-
-               if (status & IRQ_AUTODETECT) {
-                       if (i < 16 && !(status & IRQ_WAITING))
+               if (desc->istate & IRQS_AUTODETECT) {
+                       if (i < 16 && !(desc->istate & IRQS_WAITING))
                                mask |= 1 << i;
 
-                       desc->status = status & ~IRQ_AUTODETECT;
-                       desc->irq_data.chip->irq_shutdown(&desc->irq_data);
+                       desc->istate &= ~IRQS_AUTODETECT;
+                       irq_shutdown(desc);
                }
                raw_spin_unlock_irq(&desc->lock);
        }
@@ -169,20 +161,18 @@ int probe_irq_off(unsigned long val)
 {
        int i, irq_found = 0, nr_of_irqs = 0;
        struct irq_desc *desc;
-       unsigned int status;
 
        for_each_irq_desc(i, desc) {
                raw_spin_lock_irq(&desc->lock);
-               status = desc->status;
 
-               if (status & IRQ_AUTODETECT) {
-                       if (!(status & IRQ_WAITING)) {
+               if (desc->istate & IRQS_AUTODETECT) {
+                       if (!(desc->istate & IRQS_WAITING)) {
                                if (!nr_of_irqs)
                                        irq_found = i;
                                nr_of_irqs++;
                        }
-                       desc->status = status & ~IRQ_AUTODETECT;
-                       desc->irq_data.chip->irq_shutdown(&desc->irq_data);
+                       desc->istate &= ~IRQS_AUTODETECT;
+                       irq_shutdown(desc);
                }
                raw_spin_unlock_irq(&desc->lock);
        }
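
The autoprobe API these hunks modernize is driven from a driver roughly as follows (per the probe_irq_off() kernel-doc: a positive return is the one irq that triggered, 0 means none did, negative means several did):

	unsigned long mask;
	int irq;

	mask = probe_irq_on();
	/* ... make the device raise its interrupt ... */
	irq = probe_irq_off(mask);
	if (irq <= 0)
		return -ENODEV;	/* none (0) or multiple (<0) irqs fired */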
index baa5c4acad83cc4bd9efad28719d054afc960012..c9c0601f0615a1eaef8e3c425c61662395987c8e 100644
 #include "internals.h"
 
 /**
- *     set_irq_chip - set the irq chip for an irq
+ *     irq_set_chip - set the irq chip for an irq
  *     @irq:   irq number
  *     @chip:  pointer to irq chip description structure
  */
-int set_irq_chip(unsigned int irq, struct irq_chip *chip)
+int irq_set_chip(unsigned int irq, struct irq_chip *chip)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
 
-       if (!desc) {
-               WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
+       if (!desc)
                return -EINVAL;
-       }
 
        if (!chip)
                chip = &no_irq_chip;
 
-       raw_spin_lock_irqsave(&desc->lock, flags);
        irq_chip_set_defaults(chip);
        desc->irq_data.chip = chip;
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
-
+       irq_put_desc_unlock(desc, flags);
        return 0;
 }
-EXPORT_SYMBOL(set_irq_chip);
+EXPORT_SYMBOL(irq_set_chip);
 
 /**
- *     set_irq_type - set the irq trigger type for an irq
+ *     irq_set_type - set the irq trigger type for an irq
  *     @irq:   irq number
  *     @type:  IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
  */
-int set_irq_type(unsigned int irq, unsigned int type)
+int irq_set_irq_type(unsigned int irq, unsigned int type)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
-       int ret = -ENXIO;
+       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+       int ret = 0;
 
-       if (!desc) {
-               printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
-               return -ENODEV;
-       }
+       if (!desc)
+               return -EINVAL;
 
        type &= IRQ_TYPE_SENSE_MASK;
-       if (type == IRQ_TYPE_NONE)
-               return 0;
-
-       raw_spin_lock_irqsave(&desc->lock, flags);
-       ret = __irq_set_trigger(desc, irq, type);
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
+       if (type != IRQ_TYPE_NONE)
+               ret = __irq_set_trigger(desc, irq, type);
+       irq_put_desc_busunlock(desc, flags);
        return ret;
 }
-EXPORT_SYMBOL(set_irq_type);
+EXPORT_SYMBOL(irq_set_irq_type);
 
 /**
- *     set_irq_data - set irq type data for an irq
+ *     irq_set_handler_data - set irq handler data for an irq
  *     @irq:   Interrupt number
  *     @data:  Pointer to interrupt specific data
  *
  *     Set the hardware irq controller data for an irq
  */
-int set_irq_data(unsigned int irq, void *data)
+int irq_set_handler_data(unsigned int irq, void *data)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
 
-       if (!desc) {
-               printk(KERN_ERR
-                      "Trying to install controller data for IRQ%d\n", irq);
+       if (!desc)
                return -EINVAL;
-       }
-
-       raw_spin_lock_irqsave(&desc->lock, flags);
        desc->irq_data.handler_data = data;
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
+       irq_put_desc_unlock(desc, flags);
        return 0;
 }
-EXPORT_SYMBOL(set_irq_data);
+EXPORT_SYMBOL(irq_set_handler_data);
 
 /**
- *     set_irq_msi - set MSI descriptor data for an irq
+ *     irq_set_msi_desc - set MSI descriptor data for an irq
  *     @irq:   Interrupt number
  *     @entry: Pointer to MSI descriptor data
  *
  *     Set the MSI descriptor entry for an irq
  */
-int set_irq_msi(unsigned int irq, struct msi_desc *entry)
+int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
 
-       if (!desc) {
-               printk(KERN_ERR
-                      "Trying to install msi data for IRQ%d\n", irq);
+       if (!desc)
                return -EINVAL;
-       }
-
-       raw_spin_lock_irqsave(&desc->lock, flags);
        desc->irq_data.msi_desc = entry;
        if (entry)
                entry->irq = irq;
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
+       irq_put_desc_unlock(desc, flags);
        return 0;
 }
 
 /**
- *     set_irq_chip_data - set irq chip data for an irq
+ *     irq_set_chip_data - set irq chip data for an irq
  *     @irq:   Interrupt number
  *     @data:  Pointer to chip specific data
  *
  *     Set the hardware irq chip data for an irq
  */
-int set_irq_chip_data(unsigned int irq, void *data)
+int irq_set_chip_data(unsigned int irq, void *data)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
 
-       if (!desc) {
-               printk(KERN_ERR
-                      "Trying to install chip data for IRQ%d\n", irq);
-               return -EINVAL;
-       }
-
-       if (!desc->irq_data.chip) {
-               printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
+       if (!desc)
                return -EINVAL;
-       }
-
-       raw_spin_lock_irqsave(&desc->lock, flags);
        desc->irq_data.chip_data = data;
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
-
+       irq_put_desc_unlock(desc, flags);
        return 0;
 }
-EXPORT_SYMBOL(set_irq_chip_data);
+EXPORT_SYMBOL(irq_set_chip_data);
 
 struct irq_data *irq_get_irq_data(unsigned int irq)
 {
@@ -162,72 +132,75 @@ struct irq_data *irq_get_irq_data(unsigned int irq)
 }
 EXPORT_SYMBOL_GPL(irq_get_irq_data);
 
-/**
- *     set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
- *
- *     @irq:   Interrupt number
- *     @nest:  0 to clear / 1 to set the IRQ_NESTED_THREAD flag
- *
- *     The IRQ_NESTED_THREAD flag indicates that on
- *     request_threaded_irq() no separate interrupt thread should be
- *     created for the irq as the handler are called nested in the
- *     context of a demultiplexing interrupt handler thread.
- */
-void set_irq_nested_thread(unsigned int irq, int nest)
+static void irq_state_clr_disabled(struct irq_desc *desc)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
-       unsigned long flags;
-
-       if (!desc)
-               return;
-
-       raw_spin_lock_irqsave(&desc->lock, flags);
-       if (nest)
-               desc->status |= IRQ_NESTED_THREAD;
-       else
-               desc->status &= ~IRQ_NESTED_THREAD;
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
+       desc->istate &= ~IRQS_DISABLED;
+       irq_compat_clr_disabled(desc);
 }
-EXPORT_SYMBOL_GPL(set_irq_nested_thread);
 
-/*
- * default enable function
- */
-static void default_enable(struct irq_data *data)
+static void irq_state_set_disabled(struct irq_desc *desc)
 {
-       struct irq_desc *desc = irq_data_to_desc(data);
+       desc->istate |= IRQS_DISABLED;
+       irq_compat_set_disabled(desc);
+}
 
-       desc->irq_data.chip->irq_unmask(&desc->irq_data);
-       desc->status &= ~IRQ_MASKED;
+static void irq_state_clr_masked(struct irq_desc *desc)
+{
+       desc->istate &= ~IRQS_MASKED;
+       irq_compat_clr_masked(desc);
 }
 
-/*
- * default disable function
- */
-static void default_disable(struct irq_data *data)
+static void irq_state_set_masked(struct irq_desc *desc)
 {
+       desc->istate |= IRQS_MASKED;
+       irq_compat_set_masked(desc);
 }
 
-/*
- * default startup function
- */
-static unsigned int default_startup(struct irq_data *data)
+int irq_startup(struct irq_desc *desc)
 {
-       struct irq_desc *desc = irq_data_to_desc(data);
+       irq_state_clr_disabled(desc);
+       desc->depth = 0;
+
+       if (desc->irq_data.chip->irq_startup) {
+               int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
+               irq_state_clr_masked(desc);
+               return ret;
+       }
 
-       desc->irq_data.chip->irq_enable(data);
+       irq_enable(desc);
        return 0;
 }
 
-/*
- * default shutdown function
- */
-static void default_shutdown(struct irq_data *data)
+void irq_shutdown(struct irq_desc *desc)
 {
-       struct irq_desc *desc = irq_data_to_desc(data);
+       irq_state_set_disabled(desc);
+       desc->depth = 1;
+       if (desc->irq_data.chip->irq_shutdown)
+               desc->irq_data.chip->irq_shutdown(&desc->irq_data);
+       if (desc->irq_data.chip->irq_disable)
+               desc->irq_data.chip->irq_disable(&desc->irq_data);
+       else
+               desc->irq_data.chip->irq_mask(&desc->irq_data);
+       irq_state_set_masked(desc);
+}
 
-       desc->irq_data.chip->irq_mask(&desc->irq_data);
-       desc->status |= IRQ_MASKED;
+void irq_enable(struct irq_desc *desc)
+{
+       irq_state_clr_disabled(desc);
+       if (desc->irq_data.chip->irq_enable)
+               desc->irq_data.chip->irq_enable(&desc->irq_data);
+       else
+               desc->irq_data.chip->irq_unmask(&desc->irq_data);
+       irq_state_clr_masked(desc);
+}
+
+void irq_disable(struct irq_desc *desc)
+{
+       irq_state_set_disabled(desc);
+       if (desc->irq_data.chip->irq_disable) {
+               desc->irq_data.chip->irq_disable(&desc->irq_data);
+               irq_state_set_masked(desc);
+       }
 }
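
Taken together, the four helpers above centralize the chip callbacks and the istate/compat bookkeeping. The resulting transitions, summarized as a sketch:

	/*
	 * irq_startup():  clear IRQS_DISABLED, depth = 0,
	 *                 chip->irq_startup() if present, else irq_enable()
	 * irq_shutdown(): set IRQS_DISABLED, depth = 1,
	 *                 chip->irq_shutdown() or irq_disable()/irq_mask(),
	 *                 then mark IRQS_MASKED
	 * irq_enable()/irq_disable(): prefer the chip's enable/disable,
	 *                 fall back to unmask/mask, tracking IRQS_MASKED
	 */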
 
 #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
@@ -315,10 +288,6 @@ static void compat_bus_sync_unlock(struct irq_data *data)
 void irq_chip_set_defaults(struct irq_chip *chip)
 {
 #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-       /*
-        * Compat fixup functions need to be before we set the
-        * defaults for enable/disable/startup/shutdown
-        */
        if (chip->enable)
                chip->irq_enable = compat_irq_enable;
        if (chip->disable)
@@ -327,33 +296,8 @@ void irq_chip_set_defaults(struct irq_chip *chip)
                chip->irq_shutdown = compat_irq_shutdown;
        if (chip->startup)
                chip->irq_startup = compat_irq_startup;
-#endif
-       /*
-        * The real defaults
-        */
-       if (!chip->irq_enable)
-               chip->irq_enable = default_enable;
-       if (!chip->irq_disable)
-               chip->irq_disable = default_disable;
-       if (!chip->irq_startup)
-               chip->irq_startup = default_startup;
-       /*
-        * We use chip->irq_disable, when the user provided its own. When
-        * we have default_disable set for chip->irq_disable, then we need
-        * to use default_shutdown, otherwise the irq line is not
-        * disabled on free_irq():
-        */
-       if (!chip->irq_shutdown)
-               chip->irq_shutdown = chip->irq_disable != default_disable ?
-                       chip->irq_disable : default_shutdown;
-
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
        if (!chip->end)
                chip->end = dummy_irq_chip.end;
-
-       /*
-        * Now fix up the remaining compat handlers
-        */
        if (chip->bus_lock)
                chip->irq_bus_lock = compat_bus_lock;
        if (chip->bus_sync_unlock)
@@ -388,22 +332,22 @@ static inline void mask_ack_irq(struct irq_desc *desc)
                if (desc->irq_data.chip->irq_ack)
                        desc->irq_data.chip->irq_ack(&desc->irq_data);
        }
-       desc->status |= IRQ_MASKED;
+       irq_state_set_masked(desc);
 }
 
-static inline void mask_irq(struct irq_desc *desc)
+void mask_irq(struct irq_desc *desc)
 {
        if (desc->irq_data.chip->irq_mask) {
                desc->irq_data.chip->irq_mask(&desc->irq_data);
-               desc->status |= IRQ_MASKED;
+               irq_state_set_masked(desc);
        }
 }
 
-static inline void unmask_irq(struct irq_desc *desc)
+void unmask_irq(struct irq_desc *desc)
 {
        if (desc->irq_data.chip->irq_unmask) {
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
-               desc->status &= ~IRQ_MASKED;
+               irq_state_clr_masked(desc);
        }
 }
 
@@ -428,10 +372,11 @@ void handle_nested_irq(unsigned int irq)
        kstat_incr_irqs_this_cpu(irq, desc);
 
        action = desc->action;
-       if (unlikely(!action || (desc->status & IRQ_DISABLED)))
+       if (unlikely(!action || (desc->istate & IRQS_DISABLED)))
                goto out_unlock;
 
-       desc->status |= IRQ_INPROGRESS;
+       irq_compat_set_progress(desc);
+       desc->istate |= IRQS_INPROGRESS;
        raw_spin_unlock_irq(&desc->lock);
 
        action_ret = action->thread_fn(action->irq, action->dev_id);
@@ -439,13 +384,21 @@ void handle_nested_irq(unsigned int irq)
                note_interrupt(irq, desc, action_ret);
 
        raw_spin_lock_irq(&desc->lock);
-       desc->status &= ~IRQ_INPROGRESS;
+       desc->istate &= ~IRQS_INPROGRESS;
+       irq_compat_clr_progress(desc);
 
 out_unlock:
        raw_spin_unlock_irq(&desc->lock);
 }
 EXPORT_SYMBOL_GPL(handle_nested_irq);
 
+static bool irq_check_poll(struct irq_desc *desc)
+{
+       if (!(desc->istate & IRQS_POLL_INPROGRESS))
+               return false;
+       return irq_wait_for_poll(desc);
+}
+
 /**
  *     handle_simple_irq - Simple and software-decoded IRQs.
  *     @irq:   the interrupt number
@@ -461,29 +414,20 @@ EXPORT_SYMBOL_GPL(handle_nested_irq);
 void
 handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
-       struct irqaction *action;
-       irqreturn_t action_ret;
-
        raw_spin_lock(&desc->lock);
 
-       if (unlikely(desc->status & IRQ_INPROGRESS))
-               goto out_unlock;
-       desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+       if (unlikely(desc->istate & IRQS_INPROGRESS))
+               if (!irq_check_poll(desc))
+                       goto out_unlock;
+
+       desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);
 
-       action = desc->action;
-       if (unlikely(!action || (desc->status & IRQ_DISABLED)))
+       if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
                goto out_unlock;
 
-       desc->status |= IRQ_INPROGRESS;
-       raw_spin_unlock(&desc->lock);
+       handle_irq_event(desc);
 
-       action_ret = handle_IRQ_event(irq, action);
-       if (!noirqdebug)
-               note_interrupt(irq, desc, action_ret);
-
-       raw_spin_lock(&desc->lock);
-       desc->status &= ~IRQ_INPROGRESS;
 out_unlock:
        raw_spin_unlock(&desc->lock);
 }
@@ -501,42 +445,42 @@ out_unlock:
 void
 handle_level_irq(unsigned int irq, struct irq_desc *desc)
 {
-       struct irqaction *action;
-       irqreturn_t action_ret;
-
        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc);
 
-       if (unlikely(desc->status & IRQ_INPROGRESS))
-               goto out_unlock;
-       desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+       if (unlikely(desc->istate & IRQS_INPROGRESS))
+               if (!irq_check_poll(desc))
+                       goto out_unlock;
+
+       desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);
 
        /*
         * If it's disabled or no action is available
         * keep it masked and get out of here
         */
-       action = desc->action;
-       if (unlikely(!action || (desc->status & IRQ_DISABLED)))
+       if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
                goto out_unlock;
 
-       desc->status |= IRQ_INPROGRESS;
-       raw_spin_unlock(&desc->lock);
-
-       action_ret = handle_IRQ_event(irq, action);
-       if (!noirqdebug)
-               note_interrupt(irq, desc, action_ret);
+       handle_irq_event(desc);
 
-       raw_spin_lock(&desc->lock);
-       desc->status &= ~IRQ_INPROGRESS;
-
-       if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
+       if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT)))
                unmask_irq(desc);
 out_unlock:
        raw_spin_unlock(&desc->lock);
 }
 EXPORT_SYMBOL_GPL(handle_level_irq);
 
+#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+static inline void preflow_handler(struct irq_desc *desc)
+{
+       if (desc->preflow_handler)
+               desc->preflow_handler(&desc->irq_data);
+}
+#else
+static inline void preflow_handler(struct irq_desc *desc) { }
+#endif
+
 /**
  *     handle_fasteoi_irq - irq handler for transparent controllers
  *     @irq:   the interrupt number
@@ -550,42 +494,41 @@ EXPORT_SYMBOL_GPL(handle_level_irq);
 void
 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 {
-       struct irqaction *action;
-       irqreturn_t action_ret;
-
        raw_spin_lock(&desc->lock);
 
-       if (unlikely(desc->status & IRQ_INPROGRESS))
-               goto out;
+       if (unlikely(desc->istate & IRQS_INPROGRESS))
+               if (!irq_check_poll(desc))
+                       goto out;
 
-       desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+       desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);
 
        /*
         * If it's disabled or no action is available
         * then mask it and get out of here:
         */
-       action = desc->action;
-       if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
-               desc->status |= IRQ_PENDING;
+       if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) {
+               irq_compat_set_pending(desc);
+               desc->istate |= IRQS_PENDING;
                mask_irq(desc);
                goto out;
        }
 
-       desc->status |= IRQ_INPROGRESS;
-       desc->status &= ~IRQ_PENDING;
-       raw_spin_unlock(&desc->lock);
+       if (desc->istate & IRQS_ONESHOT)
+               mask_irq(desc);
 
-       action_ret = handle_IRQ_event(irq, action);
-       if (!noirqdebug)
-               note_interrupt(irq, desc, action_ret);
+       preflow_handler(desc);
+       handle_irq_event(desc);
 
-       raw_spin_lock(&desc->lock);
-       desc->status &= ~IRQ_INPROGRESS;
-out:
+out_eoi:
        desc->irq_data.chip->irq_eoi(&desc->irq_data);
-
+out_unlock:
        raw_spin_unlock(&desc->lock);
+       return;
+out:
+       if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
+               goto out_eoi;
+       goto out_unlock;
 }
 
 /**
@@ -609,32 +552,28 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 {
        raw_spin_lock(&desc->lock);
 
-       desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-
+       desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        /*
         * If we're currently running this IRQ, or it's disabled,
         * we shouldn't process the IRQ. Mark it pending, handle
         * the necessary masking and go out
         */
-       if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
-                   !desc->action)) {
-               desc->status |= (IRQ_PENDING | IRQ_MASKED);
-               mask_ack_irq(desc);
-               goto out_unlock;
+       if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) ||
+                     !desc->action))) {
+               if (!irq_check_poll(desc)) {
+                       irq_compat_set_pending(desc);
+                       desc->istate |= IRQS_PENDING;
+                       mask_ack_irq(desc);
+                       goto out_unlock;
+               }
        }
        kstat_incr_irqs_this_cpu(irq, desc);
 
        /* Start handling the irq */
        desc->irq_data.chip->irq_ack(&desc->irq_data);
 
-       /* Mark the IRQ currently in progress.*/
-       desc->status |= IRQ_INPROGRESS;
-
        do {
-               struct irqaction *action = desc->action;
-               irqreturn_t action_ret;
-
-               if (unlikely(!action)) {
+               if (unlikely(!desc->action)) {
                        mask_irq(desc);
                        goto out_unlock;
                }
@@ -644,22 +583,17 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
                 * one, we could have masked the irq.
                 * Re-enable it, if it was not disabled in the meantime.
                 */
-               if (unlikely((desc->status &
-                              (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
-                             (IRQ_PENDING | IRQ_MASKED))) {
-                       unmask_irq(desc);
+               if (unlikely(desc->istate & IRQS_PENDING)) {
+                       if (!(desc->istate & IRQS_DISABLED) &&
+                           (desc->istate & IRQS_MASKED))
+                               unmask_irq(desc);
                }
 
-               desc->status &= ~IRQ_PENDING;
-               raw_spin_unlock(&desc->lock);
-               action_ret = handle_IRQ_event(irq, action);
-               if (!noirqdebug)
-                       note_interrupt(irq, desc, action_ret);
-               raw_spin_lock(&desc->lock);
+               handle_irq_event(desc);
 
-       } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
+       } while ((desc->istate & IRQS_PENDING) &&
+                !(desc->istate & IRQS_DISABLED));
 
-       desc->status &= ~IRQ_INPROGRESS;
 out_unlock:
        raw_spin_unlock(&desc->lock);
 }
@@ -674,103 +608,84 @@ out_unlock:
 void
 handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 {
-       irqreturn_t action_ret;
+       struct irq_chip *chip = irq_desc_get_chip(desc);
 
        kstat_incr_irqs_this_cpu(irq, desc);
 
-       if (desc->irq_data.chip->irq_ack)
-               desc->irq_data.chip->irq_ack(&desc->irq_data);
+       if (chip->irq_ack)
+               chip->irq_ack(&desc->irq_data);
 
-       action_ret = handle_IRQ_event(irq, desc->action);
-       if (!noirqdebug)
-               note_interrupt(irq, desc, action_ret);
+       handle_irq_event_percpu(desc, desc->action);
 
-       if (desc->irq_data.chip->irq_eoi)
-               desc->irq_data.chip->irq_eoi(&desc->irq_data);
+       if (chip->irq_eoi)
+               chip->irq_eoi(&desc->irq_data);
 }
 
 void
-__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                  const char *name)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
 
-       if (!desc) {
-               printk(KERN_ERR
-                      "Trying to install type control for IRQ%d\n", irq);
+       if (!desc)
                return;
-       }
 
-       if (!handle)
+       if (!handle) {
                handle = handle_bad_irq;
-       else if (desc->irq_data.chip == &no_irq_chip) {
-               printk(KERN_WARNING "Trying to install %sinterrupt handler "
-                      "for IRQ%d\n", is_chained ? "chained " : "", irq);
-               /*
-                * Some ARM implementations install a handler for really dumb
-                * interrupt hardware without setting an irq_chip. This worked
-                * with the ARM no_irq_chip but the check in setup_irq would
-                * prevent us to setup the interrupt at all. Switch it to
-                * dummy_irq_chip for easy transition.
-                */
-               desc->irq_data.chip = &dummy_irq_chip;
+       } else {
+               if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
+                       goto out;
        }
 
-       chip_bus_lock(desc);
-       raw_spin_lock_irqsave(&desc->lock, flags);
-
        /* Uninstall? */
        if (handle == handle_bad_irq) {
                if (desc->irq_data.chip != &no_irq_chip)
                        mask_ack_irq(desc);
-               desc->status |= IRQ_DISABLED;
+               irq_compat_set_disabled(desc);
+               desc->istate |= IRQS_DISABLED;
                desc->depth = 1;
        }
        desc->handle_irq = handle;
        desc->name = name;
 
        if (handle != handle_bad_irq && is_chained) {
-               desc->status &= ~IRQ_DISABLED;
-               desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
-               desc->depth = 0;
-               desc->irq_data.chip->irq_startup(&desc->irq_data);
+               irq_settings_set_noprobe(desc);
+               irq_settings_set_norequest(desc);
+               irq_startup(desc);
        }
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
-       chip_bus_sync_unlock(desc);
-}
-EXPORT_SYMBOL_GPL(__set_irq_handler);
-
-void
-set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
-                        irq_flow_handler_t handle)
-{
-       set_irq_chip(irq, chip);
-       __set_irq_handler(irq, handle, 0, NULL);
+out:
+       irq_put_desc_busunlock(desc, flags);
 }
+EXPORT_SYMBOL_GPL(__irq_set_handler);
 
 void
-set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
                              irq_flow_handler_t handle, const char *name)
 {
-       set_irq_chip(irq, chip);
-       __set_irq_handler(irq, handle, 0, name);
+       irq_set_chip(irq, chip);
+       __irq_set_handler(irq, handle, 0, name);
 }
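A typical caller of the renamed helper, sketched with a hypothetical chip and irq number:

	/* Board code wiring a level-triggered line (names assumed): */
	irq_set_chip_and_handler_name(MY_BOARD_IRQ, &my_board_chip,
				      handle_level_irq, "level");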
 
 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
 
        if (!desc)
                return;
+       irq_settings_clr_and_set(desc, clr, set);
+
+       irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
+                  IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
+       if (irq_settings_has_no_balance_set(desc))
+               irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+       if (irq_settings_is_per_cpu(desc))
+               irqd_set(&desc->irq_data, IRQD_PER_CPU);
+       if (irq_settings_can_move_pcntxt(desc))
+               irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
 
-       /* Sanitize flags */
-       set &= IRQF_MODIFY_MASK;
-       clr &= IRQF_MODIFY_MASK;
+       irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
 
-       raw_spin_lock_irqsave(&desc->lock, flags);
-       desc->status &= ~clr;
-       desc->status |= set;
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
+       irq_put_desc_unlock(desc, flags);
 }
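With the rework, clr/set act on the per-descriptor settings instead of raw desc->status bits. A caller marking a line as not autoprobe-able while keeping it requestable might look like this (irq number hypothetical):

	irq_modify_status(MY_IRQ, IRQ_NOREQUEST, IRQ_NOPROBE);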
diff --git a/kernel/irq/compat.h b/kernel/irq/compat.h
new file mode 100644 (file)
index 0000000..6bbaf66
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Compat layer for transition period
+ */
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+static inline void irq_compat_set_progress(struct irq_desc *desc)
+{
+       desc->status |= IRQ_INPROGRESS;
+}
+
+static inline void irq_compat_clr_progress(struct irq_desc *desc)
+{
+       desc->status &= ~IRQ_INPROGRESS;
+}
+static inline void irq_compat_set_disabled(struct irq_desc *desc)
+{
+       desc->status |= IRQ_DISABLED;
+}
+static inline void irq_compat_clr_disabled(struct irq_desc *desc)
+{
+       desc->status &= ~IRQ_DISABLED;
+}
+static inline void irq_compat_set_pending(struct irq_desc *desc)
+{
+       desc->status |= IRQ_PENDING;
+}
+
+static inline void irq_compat_clr_pending(struct irq_desc *desc)
+{
+       desc->status &= ~IRQ_PENDING;
+}
+static inline void irq_compat_set_masked(struct irq_desc *desc)
+{
+       desc->status |= IRQ_MASKED;
+}
+
+static inline void irq_compat_clr_masked(struct irq_desc *desc)
+{
+       desc->status &= ~IRQ_MASKED;
+}
+static inline void irq_compat_set_move_pending(struct irq_desc *desc)
+{
+       desc->status |= IRQ_MOVE_PENDING;
+}
+
+static inline void irq_compat_clr_move_pending(struct irq_desc *desc)
+{
+       desc->status &= ~IRQ_MOVE_PENDING;
+}
+static inline void irq_compat_set_affinity(struct irq_desc *desc)
+{
+       desc->status |= IRQ_AFFINITY_SET;
+}
+
+static inline void irq_compat_clr_affinity(struct irq_desc *desc)
+{
+       desc->status &= ~IRQ_AFFINITY_SET;
+}
+#else
+static inline void irq_compat_set_progress(struct irq_desc *desc) { }
+static inline void irq_compat_clr_progress(struct irq_desc *desc) { }
+static inline void irq_compat_set_disabled(struct irq_desc *desc) { }
+static inline void irq_compat_clr_disabled(struct irq_desc *desc) { }
+static inline void irq_compat_set_pending(struct irq_desc *desc) { }
+static inline void irq_compat_clr_pending(struct irq_desc *desc) { }
+static inline void irq_compat_set_masked(struct irq_desc *desc) { }
+static inline void irq_compat_clr_masked(struct irq_desc *desc) { }
+static inline void irq_compat_set_move_pending(struct irq_desc *desc) { }
+static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { }
+static inline void irq_compat_set_affinity(struct irq_desc *desc) { }
+static inline void irq_compat_clr_affinity(struct irq_desc *desc) { }
+#endif
+
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
new file mode 100644 (file)
index 0000000..d1a33b7
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Debugging printout:
+ */
+
+#include <linux/kallsyms.h>
+
+#define P(f) if (desc->status & f) printk("%14s set\n", #f)
+#define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
+
+static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+       printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
+               irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
+       printk("->handle_irq():  %p, ", desc->handle_irq);
+       print_symbol("%s\n", (unsigned long)desc->handle_irq);
+       printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
+       print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
+       printk("->action(): %p\n", desc->action);
+       if (desc->action) {
+               printk("->action->handler(): %p, ", desc->action->handler);
+               print_symbol("%s\n", (unsigned long)desc->action->handler);
+       }
+
+       P(IRQ_LEVEL);
+       P(IRQ_PER_CPU);
+       P(IRQ_NOPROBE);
+       P(IRQ_NOREQUEST);
+       P(IRQ_NOAUTOEN);
+
+       PS(IRQS_AUTODETECT);
+       PS(IRQS_INPROGRESS);
+       PS(IRQS_REPLAY);
+       PS(IRQS_WAITING);
+       PS(IRQS_DISABLED);
+       PS(IRQS_PENDING);
+       PS(IRQS_MASKED);
+}
+
+#undef P
+#undef PS
index 3540a7190122361a3cdaec62c08d7652693bde44..517561fc73178b2f08dd7e12d32b4bf59d610517 100644 (file)
@@ -51,30 +51,92 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action)
               "but no thread function available.", irq, action->name);
 }
 
-/**
- * handle_IRQ_event - irq action chain handler
- * @irq:       the interrupt number
- * @action:    the interrupt action chain for this irq
- *
- * Handles the action chain of an irq event
- */
-irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
+static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
+{
+       /*
+        * Wake up the handler thread for this action. In case the
+        * thread crashed and was killed we just pretend that we
+        * handled the interrupt. The hardirq handler has disabled the
+        * device interrupt, so no irq storm is lurking. If the
+        * RUNTHREAD bit is already set, nothing to do.
+        */
+       if (test_bit(IRQTF_DIED, &action->thread_flags) ||
+           test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+               return;
+
+       /*
+        * It's safe to OR the mask lockless here. We have only two
+        * places which write to threads_oneshot: This code and the
+        * irq thread.
+        *
+        * This code is the hard irq context and can never run on two
+        * cpus in parallel. If it ever does we have more serious
+        * problems than this bitmask.
+        *
+        * The irq threads of this irq which clear their "running" bit
+        * in threads_oneshot are serialized via desc->lock against
+        * each other and they are serialized against this code by
+        * IRQS_INPROGRESS.
+        *
+        * Hard irq handler:
+        *
+        *      spin_lock(desc->lock);
+        *      desc->state |= IRQS_INPROGRESS;
+        *      spin_unlock(desc->lock);
+        *      set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+        *      desc->threads_oneshot |= mask;
+        *      spin_lock(desc->lock);
+        *      desc->state &= ~IRQS_INPROGRESS;
+        *      spin_unlock(desc->lock);
+        *
+        * irq thread:
+        *
+        * again:
+        *      spin_lock(desc->lock);
+        *      if (desc->state & IRQS_INPROGRESS) {
+        *              spin_unlock(desc->lock);
+        *              while (desc->state & IRQS_INPROGRESS)
+        *                      cpu_relax();
+        *              goto again;
+        *      }
+        *      if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+        *              desc->threads_oneshot &= ~mask;
+        *      spin_unlock(desc->lock);
+        *
+        * So either the thread waits for us to clear IRQS_INPROGRESS
+        * or we are waiting in the flow handler for desc->lock to be
+        * released before we reach this point. The thread also checks
+        * IRQTF_RUNTHREAD under desc->lock. If set it leaves
+        * threads_oneshot untouched and runs the thread another time.
+        */
+       desc->threads_oneshot |= action->thread_mask;
+       wake_up_process(action->thread);
+}
+
+irqreturn_t
+handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 {
-       irqreturn_t ret, retval = IRQ_NONE;
-       unsigned int status = 0;
+       irqreturn_t retval = IRQ_NONE;
+       unsigned int random = 0, irq = desc->irq_data.irq;
 
        do {
+               irqreturn_t res;
+
                trace_irq_handler_entry(irq, action);
-               ret = action->handler(irq, action->dev_id);
-               trace_irq_handler_exit(irq, action, ret);
+               res = action->handler(irq, action->dev_id);
+               trace_irq_handler_exit(irq, action, res);
 
-               switch (ret) {
+               if (WARN_ONCE(!irqs_disabled(), "irq %u handler %pF enabled interrupts\n",
+                             irq, action->handler))
+                       local_irq_disable();
+
+               switch (res) {
                case IRQ_WAKE_THREAD:
                        /*
                         * Set result to handled so the spurious check
                         * does not trigger.
                         */
-                       ret = IRQ_HANDLED;
+                       res = IRQ_HANDLED;
 
                        /*
                         * Catch drivers which return WAKE_THREAD but
@@ -85,36 +147,56 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
                                break;
                        }
 
-                       /*
-                        * Wake up the handler thread for this
-                        * action. In case the thread crashed and was
-                        * killed we just pretend that we handled the
-                        * interrupt. The hardirq handler above has
-                        * disabled the device interrupt, so no irq
-                        * storm is lurking.
-                        */
-                       if (likely(!test_bit(IRQTF_DIED,
-                                            &action->thread_flags))) {
-                               set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
-                               wake_up_process(action->thread);
-                       }
+                       irq_wake_thread(desc, action);
 
                        /* Fall through to add to randomness */
                case IRQ_HANDLED:
-                       status |= action->flags;
+                       random |= action->flags;
                        break;
 
                default:
                        break;
                }
 
-               retval |= ret;
+               retval |= res;
                action = action->next;
        } while (action);
 
-       if (status & IRQF_SAMPLE_RANDOM)
+       if (random & IRQF_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
-       local_irq_disable();
 
+       if (!noirqdebug)
+               note_interrupt(irq, desc, retval);
        return retval;
 }
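A driver exercising the IRQ_WAKE_THREAD path above typically registers a primary/threaded pair via request_threaded_irq(); a minimal sketch, with the device-side helper being an assumption:

	static irqreturn_t my_quick_check(int irq, void *dev_id)
	{
		/* runs in hard irq context; defer the heavy work */
		if (!my_device_has_work(dev_id))	/* hypothetical */
			return IRQ_NONE;
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t my_slow_work(int irq, void *dev_id)
	{
		/* runs in the irq thread and may sleep */
		return IRQ_HANDLED;
	}

	ret = request_threaded_irq(irq, my_quick_check, my_slow_work,
				   IRQF_ONESHOT, "mydev", dev);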
+
+irqreturn_t handle_irq_event(struct irq_desc *desc)
+{
+       struct irqaction *action = desc->action;
+       irqreturn_t ret;
+
+       irq_compat_clr_pending(desc);
+       desc->istate &= ~IRQS_PENDING;
+       irq_compat_set_progress(desc);
+       desc->istate |= IRQS_INPROGRESS;
+       raw_spin_unlock(&desc->lock);
+
+       ret = handle_irq_event_percpu(desc, action);
+
+       raw_spin_lock(&desc->lock);
+       desc->istate &= ~IRQS_INPROGRESS;
+       irq_compat_clr_progress(desc);
+       return ret;
+}
+
+/**
+ * handle_IRQ_event - irq action chain handler
+ * @irq:       the interrupt number
+ * @action:    the interrupt action chain for this irq
+ *
+ * Handles the action chain of an irq event
+ */
+irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
+{
+       return handle_irq_event_percpu(irq_to_desc(irq), action);
+}
index 4571ae7e085ac95ad6affb1f31d802f32064665e..6c6ec9a490274e7c313f78eb202aa1c5af84f9b7 100644 (file)
 /*
  * IRQ subsystem internal functions and variables:
+ *
+ * Do not ever include this file from anything other than
+ * kernel/irq/. Do not even think about using any information
+ * outside of this file for your non-core code.
  */
 #include <linux/irqdesc.h>
 
+#ifdef CONFIG_SPARSE_IRQ
+# define IRQ_BITMAP_BITS       (NR_IRQS + 8192)
+#else
+# define IRQ_BITMAP_BITS       NR_IRQS
+#endif
+
+#define istate core_internal_state__do_not_mess_with_it
+
+#ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+# define status status_use_accessors
+#endif
+
 extern int noirqdebug;
 
+/*
+ * Bits used by threaded handlers:
+ * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
+ * IRQTF_DIED      - handler thread died
+ * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
+ * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
+ * IRQTF_FORCED_THREAD  - irq action is force threaded
+ */
+enum {
+       IRQTF_RUNTHREAD,
+       IRQTF_DIED,
+       IRQTF_WARNED,
+       IRQTF_AFFINITY,
+       IRQTF_FORCED_THREAD,
+};
+
+/*
+ * Bit masks for desc->istate
+ *
+ * IRQS_AUTODETECT             - autodetection in progress
+ * IRQS_SPURIOUS_DISABLED      - was disabled due to spurious interrupt
+ *                               detection
+ * IRQS_POLL_INPROGRESS                - polling in progress
+ * IRQS_INPROGRESS             - Interrupt in progress
+ * IRQS_ONESHOT                        - irq is not unmasked in primary handler
+ * IRQS_REPLAY                 - irq is replayed
+ * IRQS_WAITING                        - irq is waiting
+ * IRQS_DISABLED               - irq is disabled
+ * IRQS_PENDING                        - irq is pending and replayed later
+ * IRQS_MASKED                 - irq is masked
+ * IRQS_SUSPENDED              - irq is suspended
+ */
+enum {
+       IRQS_AUTODETECT         = 0x00000001,
+       IRQS_SPURIOUS_DISABLED  = 0x00000002,
+       IRQS_POLL_INPROGRESS    = 0x00000008,
+       IRQS_INPROGRESS         = 0x00000010,
+       IRQS_ONESHOT            = 0x00000020,
+       IRQS_REPLAY             = 0x00000040,
+       IRQS_WAITING            = 0x00000080,
+       IRQS_DISABLED           = 0x00000100,
+       IRQS_PENDING            = 0x00000200,
+       IRQS_MASKED             = 0x00000400,
+       IRQS_SUSPENDED          = 0x00000800,
+};
+
+#include "compat.h"
+#include "debug.h"
+#include "settings.h"
+
 #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
 
 /* Set default functions for irq_chip structures: */
 extern void irq_chip_set_defaults(struct irq_chip *chip);
 
-/* Set default handler: */
-extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
-
 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                unsigned long flags);
 extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
 
+extern int irq_startup(struct irq_desc *desc);
+extern void irq_shutdown(struct irq_desc *desc);
+extern void irq_enable(struct irq_desc *desc);
+extern void irq_disable(struct irq_desc *desc);
+extern void mask_irq(struct irq_desc *desc);
+extern void unmask_irq(struct irq_desc *desc);
+
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 
+irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action);
+irqreturn_t handle_irq_event(struct irq_desc *desc);
+
 /* Resending of interrupts :*/
 void check_irq_resend(struct irq_desc *desc, unsigned int irq);
+bool irq_wait_for_poll(struct irq_desc *desc);
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
@@ -37,20 +111,10 @@ static inline void unregister_handler_proc(unsigned int irq,
                                           struct irqaction *action) { }
 #endif
 
-extern int irq_select_affinity_usr(unsigned int irq);
+extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-static inline void irq_end(unsigned int irq, struct irq_desc *desc)
-{
-       if (desc->irq_data.chip && desc->irq_data.chip->end)
-               desc->irq_data.chip->end(irq);
-}
-#else
-static inline void irq_end(unsigned int irq, struct irq_desc *desc) { }
-#endif
-
 /* Inline functions for support of irq chips on slow busses */
 static inline void chip_bus_lock(struct irq_desc *desc)
 {
@@ -64,43 +128,60 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc)
                desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
 }
 
+struct irq_desc *
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus);
+void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
+
+static inline struct irq_desc *
+irq_get_desc_buslock(unsigned int irq, unsigned long *flags)
+{
+       return __irq_get_desc_lock(irq, flags, true);
+}
+
+static inline void
+irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
+{
+       __irq_put_desc_unlock(desc, flags, true);
+}
+
+static inline struct irq_desc *
+irq_get_desc_lock(unsigned int irq, unsigned long *flags)
+{
+       return __irq_get_desc_lock(irq, flags, false);
+}
+
+static inline void
+irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
+{
+       __irq_put_desc_unlock(desc, flags, false);
+}
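Callers pair the helpers around whatever they need to do under desc->lock; the reworked entry points above follow this pattern:

	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

	if (!desc)
		return -EINVAL;
	/* ... modify the descriptor; bus lock and desc->lock held ... */
	irq_put_desc_busunlock(desc, flags);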
+
 /*
- * Debugging printout:
+ * Manipulation functions for irq_data.state
  */
+static inline void irqd_set_move_pending(struct irq_data *d)
+{
+       d->state_use_accessors |= IRQD_SETAFFINITY_PENDING;
+       irq_compat_set_move_pending(irq_data_to_desc(d));
+}
 
-#include <linux/kallsyms.h>
-
-#define P(f) if (desc->status & f) printk("%14s set\n", #f)
+static inline void irqd_clr_move_pending(struct irq_data *d)
+{
+       d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
+       irq_compat_clr_move_pending(irq_data_to_desc(d));
+}
 
-static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
+static inline void irqd_clear(struct irq_data *d, unsigned int mask)
 {
-       printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
-               irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
-       printk("->handle_irq():  %p, ", desc->handle_irq);
-       print_symbol("%s\n", (unsigned long)desc->handle_irq);
-       printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
-       print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
-       printk("->action(): %p\n", desc->action);
-       if (desc->action) {
-               printk("->action->handler(): %p, ", desc->action->handler);
-               print_symbol("%s\n", (unsigned long)desc->action->handler);
-       }
-
-       P(IRQ_INPROGRESS);
-       P(IRQ_DISABLED);
-       P(IRQ_PENDING);
-       P(IRQ_REPLAY);
-       P(IRQ_AUTODETECT);
-       P(IRQ_WAITING);
-       P(IRQ_LEVEL);
-       P(IRQ_MASKED);
-#ifdef CONFIG_IRQ_PER_CPU
-       P(IRQ_PER_CPU);
-#endif
-       P(IRQ_NOPROBE);
-       P(IRQ_NOREQUEST);
-       P(IRQ_NOAUTOEN);
+       d->state_use_accessors &= ~mask;
 }
 
-#undef P
+static inline void irqd_set(struct irq_data *d, unsigned int mask)
+{
+       d->state_use_accessors |= mask;
+}
 
+static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
+{
+       return d->state_use_accessors & mask;
+}
index 282f20230e67c9c13b12ef6f30f4fb363cc84acf..dbccc799407f08c3bd9fba76b8af4178868085ca 100644 (file)
@@ -79,7 +79,8 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
        desc->irq_data.chip_data = NULL;
        desc->irq_data.handler_data = NULL;
        desc->irq_data.msi_desc = NULL;
-       desc->status = IRQ_DEFAULT_INIT_FLAGS;
+       irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
+       desc->istate = IRQS_DISABLED;
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
@@ -94,7 +95,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 static DEFINE_MUTEX(sparse_irq_lock);
-static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
+static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
 
 #ifdef CONFIG_SPARSE_IRQ
 
@@ -206,6 +207,14 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
        return NULL;
 }
 
+static int irq_expand_nr_irqs(unsigned int nr)
+{
+       if (nr > IRQ_BITMAP_BITS)
+               return -ENOMEM;
+       nr_irqs = nr;
+       return 0;
+}
+
 int __init early_irq_init(void)
 {
        int i, initcnt, node = first_online_node;
@@ -217,6 +226,15 @@ int __init early_irq_init(void)
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
 
+       if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
+               nr_irqs = IRQ_BITMAP_BITS;
+
+       if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
+               initcnt = IRQ_BITMAP_BITS;
+
+       if (initcnt > nr_irqs)
+               nr_irqs = initcnt;
+
        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node);
                set_bit(i, allocated_irqs);
@@ -229,7 +247,7 @@ int __init early_irq_init(void)
 
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
-               .status         = IRQ_DEFAULT_INIT_FLAGS,
+               .istate         = IRQS_DISABLED,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
@@ -251,8 +269,8 @@ int __init early_irq_init(void)
        for (i = 0; i < count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
-               /* TODO : do this allocation on-demand ... */
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
+               irq_settings_clr_and_set(desc + i, ~0, _IRQ_DEFAULT_INIT_FLAGS);
                alloc_masks(desc + i, GFP_KERNEL, node);
                desc_smp_init(desc + i, node);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
@@ -277,24 +295,14 @@ static void free_desc(unsigned int irq)
 
 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
 {
-#if defined(CONFIG_KSTAT_IRQS_ONDEMAND)
-       struct irq_desc *desc;
-       unsigned int i;
-
-       for (i = 0; i < cnt; i++) {
-               desc = irq_to_desc(start + i);
-               if (desc && !desc->kstat_irqs) {
-                       unsigned int __percpu *stats = alloc_percpu(unsigned int);
-
-                       if (!stats)
-                               return -1;
-                       if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL)
-                               free_percpu(stats);
-               }
-       }
-#endif
        return start;
 }
+
+static int irq_expand_nr_irqs(unsigned int nr)
+{
+       return -ENOMEM;
+}
+
 #endif /* !CONFIG_SPARSE_IRQ */
 
 /* Dynamic interrupt handling */
@@ -338,14 +346,17 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
 
        mutex_lock(&sparse_irq_lock);
 
-       start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
+       start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
+                                          from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto err;
 
-       ret = -ENOMEM;
-       if (start >= nr_irqs)
-               goto err;
+       if (start + cnt > nr_irqs) {
+               ret = irq_expand_nr_irqs(start + cnt);
+               if (ret)
+                       goto err;
+       }
 
        bitmap_set(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
@@ -392,6 +403,26 @@ unsigned int irq_get_next_irq(unsigned int offset)
        return find_next_bit(allocated_irqs, nr_irqs, offset);
 }
 
+struct irq_desc *
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (desc) {
+               if (bus)
+                       chip_bus_lock(desc);
+               raw_spin_lock_irqsave(&desc->lock, *flags);
+       }
+       return desc;
+}
+
+void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
+{
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+       if (bus)
+               chip_bus_sync_unlock(desc);
+}
+
 /**
  * dynamic_irq_cleanup - cleanup a dynamically allocated irq
  * @irq:       irq number to initialize
index 0caa59f747dda2e97677c0d9a203715e98e90048..acd599a43bfb56b6fdafe98d1bb531c16e71b77c 100644 (file)
 
 #include "internals.h"
 
+#ifdef CONFIG_IRQ_FORCED_THREADING
+__read_mostly bool force_irqthreads;
+
+static int __init setup_forced_irqthreads(char *arg)
+{
+       force_irqthreads = true;
+       return 0;
+}
+early_param("threadirqs", setup_forced_irqthreads);
+#endif
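Forced threading is therefore opt-in at boot; assuming a GRUB-style setup, it is enabled with a single command-line token:

	linux /boot/vmlinuz root=/dev/sda1 ro quiet threadirqs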
+
 /**
  *     synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  *     @irq: interrupt number to wait for
@@ -30,7 +41,7 @@
 void synchronize_irq(unsigned int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
-       unsigned int status;
+       unsigned int state;
 
        if (!desc)
                return;
@@ -42,16 +53,16 @@ void synchronize_irq(unsigned int irq)
                 * Wait until we're out of the critical section.  This might
                 * give the wrong answer due to the lack of memory barriers.
                 */
-               while (desc->status & IRQ_INPROGRESS)
+               while (desc->istate & IRQS_INPROGRESS)
                        cpu_relax();
 
                /* Ok, that indicated we're done: double-check carefully. */
                raw_spin_lock_irqsave(&desc->lock, flags);
-               status = desc->status;
+               state = desc->istate;
                raw_spin_unlock_irqrestore(&desc->lock, flags);
 
                /* Oops, that failed? */
-       } while (status & IRQ_INPROGRESS);
+       } while (state & IRQS_INPROGRESS);
 
        /*
         * We made sure that no hardirq handler is running. Now verify
@@ -73,8 +84,8 @@ int irq_can_set_affinity(unsigned int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
 
-       if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
-           !desc->irq_data.chip->irq_set_affinity)
+       if (!desc || !irqd_can_balance(&desc->irq_data) ||
+           !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
                return 0;
 
        return 1;
@@ -100,67 +111,169 @@ void irq_set_thread_affinity(struct irq_desc *desc)
        }
 }
 
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline bool irq_can_move_pcntxt(struct irq_desc *desc)
+{
+       return irq_settings_can_move_pcntxt(desc);
+}
+static inline bool irq_move_pending(struct irq_desc *desc)
+{
+       return irqd_is_setaffinity_pending(&desc->irq_data);
+}
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
+{
+       cpumask_copy(desc->pending_mask, mask);
+}
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
+{
+       cpumask_copy(mask, desc->pending_mask);
+}
+#else
+static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; }
+static inline bool irq_move_pending(struct irq_desc *desc) { return false; }
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
+#endif
+
 /**
  *     irq_set_affinity - Set the irq affinity of a given irq
  *     @irq:           Interrupt to set affinity
  *     @cpumask:       cpumask
  *
  */
-int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_chip *chip = desc->irq_data.chip;
        unsigned long flags;
+       int ret = 0;
 
        if (!chip->irq_set_affinity)
                return -EINVAL;
 
        raw_spin_lock_irqsave(&desc->lock, flags);
 
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-       if (desc->status & IRQ_MOVE_PCNTXT) {
-               if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
-                       cpumask_copy(desc->irq_data.affinity, cpumask);
+       if (irq_can_move_pcntxt(desc)) {
+               ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+               switch (ret) {
+               case IRQ_SET_MASK_OK:
+                       cpumask_copy(desc->irq_data.affinity, mask);
+               case IRQ_SET_MASK_OK_NOCOPY:
                        irq_set_thread_affinity(desc);
+                       ret = 0;
                }
+       } else {
+               irqd_set_move_pending(&desc->irq_data);
+               irq_copy_pending(desc, mask);
        }
-       else {
-               desc->status |= IRQ_MOVE_PENDING;
-               cpumask_copy(desc->pending_mask, cpumask);
-       }
-#else
-       if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
-               cpumask_copy(desc->irq_data.affinity, cpumask);
-               irq_set_thread_affinity(desc);
+
+       if (desc->affinity_notify) {
+               kref_get(&desc->affinity_notify->kref);
+               schedule_work(&desc->affinity_notify->work);
        }
-#endif
-       desc->status |= IRQ_AFFINITY_SET;
+       irq_compat_set_affinity(desc);
+       irqd_set(&desc->irq_data, IRQD_AFFINITY_SET);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
-       return 0;
+       return ret;
 }
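A caller steering an interrupt now sees the chip's verdict in the return value; a sketch with a hypothetical target CPU:

	/* Pin the irq to CPU 2, or queue the move if this platform can
	 * only retarget from interrupt context. */
	ret = irq_set_affinity(irq, cpumask_of(2));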
 
 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+       unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+
+       if (!desc)
+               return -EINVAL;
+       desc->affinity_hint = m;
+       irq_put_desc_unlock(desc, flags);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+
+static void irq_affinity_notify(struct work_struct *work)
+{
+       struct irq_affinity_notify *notify =
+               container_of(work, struct irq_affinity_notify, work);
+       struct irq_desc *desc = irq_to_desc(notify->irq);
+       cpumask_var_t cpumask;
+       unsigned long flags;
+
+       if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
+               goto out;
+
+       raw_spin_lock_irqsave(&desc->lock, flags);
+       if (irq_move_pending(desc))
+               irq_get_pending(cpumask, desc);
+       else
+               cpumask_copy(cpumask, desc->irq_data.affinity);
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+       notify->notify(notify, cpumask);
+
+       free_cpumask_var(cpumask);
+out:
+       kref_put(&notify->kref, notify->release);
+}
+
+/**
+ *     irq_set_affinity_notifier - control notification of IRQ affinity changes
+ *     @irq:           Interrupt for which to enable/disable notification
+ *     @notify:        Context for notification, or %NULL to disable
+ *                     notification.  Function pointers must be initialised;
+ *                     the other fields will be initialised by this function.
+ *
+ *     Must be called in process context.  Notification may only be enabled
+ *     after the IRQ is allocated and must be disabled before the IRQ is
+ *     freed using free_irq().
+ */
+int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 {
        struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_affinity_notify *old_notify;
        unsigned long flags;
 
+       /* The release function is promised process context */
+       might_sleep();
+
        if (!desc)
                return -EINVAL;
 
+       /* Complete initialisation of *notify */
+       if (notify) {
+               notify->irq = irq;
+               kref_init(&notify->kref);
+               INIT_WORK(&notify->work, irq_affinity_notify);
+       }
+
        raw_spin_lock_irqsave(&desc->lock, flags);
-       desc->affinity_hint = m;
+       old_notify = desc->affinity_notify;
+       desc->affinity_notify = notify;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 
+       if (old_notify)
+               kref_put(&old_notify->kref, old_notify->release);
+
        return 0;
 }
-EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
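A consumer of the notifier API fills in the two callbacks and hands over a kref-managed context; a sketch in which the allocation and the callback bodies are placeholders:

	static void my_affinity_changed(struct irq_affinity_notify *notify,
					const struct cpumask *mask)
	{
		/* retarget per-cpu resources at the new mask */
	}

	static void my_notify_release(struct kref *ref)
	{
		struct irq_affinity_notify *notify =
			container_of(ref, struct irq_affinity_notify, kref);

		kfree(notify);
	}

	notify = kzalloc(sizeof(*notify), GFP_KERNEL);
	notify->notify = my_affinity_changed;
	notify->release = my_notify_release;
	ret = irq_set_affinity_notifier(irq, notify);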
 
 #ifndef CONFIG_AUTO_IRQ_AFFINITY
 /*
  * Generic version of the affinity autoselector.
  */
-static int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+       struct cpumask *set = irq_default_affinity;
+       int ret;
+
+       /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!irq_can_set_affinity(irq))
                return 0;
 
@@ -168,22 +281,29 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
         * Preserve a userspace affinity setup, but make sure that
         * one of the targets is online.
         */
-       if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-               if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
-                   < nr_cpu_ids)
-                       goto set_affinity;
-               else
-                       desc->status &= ~IRQ_AFFINITY_SET;
+       if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
+               if (cpumask_intersects(desc->irq_data.affinity,
+                                      cpu_online_mask))
+                       set = desc->irq_data.affinity;
+               else {
+                       irq_compat_clr_affinity(desc);
+                       irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
+               }
        }
 
-       cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
-set_affinity:
-       desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);
-
+       cpumask_and(mask, cpu_online_mask, set);
+       ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+       switch (ret) {
+       case IRQ_SET_MASK_OK:
+               cpumask_copy(desc->irq_data.affinity, mask);
+       case IRQ_SET_MASK_OK_NOCOPY:
+               irq_set_thread_affinity(desc);
+       }
        return 0;
 }
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
 {
        return irq_select_affinity(irq);
 }
@@ -192,23 +312,21 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
 /*
  * Called when affinity is set via /proc/irq
  */
-int irq_select_affinity_usr(unsigned int irq)
+int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;
 
        raw_spin_lock_irqsave(&desc->lock, flags);
-       ret = setup_affinity(irq, desc);
-       if (!ret)
-               irq_set_thread_affinity(desc);
+       ret = setup_affinity(irq, desc, mask);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
-
        return ret;
 }
 
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
        return 0;
 }
@@ -219,13 +337,23 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
        if (suspend) {
                if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
                        return;
-               desc->status |= IRQ_SUSPENDED;
+               desc->istate |= IRQS_SUSPENDED;
        }
 
-       if (!desc->depth++) {
-               desc->status |= IRQ_DISABLED;
-               desc->irq_data.chip->irq_disable(&desc->irq_data);
-       }
+       if (!desc->depth++)
+               irq_disable(desc);
+}
+
+static int __disable_irq_nosync(unsigned int irq)
+{
+       unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+
+       if (!desc)
+               return -EINVAL;
+       __disable_irq(desc, irq, false);
+       irq_put_desc_busunlock(desc, flags);
+       return 0;
 }
 
 /**
@@ -241,17 +369,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
  */
 void disable_irq_nosync(unsigned int irq)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
-       unsigned long flags;
-
-       if (!desc)
-               return;
-
-       chip_bus_lock(desc);
-       raw_spin_lock_irqsave(&desc->lock, flags);
-       __disable_irq(desc, irq, false);
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
-       chip_bus_sync_unlock(desc);
+       __disable_irq_nosync(irq);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
 
@@ -269,21 +387,24 @@ EXPORT_SYMBOL(disable_irq_nosync);
  */
 void disable_irq(unsigned int irq)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
-
-       if (!desc)
-               return;
-
-       disable_irq_nosync(irq);
-       if (desc->action)
+       if (!__disable_irq_nosync(irq))
                synchronize_irq(irq);
 }
 EXPORT_SYMBOL(disable_irq);
 
 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 {
-       if (resume)
-               desc->status &= ~IRQ_SUSPENDED;
+       if (resume) {
+               if (!(desc->istate & IRQS_SUSPENDED)) {
+                       if (!desc->action)
+                               return;
+                       if (!(desc->action->flags & IRQF_FORCE_RESUME))
+                               return;
+                       /* Pretend that it got disabled! */
+                       desc->depth++;
+               }
+               desc->istate &= ~IRQS_SUSPENDED;
+       }
 
        switch (desc->depth) {
        case 0:
@@ -291,12 +412,11 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
                break;
        case 1: {
-               unsigned int status = desc->status & ~IRQ_DISABLED;
-
-               if (desc->status & IRQ_SUSPENDED)
+               if (desc->istate & IRQS_SUSPENDED)
                        goto err_out;
                /* Prevent probing on this irq: */
-               desc->status = status | IRQ_NOPROBE;
+               irq_settings_set_noprobe(desc);
+               irq_enable(desc);
                check_irq_resend(desc, irq);
                /* fall-through */
        }
@@ -318,21 +438,18 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
  */
 void enable_irq(unsigned int irq)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
 
        if (!desc)
                return;
+       if (WARN(!desc->irq_data.chip,
+                KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
+               goto out;
 
-       if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable,
-           KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
-               return;
-
-       chip_bus_lock(desc);
-       raw_spin_lock_irqsave(&desc->lock, flags);
        __enable_irq(desc, irq, false);
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
-       chip_bus_sync_unlock(desc);
+out:
+       irq_put_desc_busunlock(desc, flags);
 }
 EXPORT_SYMBOL(enable_irq);
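The depth counter keeps nested disables balanced; every disable must be matched before the line fires again:

	disable_irq(irq);		/* depth 0 -> 1, waits for handlers */
	disable_irq_nosync(irq);	/* depth 1 -> 2 */
	enable_irq(irq);		/* depth 2 -> 1, still disabled */
	enable_irq(irq);		/* depth 1 -> 0, line live again */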
 
@@ -348,7 +465,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
 }
 
 /**
- *     set_irq_wake - control irq power management wakeup
+ *     irq_set_irq_wake - control irq power management wakeup
  *     @irq:   interrupt to control
  *     @on:    enable/disable power management wakeup
  *
@@ -359,23 +476,22 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
  *     Wakeup mode lets this IRQ wake the system from sleep
  *     states like "suspend to RAM".
  */
-int set_irq_wake(unsigned int irq, unsigned int on)
+int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
        int ret = 0;
 
        /* wakeup-capable irqs can be shared between drivers that
         * don't need to have the same sleep mode behaviors.
         */
-       raw_spin_lock_irqsave(&desc->lock, flags);
        if (on) {
                if (desc->wake_depth++ == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 0;
                        else
-                               desc->status |= IRQ_WAKEUP;
+                               irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        } else {
                if (desc->wake_depth == 0) {
@@ -385,14 +501,13 @@ int set_irq_wake(unsigned int irq, unsigned int on)
                        if (ret)
                                desc->wake_depth = 1;
                        else
-                               desc->status &= ~IRQ_WAKEUP;
+                               irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        }
-
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
+       irq_put_desc_busunlock(desc, flags);
        return ret;
 }
-EXPORT_SYMBOL(set_irq_wake);
+EXPORT_SYMBOL(irq_set_irq_wake);
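A driver arming its line as a wakeup source under the renamed API; the suspend hook and the private data are sketched, not taken from this series:

	static int my_suspend(struct device *dev)
	{
		struct my_priv *p = dev_get_drvdata(dev);	/* hypothetical */

		if (device_may_wakeup(dev))
			irq_set_irq_wake(p->irq, 1);
		return 0;
	}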
 
 /*
  * Internal function that tells the architecture code whether a
@@ -401,43 +516,27 @@ EXPORT_SYMBOL(set_irq_wake);
  */
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
-       struct irqaction *action;
        unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+       int canrequest = 0;
 
        if (!desc)
                return 0;
 
-       if (desc->status & IRQ_NOREQUEST)
-               return 0;
-
-       raw_spin_lock_irqsave(&desc->lock, flags);
-       action = desc->action;
-       if (action)
-               if (irqflags & action->flags & IRQF_SHARED)
-                       action = NULL;
-
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
-
-       return !action;
-}
-
-void compat_irq_chip_set_default_handler(struct irq_desc *desc)
-{
-       /*
-        * If the architecture still has not overriden
-        * the flow handler then zap the default. This
-        * should catch incorrect flow-type setting.
-        */
-       if (desc->handle_irq == &handle_bad_irq)
-               desc->handle_irq = NULL;
+       if (irq_settings_can_request(desc)) {
+               if (!desc->action ||
+                   (irqflags & desc->action->flags & IRQF_SHARED))
+                       canrequest = 1;
+       }
+       irq_put_desc_unlock(desc, flags);
+       return canrequest;
 }
 
 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                      unsigned long flags)
 {
-       int ret;
        struct irq_chip *chip = desc->irq_data.chip;
+       int ret, unmask = 0;
 
        if (!chip || !chip->irq_set_type) {
                /*
@@ -449,23 +548,43 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                return 0;
        }
 
+       flags &= IRQ_TYPE_SENSE_MASK;
+
+       if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
+               if (!(desc->istate & IRQS_MASKED))
+                       mask_irq(desc);
+               if (!(desc->istate & IRQS_DISABLED))
+                       unmask = 1;
+       }
+
        /* caller masked out all except trigger mode flags */
        ret = chip->irq_set_type(&desc->irq_data, flags);
 
-       if (ret)
-               pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
-                      flags, irq, chip->irq_set_type);
-       else {
-               if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-                       flags |= IRQ_LEVEL;
-               /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
-               desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
-               desc->status |= flags;
+       switch (ret) {
+       case IRQ_SET_MASK_OK:
+               irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
+               irqd_set(&desc->irq_data, flags);
+
+       case IRQ_SET_MASK_OK_NOCOPY:
+               flags = irqd_get_trigger_type(&desc->irq_data);
+               irq_settings_set_trigger_mask(desc, flags);
+               irqd_clear(&desc->irq_data, IRQD_LEVEL);
+               irq_settings_clr_level(desc);
+               if (flags & IRQ_TYPE_LEVEL_MASK) {
+                       irq_settings_set_level(desc);
+                       irqd_set(&desc->irq_data, IRQD_LEVEL);
+               }
 
                if (chip != desc->irq_data.chip)
                        irq_chip_set_defaults(desc->irq_data.chip);
+               ret = 0;
+               break;
+       default:
+               pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
+                      flags, irq, chip->irq_set_type);
        }
-
+       if (unmask)
+               unmask_irq(desc);
        return ret;
 }
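On the chip side the contract is now expressed through the return value; a sketch of an irq_set_type callback, with the register programming left as an assumption:

	static int my_chip_set_type(struct irq_data *data, unsigned int type)
	{
		if (type & ~IRQ_TYPE_SENSE_MASK)
			return -EINVAL;
		/* hypothetical: program edge/polarity registers here */
		return IRQ_SET_MASK_OK;	/* core copies type into irq_data */
	}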
 
@@ -509,8 +628,11 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  * handler finished. Unmask if the interrupt has not been disabled and
  * is marked MASKED.
  */
-static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+static void irq_finalize_oneshot(struct irq_desc *desc,
+                                struct irqaction *action, bool force)
 {
+       if (!(desc->istate & IRQS_ONESHOT))
+               return;
 again:
        chip_bus_lock(desc);
        raw_spin_lock_irq(&desc->lock);
@@ -522,26 +644,44 @@ again:
         * The thread finishes faster than the hard interrupt handler
         * on the other CPU. If we unmask the irq line then the
         * interrupt can come in again and mask the line, then leave due
-        * to IRQ_INPROGRESS and the irq line is masked forever.
+        * to IRQS_INPROGRESS and the irq line is masked forever.
+        *
+        * This also serializes the state of shared oneshot handlers
+        * versus "desc->threads_onehsot |= action->thread_mask;" in
+        * irq_wake_thread(). See the comment there which explains the
+        * serialization.
         */
-       if (unlikely(desc->status & IRQ_INPROGRESS)) {
+       if (unlikely(desc->istate & IRQS_INPROGRESS)) {
                raw_spin_unlock_irq(&desc->lock);
                chip_bus_sync_unlock(desc);
                cpu_relax();
                goto again;
        }
 
-       if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
-               desc->status &= ~IRQ_MASKED;
+       /*
+        * Now check again whether the thread should run. Otherwise
+        * we would clear the threads_oneshot bit of this thread which
+        * was just set.
+        */
+       if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+               goto out_unlock;
+
+       desc->threads_oneshot &= ~action->thread_mask;
+
+       if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
+           (desc->istate & IRQS_MASKED)) {
+               irq_compat_clr_masked(desc);
+               desc->istate &= ~IRQS_MASKED;
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
        }
+out_unlock:
        raw_spin_unlock_irq(&desc->lock);
        chip_bus_sync_unlock(desc);
 }
 
 #ifdef CONFIG_SMP
 /*
  * Check whether we need to change the affinity of the interrupt thread.
  */
 static void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
@@ -572,6 +712,32 @@ static inline void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 #endif
 
+/*
+ * Interrupts which are not explicitly requested as threaded
+ * interrupts rely on the implicit bh/preempt disable of the hard irq
+ * context. So we need to disable bh here to avoid deadlocks and other
+ * side effects.
+ */
+static void
+irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+       local_bh_disable();
+       action->thread_fn(action->irq, action->dev_id);
+       irq_finalize_oneshot(desc, action, false);
+       local_bh_enable();
+}
+
+/*
+ * Interrupts explicitly requested as threaded interrupts want to be
+ * preemptible - many of them need to sleep and wait for slow busses to
+ * complete.
+ */
+static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+       action->thread_fn(action->irq, action->dev_id);
+       irq_finalize_oneshot(desc, action, false);
+}
+
 /*
  * Interrupt handler thread
  */
@@ -582,7 +748,14 @@ static int irq_thread(void *data)
        };
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
-       int wake, oneshot = desc->status & IRQ_ONESHOT;
+       void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
+       int wake;
+
+       if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
+                                       &action->thread_flags))
+               handler_fn = irq_forced_thread_fn;
+       else
+               handler_fn = irq_thread_fn;
 
        sched_setscheduler(current, SCHED_FIFO, &param);
        current->irqaction = action;
@@ -594,23 +767,20 @@ static int irq_thread(void *data)
                atomic_inc(&desc->threads_active);
 
                raw_spin_lock_irq(&desc->lock);
-               if (unlikely(desc->status & IRQ_DISABLED)) {
+               if (unlikely(desc->istate & IRQS_DISABLED)) {
                        /*
                         * CHECKME: We might need a dedicated
                         * IRQ_THREAD_PENDING flag here, which
                         * retriggers the thread in check_irq_resend()
-                        * but AFAICT IRQ_PENDING should be fine as it
+                        * but AFAICT IRQS_PENDING should be fine as it
                         * retriggers the interrupt itself --- tglx
                         */
-                       desc->status |= IRQ_PENDING;
+                       irq_compat_set_pending(desc);
+                       desc->istate |= IRQS_PENDING;
                        raw_spin_unlock_irq(&desc->lock);
                } else {
                        raw_spin_unlock_irq(&desc->lock);
-
-                       action->thread_fn(action->irq, action->dev_id);
-
-                       if (oneshot)
-                               irq_finalize_oneshot(action->irq, desc);
+                       handler_fn(desc, action);
                }
 
                wake = atomic_dec_and_test(&desc->threads_active);
@@ -619,6 +789,9 @@ static int irq_thread(void *data)
                        wake_up(&desc->wait_for_threads);
        }
 
+       /* Prevent a stale desc->threads_oneshot */
+       irq_finalize_oneshot(desc, action, true);
+
        /*
         * Clear irqaction. Otherwise exit_irq_thread() would make
         * fuzz about an active irq thread going into nirvana.
@@ -633,6 +806,7 @@ static int irq_thread(void *data)
 void exit_irq_thread(void)
 {
        struct task_struct *tsk = current;
+       struct irq_desc *desc;
 
        if (!tsk->irqaction)
                return;
@@ -641,6 +815,14 @@ void exit_irq_thread(void)
               "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
               tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
 
+       desc = irq_to_desc(tsk->irqaction->irq);
+
+       /*
+        * Prevent a stale desc->threads_oneshot. Must be called
+        * before setting the IRQTF_DIED flag.
+        */
+       irq_finalize_oneshot(desc, tsk->irqaction, true);
+
        /*
         * Set the THREAD DIED flag to prevent further wakeups of the
         * soon to be gone threaded handler.
@@ -648,6 +830,22 @@ void exit_irq_thread(void)
        set_bit(IRQTF_DIED, &tsk->irqaction->flags);
 }
 
+static void irq_setup_forced_threading(struct irqaction *new)
+{
+       if (!force_irqthreads)
+               return;
+       if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
+               return;
+
+       new->flags |= IRQF_ONESHOT;
+
+       if (!new->thread_fn) {
+               set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+               new->thread_fn = new->handler;
+               new->handler = irq_default_primary_handler;
+       }
+}
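To illustrate the conversion above — a sketch, not kernel code — booting with forced irq threading enabled rewrites a plain (non-threaded) request like this:

    /* driver asked for: handler = foo_handler, thread_fn = NULL    */
    /* after irq_setup_forced_threading():                          */
    /*   action->thread_fn == foo_handler;           (moved over)   */
    /*   action->handler   == irq_default_primary_handler;          */
    /*   action->flags    |= IRQF_ONESHOT;                          */
    /*   IRQTF_FORCED_THREAD set in action->thread_flags            */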
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -657,9 +855,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
        struct irqaction *old, **old_ptr;
        const char *old_name = NULL;
-       unsigned long flags;
-       int nested, shared = 0;
-       int ret;
+       unsigned long flags, thread_mask = 0;
+       int ret, nested, shared = 0;
+       cpumask_var_t mask;
 
        if (!desc)
                return -EINVAL;
@@ -683,15 +881,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                rand_initialize_irq(irq);
        }
 
-       /* Oneshot interrupts are not allowed with shared */
-       if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
-               return -EINVAL;
-
        /*
         * Check whether the interrupt nests into another interrupt
         * thread.
         */
-       nested = desc->status & IRQ_NESTED_THREAD;
+       nested = irq_settings_is_nested_thread(desc);
        if (nested) {
                if (!new->thread_fn)
                        return -EINVAL;
@@ -701,6 +895,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 * dummy function which warns when called.
                 */
                new->handler = irq_nested_primary_handler;
+       } else {
+               irq_setup_forced_threading(new);
        }
 
        /*
@@ -724,6 +920,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                new->thread = t;
        }
 
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+               ret = -ENOMEM;
+               goto out_thread;
+       }
+
        /*
         * The following block of code has to be executed atomically
         */
@@ -735,29 +936,40 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 * Can't share interrupts unless both agree to and are
                 * the same type (level, edge, polarity). So both flag
                 * fields must have IRQF_SHARED set and the bits which
-                * set the trigger type must match.
+                * set the trigger type must match. Also all must
+                * agree on ONESHOT.
                 */
                if (!((old->flags & new->flags) & IRQF_SHARED) ||
-                   ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
+                   ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
+                   ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
                        old_name = old->name;
                        goto mismatch;
                }
 
-#if defined(CONFIG_IRQ_PER_CPU)
                /* All handlers must agree on per-cpuness */
                if ((old->flags & IRQF_PERCPU) !=
                    (new->flags & IRQF_PERCPU))
                        goto mismatch;
-#endif
 
                /* add new interrupt at end of irq queue */
                do {
+                       thread_mask |= old->thread_mask;
                        old_ptr = &old->next;
                        old = *old_ptr;
                } while (old);
                shared = 1;
        }
 
+       /*
+        * Setup the thread mask for this irqaction. Unlikely to have
+        * 32 resp 64 irqs sharing one line, but who knows.
+        */
+       if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
+               ret = -EBUSY;
+               goto out_mask;
+       }
+       new->thread_mask = 1 << ffz(thread_mask);
+
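The thread_mask bookkeeping above hands each oneshot action on a shared line its own bit, found with ffz() (find first zero bit); a small worked example with hypothetical values:

    /* two actions already hold bits 0 and 1 */
    unsigned long thread_mask = 0x3;
    unsigned long bit = 1UL << ffz(thread_mask);  /* ffz(0x3) == 2 -> 0x4 */
    /* thread_mask == ~0UL means all BITS_PER_LONG slots are taken
     * and the setup fails with -EBUSY, as coded above. */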
        if (!shared) {
                irq_chip_set_defaults(desc->irq_data.chip);
 
@@ -769,42 +981,44 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                                        new->flags & IRQF_TRIGGER_MASK);
 
                        if (ret)
-                               goto out_thread;
-               } else
-                       compat_irq_chip_set_default_handler(desc);
-#if defined(CONFIG_IRQ_PER_CPU)
-               if (new->flags & IRQF_PERCPU)
-                       desc->status |= IRQ_PER_CPU;
-#endif
+                               goto out_mask;
+               }
 
-               desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
-                                 IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
+               desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
+                                 IRQS_INPROGRESS | IRQS_ONESHOT | \
+                                 IRQS_WAITING);
+
+               if (new->flags & IRQF_PERCPU) {
+                       irqd_set(&desc->irq_data, IRQD_PER_CPU);
+                       irq_settings_set_per_cpu(desc);
+               }
 
                if (new->flags & IRQF_ONESHOT)
-                       desc->status |= IRQ_ONESHOT;
+                       desc->istate |= IRQS_ONESHOT;
 
-               if (!(desc->status & IRQ_NOAUTOEN)) {
-                       desc->depth = 0;
-                       desc->status &= ~IRQ_DISABLED;
-                       desc->irq_data.chip->irq_startup(&desc->irq_data);
-               } else
+               if (irq_settings_can_autoenable(desc))
+                       irq_startup(desc);
+               else
                        /* Undo nested disables: */
                        desc->depth = 1;
 
                /* Exclude IRQ from balancing if requested */
-               if (new->flags & IRQF_NOBALANCING)
-                       desc->status |= IRQ_NO_BALANCING;
+               if (new->flags & IRQF_NOBALANCING) {
+                       irq_settings_set_no_balancing(desc);
+                       irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+               }
 
                /* Set default affinity mask once everything is setup */
-               setup_affinity(irq, desc);
-
-       } else if ((new->flags & IRQF_TRIGGER_MASK)
-                       && (new->flags & IRQF_TRIGGER_MASK)
-                               != (desc->status & IRQ_TYPE_SENSE_MASK)) {
-               /* hope the handler works with the actual trigger mode... */
-               pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
-                               irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
-                               (int)(new->flags & IRQF_TRIGGER_MASK));
+               setup_affinity(irq, desc, mask);
+
+       } else if (new->flags & IRQF_TRIGGER_MASK) {
+               unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
+               unsigned int omsk = irq_settings_get_trigger_mask(desc);
+
+               if (nmsk != omsk)
+                       /* hope the handler works with the current trigger mode */
+                       pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
+                                  irq, omsk, nmsk);
        }
 
        new->irq = irq;
@@ -818,8 +1032,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
         * Check whether we disabled the irq via the spurious handler
         * before. Reenable it and give it another chance.
         */
-       if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
-               desc->status &= ~IRQ_SPURIOUS_DISABLED;
+       if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
+               desc->istate &= ~IRQS_SPURIOUS_DISABLED;
                __enable_irq(desc, irq, false);
        }
 
@@ -849,6 +1063,9 @@ mismatch:
 #endif
        ret = -EBUSY;
 
+out_mask:
+       free_cpumask_var(mask);
+
 out_thread:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        if (new->thread) {
@@ -871,9 +1088,14 @@ out_thread:
  */
 int setup_irq(unsigned int irq, struct irqaction *act)
 {
+       int retval;
        struct irq_desc *desc = irq_to_desc(irq);
 
-       return __setup_irq(irq, desc, act);
+       chip_bus_lock(desc);
+       retval = __setup_irq(irq, desc, act);
+       chip_bus_sync_unlock(desc);
+
+       return retval;
 }
 EXPORT_SYMBOL_GPL(setup_irq);
 
@@ -924,13 +1146,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 #endif
 
        /* If this was the last handler, shut down the IRQ line: */
-       if (!desc->action) {
-               desc->status |= IRQ_DISABLED;
-               if (desc->irq_data.chip->irq_shutdown)
-                       desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-               else
-                       desc->irq_data.chip->irq_disable(&desc->irq_data);
-       }
+       if (!desc->action)
+               irq_shutdown(desc);
 
 #ifdef CONFIG_SMP
        /* make sure affinity_hint is cleaned up */
@@ -1004,6 +1221,11 @@ void free_irq(unsigned int irq, void *dev_id)
        if (!desc)
                return;
 
+#ifdef CONFIG_SMP
+       if (WARN_ON(desc->affinity_notify))
+               desc->affinity_notify = NULL;
+#endif
+
        chip_bus_lock(desc);
        kfree(__free_irq(irq, dev_id));
        chip_bus_sync_unlock(desc);
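The new WARN_ON documents a teardown-ordering rule: callers must drop their affinity notifier before freeing the irq. A sketch of the expected order, assuming the irq_set_affinity_notifier() interface introduced in this series:

    irq_set_affinity_notifier(irq, NULL);   /* unregister notifier first */
    free_irq(irq, dev);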
@@ -1074,7 +1296,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        if (!desc)
                return -EINVAL;
 
-       if (desc->status & IRQ_NOREQUEST)
+       if (!irq_settings_can_request(desc))
                return -EINVAL;
 
        if (!handler) {
@@ -1100,7 +1322,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        if (retval)
                kfree(action);
 
-#ifdef CONFIG_DEBUG_SHIRQ
+#ifdef CONFIG_DEBUG_SHIRQ_FIXME
        if (!retval && (irqflags & IRQF_SHARED)) {
                /*
                 * It's a shared IRQ -- the driver ought to be prepared for it
@@ -1149,7 +1371,7 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
        if (!desc)
                return -EINVAL;
 
-       if (desc->status & IRQ_NESTED_THREAD) {
+       if (irq_settings_is_nested_thread(desc)) {
                ret = request_threaded_irq(irq, NULL, handler,
                                           flags, name, dev_id);
                return !ret ? IRQC_IS_NESTED : ret;
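A usage sketch for request_any_context_irq() (hypothetical driver names); the return value tells the caller which context was picked:

    ret = request_any_context_irq(irq, foo_handler, 0, "foo", dev);
    if (ret < 0)
            return ret;
    /* on success, ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED */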
index 441fd629ff04b41db0bb044f06163f5dd8c08c58..ec4806d4778b2143c03f94c1d067f56647928e06 100644 (file)
@@ -4,23 +4,23 @@
 
 #include "internals.h"
 
-void move_masked_irq(int irq)
+void irq_move_masked_irq(struct irq_data *idata)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
-       struct irq_chip *chip = desc->irq_data.chip;
+       struct irq_desc *desc = irq_data_to_desc(idata);
+       struct irq_chip *chip = idata->chip;
 
-       if (likely(!(desc->status & IRQ_MOVE_PENDING)))
+       if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
                return;
 
        /*
         * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
         */
-       if (CHECK_IRQ_PER_CPU(desc->status)) {
+       if (!irqd_can_balance(&desc->irq_data)) {
                WARN_ON(1);
                return;
        }
 
-       desc->status &= ~IRQ_MOVE_PENDING;
+       irqd_clr_move_pending(&desc->irq_data);
 
        if (unlikely(cpumask_empty(desc->pending_mask)))
                return;
@@ -53,15 +53,20 @@ void move_masked_irq(int irq)
        cpumask_clear(desc->pending_mask);
 }
 
-void move_native_irq(int irq)
+void move_masked_irq(int irq)
+{
+       irq_move_masked_irq(irq_get_irq_data(irq));
+}
+
+void irq_move_irq(struct irq_data *idata)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_desc *desc = irq_data_to_desc(idata);
        bool masked;
 
-       if (likely(!(desc->status & IRQ_MOVE_PENDING)))
+       if (likely(!irqd_is_setaffinity_pending(idata)))
                return;
 
-       if (unlikely(desc->status & IRQ_DISABLED))
+       if (unlikely(desc->istate & IRQS_DISABLED))
                return;
 
        /*
@@ -69,10 +74,15 @@ void move_native_irq(int irq)
         * threaded interrupt with ONESHOT set, we can end up with an
         * interrupt storm.
         */
-       masked = desc->status & IRQ_MASKED;
+       masked = desc->istate & IRQS_MASKED;
        if (!masked)
-               desc->irq_data.chip->irq_mask(&desc->irq_data);
-       move_masked_irq(irq);
+               idata->chip->irq_mask(idata);
+       irq_move_masked_irq(idata);
        if (!masked)
-               desc->irq_data.chip->irq_unmask(&desc->irq_data);
+               idata->chip->irq_unmask(idata);
+}
+
+void move_native_irq(int irq)
+{
+       irq_move_irq(irq_get_irq_data(irq));
 }
index 0d4005d85b03243746f6c45d54cf1600bc3e5acf..f76fc00c98776746a12fb5dce9111cb4033b1553 100644 (file)
@@ -18,7 +18,7 @@
  * During system-wide suspend or hibernation device drivers need to be prevented
  * from receiving interrupts and this function is provided for this purpose.
  * It marks all interrupt lines in use, except for the timer ones, as disabled
- * and sets the IRQ_SUSPENDED flag for each of them.
+ * and sets the IRQS_SUSPENDED flag for each of them.
  */
 void suspend_device_irqs(void)
 {
@@ -34,7 +34,7 @@ void suspend_device_irqs(void)
        }
 
        for_each_irq_desc(irq, desc)
-               if (desc->status & IRQ_SUSPENDED)
+               if (desc->istate & IRQS_SUSPENDED)
                        synchronize_irq(irq);
 }
 EXPORT_SYMBOL_GPL(suspend_device_irqs);
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(suspend_device_irqs);
  * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
  *
  * Enable all interrupt lines previously disabled by suspend_device_irqs() that
- * have the IRQ_SUSPENDED flag set.
+ * have the IRQS_SUSPENDED flag set.
  */
 void resume_device_irqs(void)
 {
@@ -53,9 +53,6 @@ void resume_device_irqs(void)
        for_each_irq_desc(irq, desc) {
                unsigned long flags;
 
-               if (!(desc->status & IRQ_SUSPENDED))
-                       continue;
-
                raw_spin_lock_irqsave(&desc->lock, flags);
                __enable_irq(desc, irq, true);
                raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -71,9 +68,24 @@ int check_wakeup_irqs(void)
        struct irq_desc *desc;
        int irq;
 
-       for_each_irq_desc(irq, desc)
-               if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING))
-                       return -EBUSY;
+       for_each_irq_desc(irq, desc) {
+               if (irqd_is_wakeup_set(&desc->irq_data)) {
+                       if (desc->istate & IRQS_PENDING)
+                               return -EBUSY;
+                       continue;
+               }
+               /*
+                * Check whether the non-wakeup interrupts need to
+                * be masked before finally going into suspend.
+                * That's for hardware which has no wakeup
+                * source configuration facility. The chip
+                * implementation indicates that with
+                * IRQCHIP_MASK_ON_SUSPEND.
+                */
+               if (desc->istate & IRQS_SUSPENDED &&
+                   irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
+                       mask_irq(desc);
+       }
 
        return 0;
 }
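A chip opts into the mask-on-suspend behaviour described above through its flags field; a minimal sketch with hypothetical foo_* callbacks:

    static struct irq_chip foo_chip = {
            .name       = "foo",
            .irq_mask   = foo_mask,
            .irq_unmask = foo_unmask,
            /* no wakeup source configuration in hardware: */
            .flags      = IRQCHIP_MASK_ON_SUSPEND,
    };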
index 6c8a2a9f8a7bf802f72527831e0f5f32b773e964..4cc2e5ed0bec2e1228f741af005d581808a6c13a 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
 
 #include "internals.h"
 
@@ -24,7 +25,7 @@ static int irq_affinity_proc_show(struct seq_file *m, void *v)
        const struct cpumask *mask = desc->irq_data.affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-       if (desc->status & IRQ_MOVE_PENDING)
+       if (irqd_is_setaffinity_pending(&desc->irq_data))
                mask = desc->pending_mask;
 #endif
        seq_cpumask(m, mask);
@@ -65,8 +66,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
        cpumask_var_t new_value;
        int err;
 
-       if (!irq_to_desc(irq)->irq_data.chip->irq_set_affinity || no_irq_affinity ||
-           irq_balancing_disabled(irq))
+       if (!irq_can_set_affinity(irq) || no_irq_affinity)
                return -EIO;
 
        if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
@@ -89,7 +89,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
        if (!cpumask_intersects(new_value, cpu_online_mask)) {
                /* Special case for empty set - allow the architecture
                   code to set default SMP affinity. */
-               err = irq_select_affinity_usr(irq) ? -EINVAL : count;
+               err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
        } else {
                irq_set_affinity(irq, new_value);
                err = count;
@@ -357,3 +357,65 @@ void init_irq_proc(void)
        }
 }
 
+#ifdef CONFIG_GENERIC_IRQ_SHOW
+
+int __weak arch_show_interrupts(struct seq_file *p, int prec)
+{
+       return 0;
+}
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+       static int prec;
+
+       unsigned long flags, any_count = 0;
+       int i = *(loff_t *) v, j;
+       struct irqaction *action;
+       struct irq_desc *desc;
+
+       if (i > nr_irqs)
+               return 0;
+
+       if (i == nr_irqs)
+               return arch_show_interrupts(p, prec);
+
+       /* print header and calculate the width of the first column */
+       if (i == 0) {
+               for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
+                       j *= 10;
+
+               seq_printf(p, "%*s", prec + 8, "");
+               for_each_online_cpu(j)
+                       seq_printf(p, "CPU%-8d", j);
+               seq_putc(p, '\n');
+       }
+
+       desc = irq_to_desc(i);
+       if (!desc)
+               return 0;
+
+       raw_spin_lock_irqsave(&desc->lock, flags);
+       for_each_online_cpu(j)
+               any_count |= kstat_irqs_cpu(i, j);
+       action = desc->action;
+       if (!action && !any_count)
+               goto out;
+
+       seq_printf(p, "%*d: ", prec, i);
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+       seq_printf(p, " %8s", desc->irq_data.chip->name);
+       seq_printf(p, "-%-8s", desc->name);
+
+       if (action) {
+               seq_printf(p, "  %s", action->name);
+               while ((action = action->next) != NULL)
+                       seq_printf(p, ", %s", action->name);
+       }
+
+       seq_putc(p, '\n');
+out:
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+       return 0;
+}
+#endif
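The generic show_interrupts() above produces the familiar /proc/interrupts layout; an illustrative (made-up) excerpt:

               CPU0       CPU1
      0:   13527954          0   IO-APIC-edge      timer
      1:        351        102   IO-APIC-edge      i8042

The first column width comes from prec, the per-cpu counts from kstat_irqs_cpu(), followed by the chip name, the descriptor name and the action names.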
index 891115a929aa1dfe223b01c5f50efbc0ec35a2f7..ad683a99b1ec434681892a4a37bf9bcb67cc9763 100644 (file)
@@ -23,7 +23,7 @@
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 
 /* Bitmap to handle software resend of interrupts: */
-static DECLARE_BITMAP(irqs_resend, NR_IRQS);
+static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
 
 /*
  * Run software resends of IRQ's
@@ -55,20 +55,19 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
  */
 void check_irq_resend(struct irq_desc *desc, unsigned int irq)
 {
-       unsigned int status = desc->status;
-
-       /*
-        * Make sure the interrupt is enabled, before resending it:
-        */
-       desc->irq_data.chip->irq_enable(&desc->irq_data);
-
        /*
         * We do not resend level type interrupts. Level type
         * interrupts are resent by hardware when they are still
         * active.
         */
-       if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
-               desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;
+       if (irq_settings_is_level(desc))
+               return;
+       if (desc->istate & IRQS_REPLAY)
+               return;
+       if (desc->istate & IRQS_PENDING) {
+               irq_compat_clr_pending(desc);
+               desc->istate &= ~IRQS_PENDING;
+               desc->istate |= IRQS_REPLAY;
 
                if (!desc->irq_data.chip->irq_retrigger ||
                    !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
new file mode 100644 (file)
index 0000000..0227ad3
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * Internal header to deal with irq_desc->status which will be renamed
+ * to irq_desc->settings.
+ */
+enum {
+       _IRQ_DEFAULT_INIT_FLAGS = IRQ_DEFAULT_INIT_FLAGS,
+       _IRQ_PER_CPU            = IRQ_PER_CPU,
+       _IRQ_LEVEL              = IRQ_LEVEL,
+       _IRQ_NOPROBE            = IRQ_NOPROBE,
+       _IRQ_NOREQUEST          = IRQ_NOREQUEST,
+       _IRQ_NOAUTOEN           = IRQ_NOAUTOEN,
+       _IRQ_MOVE_PCNTXT        = IRQ_MOVE_PCNTXT,
+       _IRQ_NO_BALANCING       = IRQ_NO_BALANCING,
+       _IRQ_NESTED_THREAD      = IRQ_NESTED_THREAD,
+       _IRQF_MODIFY_MASK       = IRQF_MODIFY_MASK,
+};
+
+#define IRQ_INPROGRESS         GOT_YOU_MORON
+#define IRQ_REPLAY             GOT_YOU_MORON
+#define IRQ_WAITING            GOT_YOU_MORON
+#define IRQ_DISABLED           GOT_YOU_MORON
+#define IRQ_PENDING            GOT_YOU_MORON
+#define IRQ_MASKED             GOT_YOU_MORON
+#define IRQ_WAKEUP             GOT_YOU_MORON
+#define IRQ_MOVE_PENDING       GOT_YOU_MORON
+#define IRQ_PER_CPU            GOT_YOU_MORON
+#define IRQ_NO_BALANCING       GOT_YOU_MORON
+#define IRQ_AFFINITY_SET       GOT_YOU_MORON
+#define IRQ_LEVEL              GOT_YOU_MORON
+#define IRQ_NOPROBE            GOT_YOU_MORON
+#define IRQ_NOREQUEST          GOT_YOU_MORON
+#define IRQ_NOAUTOEN           GOT_YOU_MORON
+#define IRQ_NESTED_THREAD      GOT_YOU_MORON
+#undef IRQF_MODIFY_MASK
+#define IRQF_MODIFY_MASK       GOT_YOU_MORON
+
+static inline void
+irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
+{
+       desc->status &= ~(clr & _IRQF_MODIFY_MASK);
+       desc->status |= (set & _IRQF_MODIFY_MASK);
+}
+
+static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
+{
+       return desc->status & _IRQ_PER_CPU;
+}
+
+static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
+{
+       desc->status |= _IRQ_PER_CPU;
+}
+
+static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
+{
+       desc->status |= _IRQ_NO_BALANCING;
+}
+
+static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
+{
+       return desc->status & _IRQ_NO_BALANCING;
+}
+
+static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
+{
+       return desc->status & IRQ_TYPE_SENSE_MASK;
+}
+
+static inline void
+irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask)
+{
+       desc->status &= ~IRQ_TYPE_SENSE_MASK;
+       desc->status |= mask & IRQ_TYPE_SENSE_MASK;
+}
+
+static inline bool irq_settings_is_level(struct irq_desc *desc)
+{
+       return desc->status & _IRQ_LEVEL;
+}
+
+static inline void irq_settings_clr_level(struct irq_desc *desc)
+{
+       desc->status &= ~_IRQ_LEVEL;
+}
+
+static inline void irq_settings_set_level(struct irq_desc *desc)
+{
+       desc->status |= _IRQ_LEVEL;
+}
+
+static inline bool irq_settings_can_request(struct irq_desc *desc)
+{
+       return !(desc->status & _IRQ_NOREQUEST);
+}
+
+static inline void irq_settings_clr_norequest(struct irq_desc *desc)
+{
+       desc->status &= ~_IRQ_NOREQUEST;
+}
+
+static inline void irq_settings_set_norequest(struct irq_desc *desc)
+{
+       desc->status |= _IRQ_NOREQUEST;
+}
+
+static inline bool irq_settings_can_probe(struct irq_desc *desc)
+{
+       return !(desc->status & _IRQ_NOPROBE);
+}
+
+static inline void irq_settings_clr_noprobe(struct irq_desc *desc)
+{
+       desc->status &= ~_IRQ_NOPROBE;
+}
+
+static inline void irq_settings_set_noprobe(struct irq_desc *desc)
+{
+       desc->status |= _IRQ_NOPROBE;
+}
+
+static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc)
+{
+       return desc->status & _IRQ_MOVE_PCNTXT;
+}
+
+static inline bool irq_settings_can_autoenable(struct irq_desc *desc)
+{
+       return !(desc->status & _IRQ_NOAUTOEN);
+}
+
+static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
+{
+       return desc->status & _IRQ_NESTED_THREAD;
+}
+
+/* Nothing should touch desc->status from now on */
+#undef status
+#define status         USE_THE_PROPER_WRAPPERS_YOU_MORON
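The two poison layers make any leftover direct access fail at compile time; for example (sketch):

    desc->status |= IRQ_LEVEL;
    /* preprocesses to:
     *   desc->USE_THE_PROPER_WRAPPERS_YOU_MORON |= GOT_YOU_MORON;
     * neither identifier exists, so the build breaks at the offender.
     */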
index 3089d3b9d5f3912643d49bf46960fe7938a46e6a..dd586ebf9c8c277216c1aa42ac2e63dcabc314c1 100644 (file)
@@ -21,70 +21,94 @@ static int irqfixup __read_mostly;
 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
 static void poll_spurious_irqs(unsigned long dummy);
 static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
+static int irq_poll_cpu;
+static atomic_t irq_poll_active;
+
+/*
+ * We wait here for a poller to finish.
+ *
+ * If the poll runs on this CPU, then we yell loudly and return
+ * false. That will leave the interrupt line disabled in the worst
+ * case, but it should never happen.
+ *
+ * We wait until the poller is done and then recheck disabled and
+ * action (about to be disabled). Only if it's still active do we
+ * return true and let the handler run.
+ */
+bool irq_wait_for_poll(struct irq_desc *desc)
+{
+       if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
+                     "irq poll in progress on cpu %d for irq %d\n",
+                     smp_processor_id(), desc->irq_data.irq))
+               return false;
+
+#ifdef CONFIG_SMP
+       do {
+               raw_spin_unlock(&desc->lock);
+               while (desc->istate & IRQS_INPROGRESS)
+                       cpu_relax();
+               raw_spin_lock(&desc->lock);
+       } while (desc->istate & IRQS_INPROGRESS);
+       /* Might have been disabled in the meantime */
+       return !(desc->istate & IRQS_DISABLED) && desc->action;
+#else
+       return false;
+#endif
+}
+
 
 /*
  * Recovery handler for misrouted interrupts.
  */
-static int try_one_irq(int irq, struct irq_desc *desc)
+static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 {
+       irqreturn_t ret = IRQ_NONE;
        struct irqaction *action;
-       int ok = 0, work = 0;
 
        raw_spin_lock(&desc->lock);
-       /* Already running on another processor */
-       if (desc->status & IRQ_INPROGRESS) {
-               /*
-                * Already running: If it is shared get the other
-                * CPU to go looking for our mystery interrupt too
-                */
-               if (desc->action && (desc->action->flags & IRQF_SHARED))
-                       desc->status |= IRQ_PENDING;
-               raw_spin_unlock(&desc->lock);
-               return ok;
-       }
-       /* Honour the normal IRQ locking */
-       desc->status |= IRQ_INPROGRESS;
-       action = desc->action;
-       raw_spin_unlock(&desc->lock);
 
-       while (action) {
-               /* Only shared IRQ handlers are safe to call */
-               if (action->flags & IRQF_SHARED) {
-                       if (action->handler(irq, action->dev_id) ==
-                               IRQ_HANDLED)
-                               ok = 1;
-               }
-               action = action->next;
-       }
-       local_irq_disable();
-       /* Now clean up the flags */
-       raw_spin_lock(&desc->lock);
-       action = desc->action;
+       /* PER_CPU and nested thread interrupts are never polled */
+       if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
+               goto out;
 
        /*
-        * While we were looking for a fixup someone queued a real
-        * IRQ clashing with our walk:
+        * Do not poll disabled interrupts unless the spurious
+        * disabled poller asks explicitly.
         */
-       while ((desc->status & IRQ_PENDING) && action) {
+       if ((desc->istate & IRQS_DISABLED) && !force)
+               goto out;
+
+       /*
+        * All handlers must agree on IRQF_SHARED, so we test just the
+        * first. Check for action->next as well.
+        */
+       action = desc->action;
+       if (!action || !(action->flags & IRQF_SHARED) ||
+           (action->flags & __IRQF_TIMER) || !action->next)
+               goto out;
+
+       /* Already running on another processor */
+       if (desc->istate & IRQS_INPROGRESS) {
                /*
-                * Perform real IRQ processing for the IRQ we deferred
+                * Already running: If it is shared get the other
+                * CPU to go looking for our mystery interrupt too
                 */
-               work = 1;
-               raw_spin_unlock(&desc->lock);
-               handle_IRQ_event(irq, action);
-               raw_spin_lock(&desc->lock);
-               desc->status &= ~IRQ_PENDING;
+               irq_compat_set_pending(desc);
+               desc->istate |= IRQS_PENDING;
+               goto out;
        }
-       desc->status &= ~IRQ_INPROGRESS;
-       /*
-        * If we did actual work for the real IRQ line we must let the
-        * IRQ controller clean up too
-        */
-       if (work)
-               irq_end(irq, desc);
-       raw_spin_unlock(&desc->lock);
 
-       return ok;
+       /* Mark it poll in progress */
+       desc->istate |= IRQS_POLL_INPROGRESS;
+       do {
+               if (handle_irq_event(desc) == IRQ_HANDLED)
+                       ret = IRQ_HANDLED;
+               action = desc->action;
+       } while ((desc->istate & IRQS_PENDING) && action);
+       desc->istate &= ~IRQS_POLL_INPROGRESS;
+out:
+       raw_spin_unlock(&desc->lock);
+       return ret == IRQ_HANDLED;
 }
 
 static int misrouted_irq(int irq)
@@ -92,6 +116,11 @@ static int misrouted_irq(int irq)
        struct irq_desc *desc;
        int i, ok = 0;
 
+       if (atomic_inc_return(&irq_poll_active) != 1)
+               goto out;
+
+       irq_poll_cpu = smp_processor_id();
+
        for_each_irq_desc(i, desc) {
                if (!i)
                         continue;
@@ -99,9 +128,11 @@ static int misrouted_irq(int irq)
                if (i == irq)   /* Already tried */
                        continue;
 
-               if (try_one_irq(i, desc))
+               if (try_one_irq(i, desc, false))
                        ok = 1;
        }
+out:
+       atomic_dec(&irq_poll_active);
        /* So the caller can adjust the irq error counts */
        return ok;
 }
@@ -111,23 +142,28 @@ static void poll_spurious_irqs(unsigned long dummy)
        struct irq_desc *desc;
        int i;
 
+       if (atomic_inc_return(&irq_poll_active) != 1)
+               goto out;
+       irq_poll_cpu = smp_processor_id();
+
        for_each_irq_desc(i, desc) {
-               unsigned int status;
+               unsigned int state;
 
                if (!i)
                         continue;
 
                /* Racy but it doesn't matter */
-               status = desc->status;
+               state = desc->istate;
                barrier();
-               if (!(status & IRQ_SPURIOUS_DISABLED))
+               if (!(state & IRQS_SPURIOUS_DISABLED))
                        continue;
 
                local_irq_disable();
-               try_one_irq(i, desc);
+               try_one_irq(i, desc, true);
                local_irq_enable();
        }
-
+out:
+       atomic_dec(&irq_poll_active);
        mod_timer(&poll_spurious_irq_timer,
                  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
@@ -139,15 +175,13 @@ static void poll_spurious_irqs(unsigned long dummy)
  *
  * (The other 100-of-100,000 interrupts may have been a correctly
  *  functioning device sharing an IRQ with the failing one)
- *
- * Called under desc->lock
  */
-
 static void
 __report_bad_irq(unsigned int irq, struct irq_desc *desc,
                 irqreturn_t action_ret)
 {
        struct irqaction *action;
+       unsigned long flags;
 
        if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
                printk(KERN_ERR "irq event %d: bogus return value %x\n",
@@ -159,6 +193,13 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
        dump_stack();
        printk(KERN_ERR "handlers:\n");
 
+       /*
+        * We need to take desc->lock here. note_interrupt() is called
+        * w/o desc->lock held, but IRQS_INPROGRESS set. We might race
+        * with something else removing an action. It's ok to take
+        * desc->lock here. See synchronize_irq().
+        */
+       raw_spin_lock_irqsave(&desc->lock, flags);
        action = desc->action;
        while (action) {
                printk(KERN_ERR "[<%p>]", action->handler);
@@ -167,6 +208,7 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
                printk("\n");
                action = action->next;
        }
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 static void
@@ -218,6 +260,9 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
                    irqreturn_t action_ret)
 {
+       if (desc->istate & IRQS_POLL_INPROGRESS)
+               return;
+
        if (unlikely(action_ret != IRQ_HANDLED)) {
                /*
                 * If we are seeing only the odd spurious IRQ caused by
@@ -254,9 +299,9 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
                 * Now kill the IRQ
                 */
                printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
-               desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
+               desc->istate |= IRQS_SPURIOUS_DISABLED;
                desc->depth++;
-               desc->irq_data.chip->irq_disable(&desc->irq_data);
+               irq_disable(desc);
 
                mod_timer(&poll_spurious_irq_timer,
                          jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
index 999835b6112bc0705e6a511a63889a89c920b9d7..ed253aa24ba491d5861671aeedb636a9a9ad8a55 100644 (file)
 
 #include <asm/irq_regs.h>
 
+struct remote_function_call {
+       struct task_struct *p;
+       int (*func)(void *info);
+       void *info;
+       int ret;
+};
+
+static void remote_function(void *data)
+{
+       struct remote_function_call *tfc = data;
+       struct task_struct *p = tfc->p;
+
+       if (p) {
+               tfc->ret = -EAGAIN;
+               if (task_cpu(p) != smp_processor_id() || !task_curr(p))
+                       return;
+       }
+
+       tfc->ret = tfc->func(tfc->info);
+}
+
+/**
+ * task_function_call - call a function on the cpu on which a task runs
+ * @p:         the task to evaluate
+ * @func:      the function to be called
+ * @info:      the function call argument
+ *
+ * Calls the function @func when the task is currently running. This
+ * might be on the current CPU, in which case the function is called
+ * directly.
+ *
+ * returns: @func return value, or
+ *         -ESRCH  - when the process isn't running
+ *         -EAGAIN - when the process moved away
+ */
+static int
+task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
+{
+       struct remote_function_call data = {
+               .p = p,
+               .func = func,
+               .info = info,
+               .ret = -ESRCH, /* No such (running) process */
+       };
+
+       if (task_curr(p))
+               smp_call_function_single(task_cpu(p), remote_function, &data, 1);
+
+       return data.ret;
+}
+
+/**
+ * cpu_function_call - call a function on the given cpu
+ * @cpu:       the cpu on which @func is called
+ * @func:      the function to be called
+ * @info:      the function call argument
+ *
+ * Calls the function @func on the remote cpu.
+ *
+ * returns: @func return value or -ENXIO when the cpu is offline
+ */
+static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
+{
+       struct remote_function_call data = {
+               .p = NULL,
+               .func = func,
+               .info = info,
+               .ret = -ENXIO, /* No such CPU */
+       };
+
+       smp_call_function_single(cpu, remote_function, &data, 1);
+
+       return data.ret;
+}
+
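A usage sketch for the helpers above (callback name hypothetical); the remote function runs while the task is still current on its CPU, or the call reports that the task moved:

    static int foo_on_task_cpu(void *info)
    {
            /* executes on the CPU where the task runs */
            return 0;
    }

    err = task_function_call(p, foo_on_task_cpu, p);
    if (err == -EAGAIN)
            goto retry;     /* task migrated between check and IPI */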
+#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
+                      PERF_FLAG_FD_OUTPUT  |\
+                      PERF_FLAG_PID_CGROUP)
+
 enum event_type_t {
        EVENT_FLEXIBLE = 0x1,
        EVENT_PINNED = 0x2,
        EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
 };
 
-atomic_t perf_task_events __read_mostly;
+/*
+ * perf_sched_events : >0 events exist
+ * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
+ */
+atomic_t perf_sched_events __read_mostly;
+static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
+
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
@@ -67,7 +150,24 @@ int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
 /*
  * max perf event sample rate
  */
-int sysctl_perf_event_sample_rate __read_mostly = 100000;
+#define DEFAULT_MAX_SAMPLE_RATE 100000
+int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
+static int max_samples_per_tick __read_mostly =
+       DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
+
+int perf_proc_update_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos)
+{
+       int ret = proc_dointvec(table, write, buffer, lenp, ppos);
+
+       if (ret || !write)
+               return ret;
+
+       max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
+
+       return 0;
+}
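A quick arithmetic check of the handler above: with HZ == 1000 and the default rate, max_samples_per_tick = DIV_ROUND_UP(100000, 1000) = 100, and writing 50000 through the sysctl recomputes it to 50.

    /* DIV_ROUND_UP(n, d) == (n + d - 1) / d            */
    /* (100000 + 999) / 1000 == 100 samples per tick    */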
 
 static atomic64_t perf_event_id;
 
@@ -75,7 +175,11 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
                              enum event_type_t event_type);
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-                            enum event_type_t event_type);
+                            enum event_type_t event_type,
+                            struct task_struct *task);
+
+static void update_context_time(struct perf_event_context *ctx);
+static u64 perf_event_time(struct perf_event *event);
 
 void __weak perf_event_print_debug(void)       { }
 
@@ -89,6 +193,360 @@ static inline u64 perf_clock(void)
        return local_clock();
 }
 
+static inline struct perf_cpu_context *
+__get_cpu_context(struct perf_event_context *ctx)
+{
+       return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
+}
+
+#ifdef CONFIG_CGROUP_PERF
+
+/*
+ * Must ensure cgroup is pinned (css_get) before calling
+ * this function. In other words, we cannot call this function
+ * if there is no cgroup event for the current CPU context.
+ */
+static inline struct perf_cgroup *
+perf_cgroup_from_task(struct task_struct *task)
+{
+       return container_of(task_subsys_state(task, perf_subsys_id),
+                       struct perf_cgroup, css);
+}
+
+static inline bool
+perf_cgroup_match(struct perf_event *event)
+{
+       struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+
+       return !event->cgrp || event->cgrp == cpuctx->cgrp;
+}
+
+static inline void perf_get_cgroup(struct perf_event *event)
+{
+       css_get(&event->cgrp->css);
+}
+
+static inline void perf_put_cgroup(struct perf_event *event)
+{
+       css_put(&event->cgrp->css);
+}
+
+static inline void perf_detach_cgroup(struct perf_event *event)
+{
+       perf_put_cgroup(event);
+       event->cgrp = NULL;
+}
+
+static inline int is_cgroup_event(struct perf_event *event)
+{
+       return event->cgrp != NULL;
+}
+
+static inline u64 perf_cgroup_event_time(struct perf_event *event)
+{
+       struct perf_cgroup_info *t;
+
+       t = per_cpu_ptr(event->cgrp->info, event->cpu);
+       return t->time;
+}
+
+static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
+{
+       struct perf_cgroup_info *info;
+       u64 now;
+
+       now = perf_clock();
+
+       info = this_cpu_ptr(cgrp->info);
+
+       info->time += now - info->timestamp;
+       info->timestamp = now;
+}
+
+static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
+{
+       struct perf_cgroup *cgrp_out = cpuctx->cgrp;
+       if (cgrp_out)
+               __update_cgrp_time(cgrp_out);
+}
+
+static inline void update_cgrp_time_from_event(struct perf_event *event)
+{
+       struct perf_cgroup *cgrp;
+
+       /*
+        * ensure we access cgroup data only when needed and
+        * when we know the cgroup is pinned (css_get)
+        */
+       if (!is_cgroup_event(event))
+               return;
+
+       cgrp = perf_cgroup_from_task(current);
+       /*
+        * Do not update time when cgroup is not active
+        */
+       if (cgrp == event->cgrp)
+               __update_cgrp_time(event->cgrp);
+}
+
+static inline void
+perf_cgroup_set_timestamp(struct task_struct *task,
+                         struct perf_event_context *ctx)
+{
+       struct perf_cgroup *cgrp;
+       struct perf_cgroup_info *info;
+
+       /*
+        * ctx->lock held by caller
+        * ensure we do not access cgroup data
+        * unless we have the cgroup pinned (css_get)
+        */
+       if (!task || !ctx->nr_cgroups)
+               return;
+
+       cgrp = perf_cgroup_from_task(task);
+       info = this_cpu_ptr(cgrp->info);
+       info->timestamp = ctx->timestamp;
+}
+
+#define PERF_CGROUP_SWOUT      0x1 /* cgroup switch out every event */
+#define PERF_CGROUP_SWIN       0x2 /* cgroup switch in events based on task */
+
+/*
+ * reschedule events based on the cgroup constraint of task.
+ *
+ * mode SWOUT : schedule out everything
+ * mode SWIN : schedule in based on cgroup for next
+ */
+void perf_cgroup_switch(struct task_struct *task, int mode)
+{
+       struct perf_cpu_context *cpuctx;
+       struct pmu *pmu;
+       unsigned long flags;
+
+       /*
+        * disable interrupts to avoid getting nr_cgroups
+        * changes via __perf_event_disable(). Also
+        * avoids preemption.
+        */
+       local_irq_save(flags);
+
+       /*
+        * we reschedule only in the presence of cgroup
+        * constrained events.
+        */
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+
+               cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+
+               perf_pmu_disable(cpuctx->ctx.pmu);
+
+               /*
+                * perf_cgroup_events says at least one
+                * context on this CPU has cgroup events.
+                *
+                * ctx->nr_cgroups reports the number of cgroup
+                * events for a context.
+                */
+               if (cpuctx->ctx.nr_cgroups > 0) {
+
+                       if (mode & PERF_CGROUP_SWOUT) {
+                               cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+                               /*
+                                * must not be done before ctxswout due
+                                * to event_filter_match() in event_sched_out()
+                                */
+                               cpuctx->cgrp = NULL;
+                       }
+
+                       if (mode & PERF_CGROUP_SWIN) {
+                               /* set cgrp before ctxsw in to
+                                * allow event_filter_match() to not
+                                * have to pass task around
+                                */
+                               cpuctx->cgrp = perf_cgroup_from_task(task);
+                               cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
+                       }
+               }
+
+               perf_pmu_enable(cpuctx->ctx.pmu);
+       }
+
+       rcu_read_unlock();
+
+       local_irq_restore(flags);
+}
+
+static inline void perf_cgroup_sched_out(struct task_struct *task)
+{
+       perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+}
+
+static inline void perf_cgroup_sched_in(struct task_struct *task)
+{
+       perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+}
+
+static inline int perf_cgroup_connect(int fd, struct perf_event *event,
+                                     struct perf_event_attr *attr,
+                                     struct perf_event *group_leader)
+{
+       struct perf_cgroup *cgrp;
+       struct cgroup_subsys_state *css;
+       struct file *file;
+       int ret = 0, fput_needed;
+
+       file = fget_light(fd, &fput_needed);
+       if (!file)
+               return -EBADF;
+
+       css = cgroup_css_from_dir(file, perf_subsys_id);
+       if (IS_ERR(css)) {
+               ret = PTR_ERR(css);
+               goto out;
+       }
+
+       cgrp = container_of(css, struct perf_cgroup, css);
+       event->cgrp = cgrp;
+
+       /* must be done before we fput() the file */
+       perf_get_cgroup(event);
+
+       /*
+        * all events in a group must monitor
+        * the same cgroup because a task belongs
+        * to only one perf cgroup at a time
+        */
+       if (group_leader && group_leader->cgrp != cgrp) {
+               perf_detach_cgroup(event);
+               ret = -EINVAL;
+       }
+out:
+       fput_light(file, fput_needed);
+       return ret;
+}
+
+static inline void
+perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
+{
+       struct perf_cgroup_info *t;
+       t = per_cpu_ptr(event->cgrp->info, event->cpu);
+       event->shadow_ctx_time = now - t->timestamp;
+}
+
+static inline void
+perf_cgroup_defer_enabled(struct perf_event *event)
+{
+       /*
+        * when the current task's perf cgroup does not match
+        * the event's, we need to remember to call the
+        * perf_mark_enable() function the first time a task with
+        * a matching perf cgroup is scheduled in.
+        */
+       if (is_cgroup_event(event) && !perf_cgroup_match(event))
+               event->cgrp_defer_enabled = 1;
+}
+
+static inline void
+perf_cgroup_mark_enabled(struct perf_event *event,
+                        struct perf_event_context *ctx)
+{
+       struct perf_event *sub;
+       u64 tstamp = perf_event_time(event);
+
+       if (!event->cgrp_defer_enabled)
+               return;
+
+       event->cgrp_defer_enabled = 0;
+
+       event->tstamp_enabled = tstamp - event->total_time_enabled;
+       list_for_each_entry(sub, &event->sibling_list, group_entry) {
+               if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
+                       sub->tstamp_enabled = tstamp - sub->total_time_enabled;
+                       sub->cgrp_defer_enabled = 0;
+               }
+       }
+}
+#else /* !CONFIG_CGROUP_PERF */
+
+static inline bool
+perf_cgroup_match(struct perf_event *event)
+{
+       return true;
+}
+
+static inline void perf_detach_cgroup(struct perf_event *event)
+{}
+
+static inline int is_cgroup_event(struct perf_event *event)
+{
+       return 0;
+}
+
+static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
+{
+       return 0;
+}
+
+static inline void update_cgrp_time_from_event(struct perf_event *event)
+{
+}
+
+static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
+{
+}
+
+static inline void perf_cgroup_sched_out(struct task_struct *task)
+{
+}
+
+static inline void perf_cgroup_sched_in(struct task_struct *task)
+{
+}
+
+static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
+                                     struct perf_event_attr *attr,
+                                     struct perf_event *group_leader)
+{
+       return -EINVAL;
+}
+
+static inline void
+perf_cgroup_set_timestamp(struct task_struct *task,
+                         struct perf_event_context *ctx)
+{
+}
+
+void
+perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
+{
+}
+
+static inline void
+perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
+{
+}
+
+static inline u64 perf_cgroup_event_time(struct perf_event *event)
+{
+       return 0;
+}
+
+static inline void
+perf_cgroup_defer_enabled(struct perf_event *event)
+{
+}
+
+static inline void
+perf_cgroup_mark_enabled(struct perf_event *event,
+                        struct perf_event_context *ctx)
+{
+}
+#endif
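Userspace attaches a cgroup event through perf_event_open() with the new PERF_FLAG_PID_CGROUP flag, passing an open cgroup directory fd in place of a pid; cgroup events are per-cpu, so cpu must be >= 0. A sketch, with a hypothetical mount point:

    int cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
    int fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu,
                     -1 /* group_fd */, PERF_FLAG_PID_CGROUP);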
+
 void perf_pmu_disable(struct pmu *pmu)
 {
        int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -254,7 +712,6 @@ static void perf_unpin_context(struct perf_event_context *ctx)
        raw_spin_lock_irqsave(&ctx->lock, flags);
        --ctx->pin_count;
        raw_spin_unlock_irqrestore(&ctx->lock, flags);
-       put_ctx(ctx);
 }
 
 /*
@@ -271,6 +728,10 @@ static void update_context_time(struct perf_event_context *ctx)
 static u64 perf_event_time(struct perf_event *event)
 {
        struct perf_event_context *ctx = event->ctx;
+
+       if (is_cgroup_event(event))
+               return perf_cgroup_event_time(event);
+
        return ctx ? ctx->time : 0;
 }
 
@@ -285,9 +746,20 @@ static void update_event_times(struct perf_event *event)
        if (event->state < PERF_EVENT_STATE_INACTIVE ||
            event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
                return;
-
-       if (ctx->is_active)
+       /*
+        * in cgroup mode, time_enabled represents
+        * the time the event was enabled AND active
+        * tasks were in the monitored cgroup. This is
+        * independent of the activity of the context as
+        * there may be a mix of cgroup and non-cgroup events.
+        *
+        * That is why we treat cgroup events differently
+        * here.
+        */
+       if (is_cgroup_event(event))
                run_end = perf_event_time(event);
+       else if (ctx->is_active)
+               run_end = ctx->time;
        else
                run_end = event->tstamp_stopped;
 
@@ -299,6 +771,7 @@ static void update_event_times(struct perf_event *event)
                run_end = perf_event_time(event);
 
        event->total_time_running = run_end - event->tstamp_running;
+
 }
 
 /*
@@ -347,6 +820,9 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
                list_add_tail(&event->group_entry, list);
        }
 
+       if (is_cgroup_event(event))
+               ctx->nr_cgroups++;
+
        list_add_rcu(&event->event_entry, &ctx->event_list);
        if (!ctx->nr_events)
                perf_pmu_rotate_start(ctx->pmu);
@@ -473,6 +949,9 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
        event->attach_state &= ~PERF_ATTACH_CONTEXT;
 
+       if (is_cgroup_event(event))
+               ctx->nr_cgroups--;
+
        ctx->nr_events--;
        if (event->attr.inherit_stat)
                ctx->nr_stat--;
@@ -544,7 +1023,8 @@ out:
 static inline int
 event_filter_match(struct perf_event *event)
 {
-       return event->cpu == -1 || event->cpu == smp_processor_id();
+       return (event->cpu == -1 || event->cpu == smp_processor_id())
+           && perf_cgroup_match(event);
 }
 
 static void
@@ -562,7 +1042,7 @@ event_sched_out(struct perf_event *event,
         */
        if (event->state == PERF_EVENT_STATE_INACTIVE
            && !event_filter_match(event)) {
-               delta = ctx->time - event->tstamp_stopped;
+               delta = tstamp - event->tstamp_stopped;
                event->tstamp_running += delta;
                event->tstamp_stopped = tstamp;
        }
@@ -606,47 +1086,30 @@ group_sched_out(struct perf_event *group_event,
                cpuctx->exclusive = 0;
 }
 
-static inline struct perf_cpu_context *
-__get_cpu_context(struct perf_event_context *ctx)
-{
-       return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
-}
-
 /*
  * Cross CPU call to remove a performance event
  *
  * We disable the event on the hardware level first. After that we
  * remove it from the context list.
  */
-static void __perf_event_remove_from_context(void *info)
+static int __perf_remove_from_context(void *info)
 {
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
-       /*
-        * If this is a task context, we need to check whether it is
-        * the current task context of this cpu. If not it has been
-        * scheduled out before the smp call arrived.
-        */
-       if (ctx->task && cpuctx->task_ctx != ctx)
-               return;
-
        raw_spin_lock(&ctx->lock);
-
        event_sched_out(event, cpuctx, ctx);
-
        list_del_event(event, ctx);
-
        raw_spin_unlock(&ctx->lock);
+
+       return 0;
 }
 
 
 /*
  * Remove the event from a task's (or a CPU's) list of events.
  *
- * Must be called with ctx->mutex held.
- *
  * CPU events are removed with a smp call. For task events we only
  * call when the task is on a CPU.
  *
@@ -657,49 +1120,48 @@ static void __perf_event_remove_from_context(void *info)
  * When called from perf_event_exit_task, it's OK because the
  * context has been detached from its task.
  */
-static void perf_event_remove_from_context(struct perf_event *event)
+static void perf_remove_from_context(struct perf_event *event)
 {
        struct perf_event_context *ctx = event->ctx;
        struct task_struct *task = ctx->task;
 
+       lockdep_assert_held(&ctx->mutex);
+
        if (!task) {
                /*
                 * Per cpu events are removed via an smp call and
                 * the removal is always successful.
                 */
-               smp_call_function_single(event->cpu,
-                                        __perf_event_remove_from_context,
-                                        event, 1);
+               cpu_function_call(event->cpu, __perf_remove_from_context, event);
                return;
        }
 
 retry:
-       task_oncpu_function_call(task, __perf_event_remove_from_context,
-                                event);
+       if (!task_function_call(task, __perf_remove_from_context, event))
+               return;
 
        raw_spin_lock_irq(&ctx->lock);
        /*
-        * If the context is active we need to retry the smp call.
+        * If we failed to find a running task, but find the context active now
+        * that we've acquired the ctx->lock, retry.
         */
-       if (ctx->nr_active && !list_empty(&event->group_entry)) {
+       if (ctx->is_active) {
                raw_spin_unlock_irq(&ctx->lock);
                goto retry;
        }
 
        /*
-        * The lock prevents that this context is scheduled in so we
-        * can remove the event safely, if the call above did not
-        * succeed.
+        * Since the task isn't running, it's safe to remove the event;
+        * holding the ctx->lock ensures the task won't get scheduled in.
         */
-       if (!list_empty(&event->group_entry))
-               list_del_event(event, ctx);
+       list_del_event(event, ctx);
        raw_spin_unlock_irq(&ctx->lock);
 }
 
 /*
  * Cross CPU call to disable a performance event
  */
-static void __perf_event_disable(void *info)
+static int __perf_event_disable(void *info)
 {
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
@@ -708,9 +1170,12 @@ static void __perf_event_disable(void *info)
        /*
         * If this is a per-task event, need to check whether this
         * event's task is the current task on this cpu.
+        *
+        * Can trigger due to concurrent perf_event_context_sched_out()
+        * flipping contexts around.
         */
        if (ctx->task && cpuctx->task_ctx != ctx)
-               return;
+               return -EINVAL;
 
        raw_spin_lock(&ctx->lock);
 
@@ -720,6 +1185,7 @@ static void __perf_event_disable(void *info)
         */
        if (event->state >= PERF_EVENT_STATE_INACTIVE) {
                update_context_time(ctx);
+               update_cgrp_time_from_event(event);
                update_group_times(event);
                if (event == event->group_leader)
                        group_sched_out(event, cpuctx, ctx);
@@ -729,6 +1195,8 @@ static void __perf_event_disable(void *info)
        }
 
        raw_spin_unlock(&ctx->lock);
+
+       return 0;
 }
 
 /*
@@ -753,13 +1221,13 @@ void perf_event_disable(struct perf_event *event)
                /*
                 * Disable the event on the cpu that it's on
                 */
-               smp_call_function_single(event->cpu, __perf_event_disable,
-                                        event, 1);
+               cpu_function_call(event->cpu, __perf_event_disable, event);
                return;
        }
 
 retry:
-       task_oncpu_function_call(task, __perf_event_disable, event);
+       if (!task_function_call(task, __perf_event_disable, event))
+               return;
 
        raw_spin_lock_irq(&ctx->lock);
        /*
@@ -767,6 +1235,11 @@ retry:
         */
        if (event->state == PERF_EVENT_STATE_ACTIVE) {
                raw_spin_unlock_irq(&ctx->lock);
+               /*
+                * Reload the task pointer, it might have been changed by
+                * a concurrent perf_event_context_sched_out().
+                */
+               task = ctx->task;
                goto retry;
        }
 
@@ -778,10 +1251,48 @@ retry:
                update_group_times(event);
                event->state = PERF_EVENT_STATE_OFF;
        }
-
        raw_spin_unlock_irq(&ctx->lock);
 }
 
+static void perf_set_shadow_time(struct perf_event *event,
+                                struct perf_event_context *ctx,
+                                u64 tstamp)
+{
+       /*
+        * use the correct time source for the time snapshot
+        *
+        * We could get by without this by leveraging the
+        * fact that to get to this function, the caller
+        * has most likely already called update_context_time()
+        * and update_cgrp_time_xx() and thus both timestamps
+        * are identical (or very close). Given that tstamp is
+        * already adjusted for cgroup, we could say that:
+        *    tstamp - ctx->timestamp
+        * is equivalent to
+        *    tstamp - cgrp->timestamp.
+        *
+        * Then, in perf_output_read(), the calculation would
+        * work with no changes because:
+        * - event is guaranteed scheduled in
+        * - no scheduled out in between
+        * - thus the timestamp would be the same
+        *
+        * But this is a bit hairy.
+        *
+        * So instead, we have an explicit cgroup call to remain
+        * within the same time source all along. We believe it
+        * is cleaner and simpler to understand.
+        */
+       if (is_cgroup_event(event))
+               perf_cgroup_set_shadow_time(event, tstamp);
+       else
+               event->shadow_ctx_time = tstamp - ctx->timestamp;
+}
+
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
                 struct perf_cpu_context *cpuctx,
@@ -794,6 +1305,17 @@ event_sched_in(struct perf_event *event,
 
        event->state = PERF_EVENT_STATE_ACTIVE;
        event->oncpu = smp_processor_id();
+
+       /*
+        * Unthrottle events: since we just got scheduled in, we may have
+        * missed several ticks already, and for a heavily scheduling task
+        * there is little guarantee it'll get a tick in a timely manner.
+        */
+       if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+               perf_log_throttle(event, 1);
+               event->hw.interrupts = 0;
+       }
+
        /*
         * The new state must be visible before we turn it on in the hardware:
         */
@@ -807,7 +1329,7 @@ event_sched_in(struct perf_event *event,
 
        event->tstamp_running += tstamp - event->tstamp_stopped;
 
-       event->shadow_ctx_time = tstamp - ctx->timestamp;
+       perf_set_shadow_time(event, ctx, tstamp);
 
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
@@ -928,12 +1450,15 @@ static void add_event_to_ctx(struct perf_event *event,
        event->tstamp_stopped = tstamp;
 }
 
+static void perf_event_context_sched_in(struct perf_event_context *ctx,
+                                       struct task_struct *tsk);
+
 /*
  * Cross CPU call to install and enable a performance event
  *
  * Must be called with ctx->mutex held
  */
-static void __perf_install_in_context(void *info)
+static int __perf_install_in_context(void *info)
 {
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
@@ -942,21 +1467,22 @@ static void __perf_install_in_context(void *info)
        int err;
 
        /*
-        * If this is a task context, we need to check whether it is
-        * the current task context of this cpu. If not it has been
-        * scheduled out before the smp call arrived.
-        * Or possibly this is the right context but it isn't
-        * on this cpu because it had no events.
+        * We may be installing a new context into an already running task;
+        * this can also happen before perf_event_task_sched_in() on
+        * architectures which do context switches with IRQs enabled.
         */
-       if (ctx->task && cpuctx->task_ctx != ctx) {
-               if (cpuctx->task_ctx || ctx->task != current)
-                       return;
-               cpuctx->task_ctx = ctx;
-       }
+       if (ctx->task && !cpuctx->task_ctx)
+               perf_event_context_sched_in(ctx, ctx->task);
 
        raw_spin_lock(&ctx->lock);
        ctx->is_active = 1;
        update_context_time(ctx);
+       /*
+        * update cgrp time only if current cgrp
+        * matches event->cgrp. Must be done before
+        * calling add_event_to_ctx()
+        */
+       update_cgrp_time_from_event(event);
 
        add_event_to_ctx(event, ctx);
 
@@ -997,6 +1523,8 @@ static void __perf_install_in_context(void *info)
 
 unlock:
        raw_spin_unlock(&ctx->lock);
+
+       return 0;
 }
 
 /*
@@ -1008,8 +1536,6 @@ unlock:
  * If the event is attached to a task which is on a CPU we use a smp
  * call to enable it in the task context. The task might have been
  * scheduled away, but we check this in the smp call again.
- *
- * Must be called with ctx->mutex held.
  */
 static void
 perf_install_in_context(struct perf_event_context *ctx,
@@ -1018,6 +1544,8 @@ perf_install_in_context(struct perf_event_context *ctx,
 {
        struct task_struct *task = ctx->task;
 
+       lockdep_assert_held(&ctx->mutex);
+
        event->ctx = ctx;
 
        if (!task) {
@@ -1025,31 +1553,29 @@ perf_install_in_context(struct perf_event_context *ctx,
                 * Per cpu events are installed via an smp call and
                 * the install is always successful.
                 */
-               smp_call_function_single(cpu, __perf_install_in_context,
-                                        event, 1);
+               cpu_function_call(cpu, __perf_install_in_context, event);
                return;
        }
 
 retry:
-       task_oncpu_function_call(task, __perf_install_in_context,
-                                event);
+       if (!task_function_call(task, __perf_install_in_context, event))
+               return;
 
        raw_spin_lock_irq(&ctx->lock);
        /*
-        * we need to retry the smp call.
+        * If we failed to find a running task, but find the context active now
+        * that we've acquired the ctx->lock, retry.
         */
-       if (ctx->is_active && list_empty(&event->group_entry)) {
+       if (ctx->is_active) {
                raw_spin_unlock_irq(&ctx->lock);
                goto retry;
        }
 
        /*
-        * The lock prevents that this context is scheduled in so we
-        * can add the event safely, if it the call above did not
-        * succeed.
+        * Since the task isn't running, it's safe to add the event;
+        * holding ctx->lock ensures the task won't get scheduled in.
         */
-       if (list_empty(&event->group_entry))
-               add_event_to_ctx(event, ctx);
+       add_event_to_ctx(event, ctx);
        raw_spin_unlock_irq(&ctx->lock);
 }
 
@@ -1078,7 +1604,7 @@ static void __perf_event_mark_enabled(struct perf_event *event,
 /*
  * Cross CPU call to enable a performance event
  */
-static void __perf_event_enable(void *info)
+static int __perf_event_enable(void *info)
 {
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
@@ -1086,26 +1612,27 @@ static void __perf_event_enable(void *info)
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        int err;
 
-       /*
-        * If this is a per-task event, need to check whether this
-        * event's task is the current task on this cpu.
-        */
-       if (ctx->task && cpuctx->task_ctx != ctx) {
-               if (cpuctx->task_ctx || ctx->task != current)
-                       return;
-               cpuctx->task_ctx = ctx;
-       }
+       if (WARN_ON_ONCE(!ctx->is_active))
+               return -EINVAL;
 
        raw_spin_lock(&ctx->lock);
-       ctx->is_active = 1;
        update_context_time(ctx);
 
        if (event->state >= PERF_EVENT_STATE_INACTIVE)
                goto unlock;
+
+       /*
+        * set current task's cgroup time reference point
+        */
+       perf_cgroup_set_timestamp(current, ctx);
+
        __perf_event_mark_enabled(event, ctx);
 
-       if (!event_filter_match(event))
+       if (!event_filter_match(event)) {
+               if (is_cgroup_event(event))
+                       perf_cgroup_defer_enabled(event);
                goto unlock;
+       }
 
        /*
         * If the event is in a group and isn't the group leader,
@@ -1138,6 +1665,8 @@ static void __perf_event_enable(void *info)
 
 unlock:
        raw_spin_unlock(&ctx->lock);
+
+       return 0;
 }
 
 /*
@@ -1158,8 +1687,7 @@ void perf_event_enable(struct perf_event *event)
                /*
                 * Enable the event on the cpu that it's on
                 */
-               smp_call_function_single(event->cpu, __perf_event_enable,
-                                        event, 1);
+               cpu_function_call(event->cpu, __perf_event_enable, event);
                return;
        }
 
@@ -1178,8 +1706,15 @@ void perf_event_enable(struct perf_event *event)
                event->state = PERF_EVENT_STATE_OFF;
 
 retry:
+       if (!ctx->is_active) {
+               __perf_event_mark_enabled(event, ctx);
+               goto out;
+       }
+
        raw_spin_unlock_irq(&ctx->lock);
-       task_oncpu_function_call(task, __perf_event_enable, event);
+
+       if (!task_function_call(task, __perf_event_enable, event))
+               return;
 
        raw_spin_lock_irq(&ctx->lock);
 
@@ -1187,15 +1722,14 @@ retry:
         * If the context is active and the event is still off,
         * we need to retry the cross-call.
         */
-       if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
+       if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
+               /*
+                * task could have been flipped by a concurrent
+                * perf_event_context_sched_out()
+                */
+               task = ctx->task;
                goto retry;
-
-       /*
-        * Since we have the lock this context can't be scheduled
-        * in, so we can change the state safely.
-        */
-       if (event->state == PERF_EVENT_STATE_OFF)
-               __perf_event_mark_enabled(event, ctx);
+       }
 
 out:
        raw_spin_unlock_irq(&ctx->lock);
@@ -1227,6 +1761,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
        if (likely(!ctx->nr_events))
                goto out;
        update_context_time(ctx);
+       update_cgrp_time_from_cpuctx(cpuctx);
 
        if (!ctx->nr_active)
                goto out;
@@ -1339,8 +1874,8 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
        }
 }
 
-void perf_event_context_sched_out(struct task_struct *task, int ctxn,
-                                 struct task_struct *next)
+static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
+                                        struct task_struct *next)
 {
        struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
        struct perf_event_context *next_ctx;
@@ -1416,6 +1951,14 @@ void __perf_event_task_sched_out(struct task_struct *task,
 
        for_each_task_context_nr(ctxn)
                perf_event_context_sched_out(task, ctxn, next);
+
+       /*
+        * if cgroup events exist on this CPU, then we need
+        * to check if we have to switch out PMU state.
+        * cgroup events are system-wide mode only
+        */
+       if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+               perf_cgroup_sched_out(task);
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx,
@@ -1454,6 +1997,10 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
                if (!event_filter_match(event))
                        continue;
 
+               /* may need to reset tstamp_enabled */
+               if (is_cgroup_event(event))
+                       perf_cgroup_mark_enabled(event, ctx);
+
                if (group_can_go_on(event, cpuctx, 1))
                        group_sched_in(event, cpuctx, ctx);
 
@@ -1486,6 +2033,10 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
                if (!event_filter_match(event))
                        continue;
 
+               /* may need to reset tstamp_enabled */
+               if (is_cgroup_event(event))
+                       perf_cgroup_mark_enabled(event, ctx);
+
                if (group_can_go_on(event, cpuctx, can_add_hw)) {
                        if (group_sched_in(event, cpuctx, ctx))
                                can_add_hw = 0;
@@ -1496,15 +2047,19 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 static void
 ctx_sched_in(struct perf_event_context *ctx,
             struct perf_cpu_context *cpuctx,
-            enum event_type_t event_type)
+            enum event_type_t event_type,
+            struct task_struct *task)
 {
+       u64 now;
+
        raw_spin_lock(&ctx->lock);
        ctx->is_active = 1;
        if (likely(!ctx->nr_events))
                goto out;
 
-       ctx->timestamp = perf_clock();
-
+       now = perf_clock();
+       ctx->timestamp = now;
+       perf_cgroup_set_timestamp(task, ctx);
        /*
         * First go through the list and put on any pinned groups
         * in order to give them the best chance of going on.
@@ -1521,11 +2076,12 @@ out:
 }
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-                            enum event_type_t event_type)
+                            enum event_type_t event_type,
+                            struct task_struct *task)
 {
        struct perf_event_context *ctx = &cpuctx->ctx;
 
-       ctx_sched_in(ctx, cpuctx, event_type);
+       ctx_sched_in(ctx, cpuctx, event_type, task);
 }
 
 static void task_ctx_sched_in(struct perf_event_context *ctx,
@@ -1533,15 +2089,16 @@ static void task_ctx_sched_in(struct perf_event_context *ctx,
 {
        struct perf_cpu_context *cpuctx;
 
-               cpuctx = __get_cpu_context(ctx);
+       cpuctx = __get_cpu_context(ctx);
        if (cpuctx->task_ctx == ctx)
                return;
 
-       ctx_sched_in(ctx, cpuctx, event_type);
+       ctx_sched_in(ctx, cpuctx, event_type, NULL);
        cpuctx->task_ctx = ctx;
 }
 
-void perf_event_context_sched_in(struct perf_event_context *ctx)
+static void perf_event_context_sched_in(struct perf_event_context *ctx,
+                                       struct task_struct *task)
 {
        struct perf_cpu_context *cpuctx;
 
@@ -1557,9 +2114,9 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
         */
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-       ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
-       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
-       ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
+       ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
+       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+       ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
 
        cpuctx->task_ctx = ctx;
 
@@ -1592,14 +2149,17 @@ void __perf_event_task_sched_in(struct task_struct *task)
                if (likely(!ctx))
                        continue;
 
-               perf_event_context_sched_in(ctx);
+               perf_event_context_sched_in(ctx, task);
        }
+       /*
+        * if cgroup events exist on this CPU, then we need
+        * to check if we have to switch in PMU state.
+        * cgroup events are system-wide mode only
+        */
+       if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+               perf_cgroup_sched_in(task);
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
        u64 frequency = event->attr.sample_freq;
@@ -1627,7 +2187,7 @@ static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
         * Reduce accuracy by one bit such that @a and @b converge
         * to a similar magnitude.
         */
-#define REDUCE_FLS(a, b)               \
+#define REDUCE_FLS(a, b)               \
 do {                                   \
        if (a##_fls > b##_fls) {        \
                a >>= 1;                \
@@ -1797,7 +2357,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
        if (ctx)
                rotate_ctx(ctx);
 
-       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
+       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
        if (ctx)
                task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
 
@@ -1876,7 +2436,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 
        raw_spin_unlock(&ctx->lock);
 
-       perf_event_context_sched_in(ctx);
+       perf_event_context_sched_in(ctx, ctx->task);
 out:
        local_irq_restore(flags);
 }
@@ -1901,8 +2461,10 @@ static void __perf_event_read(void *info)
                return;
 
        raw_spin_lock(&ctx->lock);
-       if (ctx->is_active)
+       if (ctx->is_active) {
                update_context_time(ctx);
+               update_cgrp_time_from_event(event);
+       }
        update_event_times(event);
        if (event->state == PERF_EVENT_STATE_ACTIVE)
                event->pmu->read(event);
@@ -1933,8 +2495,10 @@ static u64 perf_event_read(struct perf_event *event)
                 * (e.g., thread is blocked), in that case
                 * we cannot update context time
                 */
-               if (ctx->is_active)
+               if (ctx->is_active) {
                        update_context_time(ctx);
+                       update_cgrp_time_from_event(event);
+               }
                update_event_times(event);
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
        }
@@ -2213,6 +2777,9 @@ errout:
 
 }
 
+/*
+ * Returns a matching context with refcount and pincount.
+ */
 static struct perf_event_context *
 find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
 {
@@ -2237,6 +2804,7 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
                ctx = &cpuctx->ctx;
                get_ctx(ctx);
+               ++ctx->pin_count;
 
                return ctx;
        }
@@ -2250,6 +2818,7 @@ retry:
        ctx = perf_lock_task_context(task, ctxn, &flags);
        if (ctx) {
                unclone_ctx(ctx);
+               ++ctx->pin_count;
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
        }
 
@@ -2271,8 +2840,10 @@ retry:
                        err = -ESRCH;
                else if (task->perf_event_ctxp[ctxn])
                        err = -EAGAIN;
-               else
+               else {
+                       ++ctx->pin_count;
                        rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
+               }
                mutex_unlock(&task->perf_event_mutex);
 
                if (unlikely(err)) {
@@ -2312,7 +2883,7 @@ static void free_event(struct perf_event *event)
 
        if (!event->parent) {
                if (event->attach_state & PERF_ATTACH_TASK)
-                       jump_label_dec(&perf_task_events);
+                       jump_label_dec(&perf_sched_events);
                if (event->attr.mmap || event->attr.mmap_data)
                        atomic_dec(&nr_mmap_events);
                if (event->attr.comm)
@@ -2321,6 +2892,10 @@ static void free_event(struct perf_event *event)
                        atomic_dec(&nr_task_events);
                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
                        put_callchain_buffers();
+               if (is_cgroup_event(event)) {
+                       atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
+                       jump_label_dec(&perf_sched_events);
+               }
        }
 
        if (event->buffer) {
@@ -2328,6 +2903,9 @@ static void free_event(struct perf_event *event)
                event->buffer = NULL;
        }
 
+       if (is_cgroup_event(event))
+               perf_detach_cgroup(event);
+
        if (event->destroy)
                event->destroy(event);
 
@@ -4395,26 +4973,14 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
        if (unlikely(!is_sampling_event(event)))
                return 0;
 
-       if (!throttle) {
-               hwc->interrupts++;
-       } else {
-               if (hwc->interrupts != MAX_INTERRUPTS) {
-                       hwc->interrupts++;
-                       if (HZ * hwc->interrupts >
-                                       (u64)sysctl_perf_event_sample_rate) {
-                               hwc->interrupts = MAX_INTERRUPTS;
-                               perf_log_throttle(event, 0);
-                               ret = 1;
-                       }
-               } else {
-                       /*
-                        * Keep re-disabling events even though on the previous
-                        * pass we disabled it - just in case we raced with a
-                        * sched-in and the event got enabled again:
-                        */
+       if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
+               if (throttle) {
+                       hwc->interrupts = MAX_INTERRUPTS;
+                       perf_log_throttle(event, 0);
                        ret = 1;
                }
-       }
+       } else
+               hwc->interrupts++;
 
        if (event->attr.freq) {
                u64 now = perf_clock();
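For scale, assuming this tree's defaults of sysctl_perf_event_sample_rate == 100000 and HZ == 1000, max_samples_per_tick works out to DIV_ROUND_UP(100000, 1000) == 100: an event may take up to 100 overflow interrupts per tick before hwc->interrupts is clamped to MAX_INTERRUPTS and the event is throttled.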
@@ -5051,6 +5617,10 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
        u64 period;
 
        event = container_of(hrtimer, struct perf_event, hw.hrtimer);
+
+       if (event->state != PERF_EVENT_STATE_ACTIVE)
+               return HRTIMER_NORESTART;
+
        event->pmu->read(event);
 
        perf_sample_data_init(&data, 0);
@@ -5077,9 +5647,6 @@ static void perf_swevent_start_hrtimer(struct perf_event *event)
        if (!is_sampling_event(event))
                return;
 
-       hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       hwc->hrtimer.function = perf_swevent_hrtimer;
-
        period = local64_read(&hwc->period_left);
        if (period) {
                if (period < 0)
@@ -5106,6 +5673,30 @@ static void perf_swevent_cancel_hrtimer(struct perf_event *event)
        }
 }
 
+static void perf_swevent_init_hrtimer(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (!is_sampling_event(event))
+               return;
+
+       hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       hwc->hrtimer.function = perf_swevent_hrtimer;
+
+       /*
+        * Since hrtimers have a fixed rate, we can do a static freq->period
+        * mapping and avoid the whole period adjust feedback stuff.
+        */
+       if (event->attr.freq) {
+               long freq = event->attr.sample_freq;
+
+               event->attr.sample_period = NSEC_PER_SEC / freq;
+               hwc->sample_period = event->attr.sample_period;
+               local64_set(&hwc->period_left, hwc->sample_period);
+               event->attr.freq = 0;
+       }
+}
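As a worked example of the static mapping: sample_freq == 4000 yields sample_period == NSEC_PER_SEC / 4000 == 250000 ns, period_left starts one full period out, and attr.freq is cleared so the adaptive period machinery never runs for hrtimer-based software events.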
+
 /*
  * Software event: cpu wall time clock
  */
@@ -5158,6 +5749,8 @@ static int cpu_clock_event_init(struct perf_event *event)
        if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
                return -ENOENT;
 
+       perf_swevent_init_hrtimer(event);
+
        return 0;
 }
 
@@ -5213,16 +5806,9 @@ static void task_clock_event_del(struct perf_event *event, int flags)
 
 static void task_clock_event_read(struct perf_event *event)
 {
-       u64 time;
-
-       if (!in_nmi()) {
-               update_context_time(event->ctx);
-               time = event->ctx->time;
-       } else {
-               u64 now = perf_clock();
-               u64 delta = now - event->ctx->timestamp;
-               time = event->ctx->time + delta;
-       }
+       u64 now = perf_clock();
+       u64 delta = now - event->ctx->timestamp;
+       u64 time = event->ctx->time + delta;
 
        task_clock_event_update(event, time);
 }
@@ -5235,6 +5821,8 @@ static int task_clock_event_init(struct perf_event *event)
        if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
                return -ENOENT;
 
+       perf_swevent_init_hrtimer(event);
+
        return 0;
 }
 
@@ -5506,17 +6094,22 @@ struct pmu *perf_init_event(struct perf_event *event)
 {
        struct pmu *pmu = NULL;
        int idx;
+       int ret;
 
        idx = srcu_read_lock(&pmus_srcu);
 
        rcu_read_lock();
        pmu = idr_find(&pmu_idr, event->attr.type);
        rcu_read_unlock();
-       if (pmu)
+       if (pmu) {
+               ret = pmu->event_init(event);
+               if (ret)
+                       pmu = ERR_PTR(ret);
                goto unlock;
+       }
 
        list_for_each_entry_rcu(pmu, &pmus, entry) {
-               int ret = pmu->event_init(event);
+               ret = pmu->event_init(event);
                if (!ret)
                        goto unlock;
 
@@ -5642,7 +6235,7 @@ done:
 
        if (!event->parent) {
                if (event->attach_state & PERF_ATTACH_TASK)
-                       jump_label_inc(&perf_task_events);
+                       jump_label_inc(&perf_sched_events);
                if (event->attr.mmap || event->attr.mmap_data)
                        atomic_inc(&nr_mmap_events);
                if (event->attr.comm)
@@ -5817,7 +6410,7 @@ SYSCALL_DEFINE5(perf_event_open,
        int err;
 
        /* for future expandability... */
-       if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
+       if (flags & ~PERF_FLAG_ALL)
                return -EINVAL;
 
        err = perf_copy_attr(attr_uptr, &attr);
@@ -5834,6 +6427,15 @@ SYSCALL_DEFINE5(perf_event_open,
                        return -EINVAL;
        }
 
+       /*
+        * In cgroup mode, the pid argument is used to pass an fd opened
+        * on the cgroup directory in cgroupfs. The cpu argument
+        * designates the cpu on which to monitor threads from that
+        * cgroup.
+        */
+       if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
+               return -EINVAL;
+
        event_fd = get_unused_fd_flags(O_RDWR);
        if (event_fd < 0)
                return event_fd;
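The flag check above implies the userspace calling convention: with PERF_FLAG_PID_CGROUP set, pid carries an fd open on the cgroup directory and cpu must name a real CPU. A hedged sketch; the cgroup path and event choice are illustrative only:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    #ifndef PERF_FLAG_PID_CGROUP
    #define PERF_FLAG_PID_CGROUP (1U << 2)  /* value added by this series */
    #endif

    int main(void)
    {
            struct perf_event_attr attr;
            int cgrp_fd, ev_fd;

            memset(&attr, 0, sizeof(attr));
            attr.size   = sizeof(attr);
            attr.type   = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;

            /* illustrative path; any perf_event cgroup directory works */
            cgrp_fd = open("/sys/fs/cgroup/perf_event/mygrp", O_RDONLY);
            if (cgrp_fd < 0) {
                    perror("open cgroup");
                    return 1;
            }

            /* pid carries the cgroup fd; cpu must be >= 0 in cgroup mode */
            ev_fd = syscall(__NR_perf_event_open, &attr, cgrp_fd,
                            0 /* cpu */, -1 /* group_fd */,
                            PERF_FLAG_PID_CGROUP);
            if (ev_fd < 0)
                    perror("perf_event_open");
            else
                    close(ev_fd);
            close(cgrp_fd);
            return 0;
    }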
@@ -5851,7 +6453,7 @@ SYSCALL_DEFINE5(perf_event_open,
                        group_leader = NULL;
        }
 
-       if (pid != -1) {
+       if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
                task = find_lively_task_by_vpid(pid);
                if (IS_ERR(task)) {
                        err = PTR_ERR(task);
@@ -5865,6 +6467,19 @@ SYSCALL_DEFINE5(perf_event_open,
                goto err_task;
        }
 
+       if (flags & PERF_FLAG_PID_CGROUP) {
+               err = perf_cgroup_connect(pid, event, &attr, group_leader);
+               if (err)
+                       goto err_alloc;
+               /*
+                * one more event:
+                * - that has a cgroup constraint on event->cpu
+                * - that may need work on context switch
+                */
+               atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
+               jump_label_inc(&perf_sched_events);
+       }
+
        /*
         * Special case software events and allow them to be part of
         * any hardware group.
@@ -5950,10 +6565,10 @@ SYSCALL_DEFINE5(perf_event_open,
                struct perf_event_context *gctx = group_leader->ctx;
 
                mutex_lock(&gctx->mutex);
-               perf_event_remove_from_context(group_leader);
+               perf_remove_from_context(group_leader);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
-                       perf_event_remove_from_context(sibling);
+                       perf_remove_from_context(sibling);
                        put_ctx(gctx);
                }
                mutex_unlock(&gctx->mutex);
@@ -5976,6 +6591,7 @@ SYSCALL_DEFINE5(perf_event_open,
 
        perf_install_in_context(ctx, event, cpu);
        ++ctx->generation;
+       perf_unpin_context(ctx);
        mutex_unlock(&ctx->mutex);
 
        event->owner = current;
@@ -6001,6 +6617,7 @@ SYSCALL_DEFINE5(perf_event_open,
        return event_fd;
 
 err_context:
+       perf_unpin_context(ctx);
        put_ctx(ctx);
 err_alloc:
        free_event(event);
@@ -6051,6 +6668,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
        mutex_lock(&ctx->mutex);
        perf_install_in_context(ctx, event, cpu);
        ++ctx->generation;
+       perf_unpin_context(ctx);
        mutex_unlock(&ctx->mutex);
 
        return event;
@@ -6104,7 +6722,7 @@ __perf_event_exit_task(struct perf_event *child_event,
 {
        struct perf_event *parent_event;
 
-       perf_event_remove_from_context(child_event);
+       perf_remove_from_context(child_event);
 
        parent_event = child_event->parent;
        /*
@@ -6411,7 +7029,7 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
                return 0;
        }
 
-               child_ctx = child->perf_event_ctxp[ctxn];
+       child_ctx = child->perf_event_ctxp[ctxn];
        if (!child_ctx) {
                /*
                 * This is executed from the parent task context, so
@@ -6526,6 +7144,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
        mutex_unlock(&parent_ctx->mutex);
 
        perf_unpin_context(parent_ctx);
+       put_ctx(parent_ctx);
 
        return ret;
 }
@@ -6595,9 +7214,9 @@ static void __perf_event_exit_context(void *__info)
        perf_pmu_rotate_stop(ctx->pmu);
 
        list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
-               __perf_event_remove_from_context(event);
+               __perf_remove_from_context(event);
        list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
-               __perf_event_remove_from_context(event);
+               __perf_remove_from_context(event);
 }
 
 static void perf_event_exit_cpu_context(int cpu)
@@ -6721,3 +7340,83 @@ unlock:
        return ret;
 }
 device_initcall(perf_event_sysfs_init);
+
+#ifdef CONFIG_CGROUP_PERF
+static struct cgroup_subsys_state *perf_cgroup_create(
+       struct cgroup_subsys *ss, struct cgroup *cont)
+{
+       struct perf_cgroup *jc;
+
+       jc = kzalloc(sizeof(*jc), GFP_KERNEL);
+       if (!jc)
+               return ERR_PTR(-ENOMEM);
+
+       jc->info = alloc_percpu(struct perf_cgroup_info);
+       if (!jc->info) {
+               kfree(jc);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       return &jc->css;
+}
+
+static void perf_cgroup_destroy(struct cgroup_subsys *ss,
+                               struct cgroup *cont)
+{
+       struct perf_cgroup *jc;
+       jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
+                         struct perf_cgroup, css);
+       free_percpu(jc->info);
+       kfree(jc);
+}
+
+static int __perf_cgroup_move(void *info)
+{
+       struct task_struct *task = info;
+       perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
+       return 0;
+}
+
+static void perf_cgroup_move(struct task_struct *task)
+{
+       task_function_call(task, __perf_cgroup_move, task);
+}
+
+static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+               struct cgroup *old_cgrp, struct task_struct *task,
+               bool threadgroup)
+{
+       perf_cgroup_move(task);
+       if (threadgroup) {
+               struct task_struct *c;
+               rcu_read_lock();
+               list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
+                       perf_cgroup_move(c);
+               }
+               rcu_read_unlock();
+       }
+}
+
+static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
+               struct cgroup *old_cgrp, struct task_struct *task)
+{
+       /*
+        * cgroup_exit() is called in the copy_process() failure path.
+        * Ignore this case since the task hasn't run yet; this avoids
+        * trying to poke at half-freed task state from generic code.
+        */
+       if (!(task->flags & PF_EXITING))
+               return;
+
+       perf_cgroup_move(task);
+}
+
+struct cgroup_subsys perf_subsys = {
+       .name = "perf_event",
+       .subsys_id = perf_subsys_id,
+       .create = perf_cgroup_create,
+       .destroy = perf_cgroup_destroy,
+       .exit = perf_cgroup_exit,
+       .attach = perf_cgroup_attach,
+};
+#endif /* CONFIG_CGROUP_PERF */
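Operationally, the subsystem name above means a hierarchy is mounted with the perf_event option, groups are created with mkdir, and tasks join by having their pids written to a group's tasks file; the fd of such a group directory is exactly what the PERF_FLAG_PID_CGROUP path of perf_event_open() consumes (see the earlier userspace sketch).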
index 05bb7173850e065a0899821d8e41c922d3548c07..67fea9d25d5559b1e00819bec189ea0be45c0657 100644 (file)
@@ -176,7 +176,8 @@ static inline cputime_t virt_ticks(struct task_struct *p)
        return p->utime;
 }
 
-int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
+static int
+posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
 {
        int error = check_clock(which_clock);
        if (!error) {
@@ -194,7 +195,8 @@ int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
        return error;
 }
 
-int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
+static int
+posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
 {
        /*
         * You can never reset a CPU clock, but we check for other errors
@@ -317,7 +319,7 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 }
 
 
-int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
+static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 {
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int error = -EINVAL;
@@ -379,7 +381,7 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
  * This is called from sys_timer_create() and do_cpu_nanosleep() with the
  * new timer already all-zeros initialized.
  */
-int posix_cpu_timer_create(struct k_itimer *new_timer)
+static int posix_cpu_timer_create(struct k_itimer *new_timer)
 {
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
@@ -425,7 +427,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
  * If we return TIMER_RETRY, it's necessary to release the timer's lock
  * and try again.  (This happens when the timer is in the middle of firing.)
  */
-int posix_cpu_timer_del(struct k_itimer *timer)
+static int posix_cpu_timer_del(struct k_itimer *timer)
 {
        struct task_struct *p = timer->it.cpu.task;
        int ret = 0;
@@ -665,8 +667,8 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
  * If we return TIMER_RETRY, it's necessary to release the timer's lock
  * and try again.  (This happens when the timer is in the middle of firing.)
  */
-int posix_cpu_timer_set(struct k_itimer *timer, int flags,
-                       struct itimerspec *new, struct itimerspec *old)
+static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
+                              struct itimerspec *new, struct itimerspec *old)
 {
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count old_expires, new_expires, old_incr, val;
@@ -820,7 +822,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
        return ret;
 }
 
-void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
+static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 {
        union cpu_time_count now;
        struct task_struct *p = timer->it.cpu.task;
@@ -1481,11 +1483,13 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
        return error;
 }
 
-int posix_cpu_nsleep(const clockid_t which_clock, int flags,
-                    struct timespec *rqtp, struct timespec __user *rmtp)
+static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
+
+static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
+                           struct timespec *rqtp, struct timespec __user *rmtp)
 {
        struct restart_block *restart_block =
-           &current_thread_info()->restart_block;
+               &current_thread_info()->restart_block;
        struct itimerspec it;
        int error;
 
@@ -1501,56 +1505,47 @@ int posix_cpu_nsleep(const clockid_t which_clock, int flags,
 
        if (error == -ERESTART_RESTARTBLOCK) {
 
-               if (flags & TIMER_ABSTIME)
+               if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
                /*
-                * Report back to the user the time still remaining.
-                */
-               if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
+                * Report back to the user the time still remaining.
+                */
+               if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;
 
                restart_block->fn = posix_cpu_nsleep_restart;
-               restart_block->arg0 = which_clock;
-               restart_block->arg1 = (unsigned long) rmtp;
-               restart_block->arg2 = rqtp->tv_sec;
-               restart_block->arg3 = rqtp->tv_nsec;
+               restart_block->nanosleep.index = which_clock;
+               restart_block->nanosleep.rmtp = rmtp;
+               restart_block->nanosleep.expires = timespec_to_ns(rqtp);
        }
        return error;
 }
 
-long posix_cpu_nsleep_restart(struct restart_block *restart_block)
+static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
 {
-       clockid_t which_clock = restart_block->arg0;
-       struct timespec __user *rmtp;
+       clockid_t which_clock = restart_block->nanosleep.index;
        struct timespec t;
        struct itimerspec it;
        int error;
 
-       rmtp = (struct timespec __user *) restart_block->arg1;
-       t.tv_sec = restart_block->arg2;
-       t.tv_nsec = restart_block->arg3;
+       t = ns_to_timespec(restart_block->nanosleep.expires);
 
-       restart_block->fn = do_no_restart_syscall;
        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
 
        if (error == -ERESTART_RESTARTBLOCK) {
+               struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
                /*
-                * Report back to the user the time still remaining.
-                */
-               if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
+                * Report back to the user the time still remaining.
+                */
+               if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;
 
-               restart_block->fn = posix_cpu_nsleep_restart;
-               restart_block->arg0 = which_clock;
-               restart_block->arg1 = (unsigned long) rmtp;
-               restart_block->arg2 = t.tv_sec;
-               restart_block->arg3 = t.tv_nsec;
+               restart_block->nanosleep.expires = timespec_to_ns(&t);
        }
        return error;
 
 }
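Besides dropping the open-coded arg0..arg3 marshalling, the restart block now carries a typed nanosleep record: the absolute expiry is stored as a single s64 via timespec_to_ns() and recovered with ns_to_timespec() on restart, so e.g. a remaining {2 s, 500000000 ns} round-trips losslessly as 2500000000 ns.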
 
-
 #define PROCESS_CLOCK  MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
 #define THREAD_CLOCK   MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
 
@@ -1594,38 +1589,37 @@ static int thread_cpu_timer_create(struct k_itimer *timer)
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
 }
-static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
-                             struct timespec *rqtp, struct timespec __user *rmtp)
-{
-       return -EINVAL;
-}
-static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
-{
-       return -EINVAL;
-}
+
+struct k_clock clock_posix_cpu = {
+       .clock_getres   = posix_cpu_clock_getres,
+       .clock_set      = posix_cpu_clock_set,
+       .clock_get      = posix_cpu_clock_get,
+       .timer_create   = posix_cpu_timer_create,
+       .nsleep         = posix_cpu_nsleep,
+       .nsleep_restart = posix_cpu_nsleep_restart,
+       .timer_set      = posix_cpu_timer_set,
+       .timer_del      = posix_cpu_timer_del,
+       .timer_get      = posix_cpu_timer_get,
+};
 
 static __init int init_posix_cpu_timers(void)
 {
        struct k_clock process = {
-               .clock_getres = process_cpu_clock_getres,
-               .clock_get = process_cpu_clock_get,
-               .clock_set = do_posix_clock_nosettime,
-               .timer_create = process_cpu_timer_create,
-               .nsleep = process_cpu_nsleep,
-               .nsleep_restart = process_cpu_nsleep_restart,
+               .clock_getres   = process_cpu_clock_getres,
+               .clock_get      = process_cpu_clock_get,
+               .timer_create   = process_cpu_timer_create,
+               .nsleep         = process_cpu_nsleep,
+               .nsleep_restart = process_cpu_nsleep_restart,
        };
        struct k_clock thread = {
-               .clock_getres = thread_cpu_clock_getres,
-               .clock_get = thread_cpu_clock_get,
-               .clock_set = do_posix_clock_nosettime,
-               .timer_create = thread_cpu_timer_create,
-               .nsleep = thread_cpu_nsleep,
-               .nsleep_restart = thread_cpu_nsleep_restart,
+               .clock_getres   = thread_cpu_clock_getres,
+               .clock_get      = thread_cpu_clock_get,
+               .timer_create   = thread_cpu_timer_create,
        };
        struct timespec ts;
 
-       register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
-       register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
+       posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
+       posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
 
        cputime_to_timespec(cputime_one_jiffy, &ts);
        onecputick = ts.tv_nsec;
index 93bd2eb2bc53efe76dd120501b0cbda115b71bfd..4c0124919f9a36fa905cc1266fa132de27f01550 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/init.h>
 #include <linux/compiler.h>
 #include <linux/idr.h>
+#include <linux/posix-clock.h>
 #include <linux/posix-timers.h>
 #include <linux/syscalls.h>
 #include <linux/wait.h>
@@ -81,6 +82,14 @@ static DEFINE_SPINLOCK(idr_lock);
 #error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
 #endif
 
+/*
+ * parisc wants ENOTSUP instead of EOPNOTSUPP
+ */
+#ifndef ENOTSUP
+# define ENANOSLEEP_NOTSUP EOPNOTSUPP
+#else
+# define ENANOSLEEP_NOTSUP ENOTSUP
+#endif
 
 /*
  * The timer ID is turned into a timer address by idr_find().
@@ -94,11 +103,7 @@ static DEFINE_SPINLOCK(idr_lock);
 /*
  * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
  *         to implement others.  This structure defines the various
- *         clocks and allows the possibility of adding others.  We
- *         provide an interface to add clocks to the table and expect
- *         the "arch" code to add at least one clock that is high
- *         resolution.  Here we define the standard CLOCK_REALTIME as a
- *         1/HZ resolution clock.
+ *         clocks.
  *
  * RESOLUTION: Clock resolution is used to round up timer and interval
  *         times, NOT to report clock times, which are reported with as
@@ -108,20 +113,13 @@ static DEFINE_SPINLOCK(idr_lock);
  *         necessary code is written.  The standard says we should say
  *         something about this issue in the documentation...
  *
- * FUNCTIONS: The CLOCKs structure defines possible functions to handle
- *         various clock functions.  For clocks that use the standard
- *         system timer code these entries should be NULL.  This will
- *         allow dispatch without the overhead of indirect function
- *         calls.  CLOCKS that depend on other sources (e.g. WWV or GPS)
- *         must supply functions here, even if the function just returns
- *         ENOSYS.  The standard POSIX timer management code assumes the
- *         following: 1.) The k_itimer struct (sched.h) is used for the
- *         timer.  2.) The list, it_lock, it_clock, it_id and it_pid
- *         fields are not modified by timer code.
+ * FUNCTIONS: The CLOCKs structure defines possible functions to
+ *         handle various clock functions.
  *
- *          At this time all functions EXCEPT clock_nanosleep can be
- *          redirected by the CLOCKS structure.  Clock_nanosleep is in
- *          there, but the code ignores it.
+ *         The standard POSIX timer management code assumes the
+ *         following: 1.) The k_itimer struct (sched.h) is used for
+ *         the timer.  2.) The list, it_lock, it_clock, it_id and
+ *         it_pid fields are not modified by timer code.
  *
  * Permissions: It is assumed that the clock_settime() function defined
  *         for each clock will take care of permission checks.  Some
@@ -138,6 +136,7 @@ static struct k_clock posix_clocks[MAX_CLOCKS];
  */
 static int common_nsleep(const clockid_t, int flags, struct timespec *t,
                         struct timespec __user *rmtp);
+static int common_timer_create(struct k_itimer *new_timer);
 static void common_timer_get(struct k_itimer *, struct itimerspec *);
 static int common_timer_set(struct k_itimer *, int,
                            struct itimerspec *, struct itimerspec *);
@@ -158,76 +157,24 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
        spin_unlock_irqrestore(&timr->it_lock, flags);
 }
 
-/*
- * Call the k_clock hook function if non-null, or the default function.
- */
-#define CLOCK_DISPATCH(clock, call, arglist) \
-       ((clock) < 0 ? posix_cpu_##call arglist : \
-        (posix_clocks[clock].call != NULL \
-         ? (*posix_clocks[clock].call) arglist : common_##call arglist))
-
-/*
- * Default clock hook functions when the struct k_clock passed
- * to register_posix_clock leaves a function pointer null.
- *
- * The function common_CALL is the default implementation for
- * the function pointer CALL in struct k_clock.
- */
-
-static inline int common_clock_getres(const clockid_t which_clock,
-                                     struct timespec *tp)
-{
-       tp->tv_sec = 0;
-       tp->tv_nsec = posix_clocks[which_clock].res;
-       return 0;
-}
-
-/*
- * Get real time for posix timers
- */
-static int common_clock_get(clockid_t which_clock, struct timespec *tp)
+/* Get clock_realtime */
+static int posix_clock_realtime_get(clockid_t which_clock, struct timespec *tp)
 {
        ktime_get_real_ts(tp);
        return 0;
 }
 
-static inline int common_clock_set(const clockid_t which_clock,
-                                  struct timespec *tp)
+/* Set clock_realtime */
+static int posix_clock_realtime_set(const clockid_t which_clock,
+                                   const struct timespec *tp)
 {
        return do_sys_settimeofday(tp, NULL);
 }
 
-static int common_timer_create(struct k_itimer *new_timer)
-{
-       hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
-       return 0;
-}
-
-static int no_timer_create(struct k_itimer *new_timer)
-{
-       return -EOPNOTSUPP;
-}
-
-static int no_nsleep(const clockid_t which_clock, int flags,
-                    struct timespec *tsave, struct timespec __user *rmtp)
-{
-       return -EOPNOTSUPP;
-}
-
-/*
- * Return nonzero if we know a priori this clockid_t value is bogus.
- */
-static inline int invalid_clockid(const clockid_t which_clock)
+static int posix_clock_realtime_adj(const clockid_t which_clock,
+                                   struct timex *t)
 {
-       if (which_clock < 0)    /* CPU clock, posix_cpu_* will check it */
-               return 0;
-       if ((unsigned) which_clock >= MAX_CLOCKS)
-               return 1;
-       if (posix_clocks[which_clock].clock_getres != NULL)
-               return 0;
-       if (posix_clocks[which_clock].res != 0)
-               return 0;
-       return 1;
+       return do_adjtimex(t);
 }
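The new clock_adj hook pre-stages the clock_adjtime() syscall introduced alongside this work; for CLOCK_REALTIME it simply forwards the struct timex to do_adjtimex(), the same backend ntpd already reaches through adjtimex(2).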
 
 /*
@@ -240,7 +187,7 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
 }
 
 /*
- * Get monotonic time for posix timers
+ * Get monotonic-raw time for posix timers
  */
 static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
 {
@@ -267,46 +214,70 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp
        *tp = ktime_to_timespec(KTIME_LOW_RES);
        return 0;
 }
+
+static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
+{
+       get_monotonic_boottime(tp);
+       return 0;
+}
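CLOCK_BOOTTIME behaves like CLOCK_MONOTONIC but keeps counting across system suspend: get_monotonic_boottime() returns the monotonic clock plus accumulated sleep time, which is why the registration below can reuse the common hrtimer-based timer and nsleep methods unchanged.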
+
+
 /*
  * Initialize everything, well, just everything in Posix clocks/timers ;)
  */
 static __init int init_posix_timers(void)
 {
        struct k_clock clock_realtime = {
-               .clock_getres = hrtimer_get_res,
+               .clock_getres   = hrtimer_get_res,
+               .clock_get      = posix_clock_realtime_get,
+               .clock_set      = posix_clock_realtime_set,
+               .clock_adj      = posix_clock_realtime_adj,
+               .nsleep         = common_nsleep,
+               .nsleep_restart = hrtimer_nanosleep_restart,
+               .timer_create   = common_timer_create,
+               .timer_set      = common_timer_set,
+               .timer_get      = common_timer_get,
+               .timer_del      = common_timer_del,
        };
        struct k_clock clock_monotonic = {
-               .clock_getres = hrtimer_get_res,
-               .clock_get = posix_ktime_get_ts,
-               .clock_set = do_posix_clock_nosettime,
+               .clock_getres   = hrtimer_get_res,
+               .clock_get      = posix_ktime_get_ts,
+               .nsleep         = common_nsleep,
+               .nsleep_restart = hrtimer_nanosleep_restart,
+               .timer_create   = common_timer_create,
+               .timer_set      = common_timer_set,
+               .timer_get      = common_timer_get,
+               .timer_del      = common_timer_del,
        };
        struct k_clock clock_monotonic_raw = {
-               .clock_getres = hrtimer_get_res,
-               .clock_get = posix_get_monotonic_raw,
-               .clock_set = do_posix_clock_nosettime,
-               .timer_create = no_timer_create,
-               .nsleep = no_nsleep,
+               .clock_getres   = hrtimer_get_res,
+               .clock_get      = posix_get_monotonic_raw,
        };
        struct k_clock clock_realtime_coarse = {
-               .clock_getres = posix_get_coarse_res,
-               .clock_get = posix_get_realtime_coarse,
-               .clock_set = do_posix_clock_nosettime,
-               .timer_create = no_timer_create,
-               .nsleep = no_nsleep,
+               .clock_getres   = posix_get_coarse_res,
+               .clock_get      = posix_get_realtime_coarse,
        };
        struct k_clock clock_monotonic_coarse = {
-               .clock_getres = posix_get_coarse_res,
-               .clock_get = posix_get_monotonic_coarse,
-               .clock_set = do_posix_clock_nosettime,
-               .timer_create = no_timer_create,
-               .nsleep = no_nsleep,
+               .clock_getres   = posix_get_coarse_res,
+               .clock_get      = posix_get_monotonic_coarse,
+       };
+       struct k_clock clock_boottime = {
+               .clock_getres   = hrtimer_get_res,
+               .clock_get      = posix_get_boottime,
+               .nsleep         = common_nsleep,
+               .nsleep_restart = hrtimer_nanosleep_restart,
+               .timer_create   = common_timer_create,
+               .timer_set      = common_timer_set,
+               .timer_get      = common_timer_get,
+               .timer_del      = common_timer_del,
        };
 
-       register_posix_clock(CLOCK_REALTIME, &clock_realtime);
-       register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
-       register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
-       register_posix_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
-       register_posix_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
+       posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
+       posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
+       posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
+       posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
+       posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
+       posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);
 
        posix_timers_cache = kmem_cache_create("posix_timers_cache",
                                        sizeof (struct k_itimer), 0, SLAB_PANIC,
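With table dispatch replacing CLOCK_DISPATCH, the registration contract shrinks to the two accessors posix_timers_register_clock() now insists on; hooks left NULL make the corresponding syscall fail cleanly instead of being silently defaulted. A minimal sketch with hypothetical names, registered via posix_timers_register_clock() under an otherwise unused clock id:

    static int demo_clock_getres(const clockid_t which_clock,
                                 struct timespec *tp)
    {
            tp->tv_sec  = 0;
            tp->tv_nsec = NSEC_PER_SEC / HZ;    /* tick resolution */
            return 0;
    }

    static int demo_clock_get(const clockid_t which_clock,
                              struct timespec *tp)
    {
            ktime_get_ts(tp);   /* back the demo clock by CLOCK_MONOTONIC */
            return 0;
    }

    static struct k_clock demo_clock = {
            .clock_getres = demo_clock_getres,
            .clock_get    = demo_clock_get,
            /*
             * No timer_create/nsleep: timer_create() now returns
             * -EOPNOTSUPP, and nanosleep presumably fails with
             * ENANOSLEEP_NOTSUP per the define above.
             */
    };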
@@ -482,17 +453,29 @@ static struct pid *good_sigevent(sigevent_t * event)
        return task_pid(rtn);
 }
 
-void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
+void posix_timers_register_clock(const clockid_t clock_id,
+                                struct k_clock *new_clock)
 {
        if ((unsigned) clock_id >= MAX_CLOCKS) {
-               printk("POSIX clock register failed for clock_id %d\n",
+               printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
+                      clock_id);
+               return;
+       }
+
+       if (!new_clock->clock_get) {
+               printk(KERN_WARNING "POSIX clock id %d lacks clock_get()\n",
+                      clock_id);
+               return;
+       }
+       if (!new_clock->clock_getres) {
+               printk(KERN_WARNING "POSIX clock id %d lacks clock_getres()\n",
                       clock_id);
                return;
        }
 
        posix_clocks[clock_id] = *new_clock;
 }
-EXPORT_SYMBOL_GPL(register_posix_clock);
+EXPORT_SYMBOL_GPL(posix_timers_register_clock);
 
 static struct k_itimer * alloc_posix_timer(void)
 {
@@ -523,19 +506,39 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
        kmem_cache_free(posix_timers_cache, tmr);
 }
 
+static struct k_clock *clockid_to_kclock(const clockid_t id)
+{
+       if (id < 0)
+               return (id & CLOCKFD_MASK) == CLOCKFD ?
+                       &clock_posix_dynamic : &clock_posix_cpu;
+
+       if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
+               return NULL;
+       return &posix_clocks[id];
+}
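The negative-id split above multiplexes two clock families; a brief restatement as a hypothetical predicate, assuming the CLOCKFD/CLOCKFD_MASK definitions that accompany this series in posix-timers.h:

    /* ids whose low bits equal CLOCKFD name fd-backed dynamic clocks;
     * every other negative id is a CPU clock. */
    static inline bool clockid_is_dynamic(const clockid_t id)
    {
            return id < 0 && (id & CLOCKFD_MASK) == CLOCKFD;
    }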
+
+static int common_timer_create(struct k_itimer *new_timer)
+{
+       hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
+       return 0;
+}
+
 /* Create a POSIX.1b interval timer. */
 
 SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
                struct sigevent __user *, timer_event_spec,
                timer_t __user *, created_timer_id)
 {
+       struct k_clock *kc = clockid_to_kclock(which_clock);
        struct k_itimer *new_timer;
        int error, new_timer_id;
        sigevent_t event;
        int it_id_set = IT_ID_NOT_SET;
 
-       if (invalid_clockid(which_clock))
+       if (!kc)
                return -EINVAL;
+       if (!kc->timer_create)
+               return -EOPNOTSUPP;
 
        new_timer = alloc_posix_timer();
        if (unlikely(!new_timer))
@@ -597,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
                goto out;
        }
 
-       error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
+       error = kc->timer_create(new_timer);
        if (error)
                goto out;
 
@@ -607,7 +610,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
        spin_unlock_irq(&current->sighand->siglock);
 
        return 0;
-       /*
+       /*
         * In the case of the timer belonging to another task, after
         * the task is unlocked, the timer is owned by the other task
         * and may cease to exist at any time.  Don't use or modify
@@ -709,22 +712,28 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
                struct itimerspec __user *, setting)
 {
-       struct k_itimer *timr;
        struct itimerspec cur_setting;
+       struct k_itimer *timr;
+       struct k_clock *kc;
        unsigned long flags;
+       int ret = 0;
 
        timr = lock_timer(timer_id, &flags);
        if (!timr)
                return -EINVAL;
 
-       CLOCK_DISPATCH(timr->it_clock, timer_get, (timr, &cur_setting));
+       kc = clockid_to_kclock(timr->it_clock);
+       if (WARN_ON_ONCE(!kc || !kc->timer_get))
+               ret = -EINVAL;
+       else
+               kc->timer_get(timr, &cur_setting);
 
        unlock_timer(timr, flags);
 
-       if (copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
+       if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
                return -EFAULT;
 
-       return 0;
+       return ret;
 }
 
 /*
@@ -813,6 +822,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
        int error = 0;
        unsigned long flag;
        struct itimerspec *rtn = old_setting ? &old_spec : NULL;
+       struct k_clock *kc;
 
        if (!new_setting)
                return -EINVAL;
@@ -828,8 +838,11 @@ retry:
        if (!timr)
                return -EINVAL;
 
-       error = CLOCK_DISPATCH(timr->it_clock, timer_set,
-                              (timr, flags, &new_spec, rtn));
+       kc = clockid_to_kclock(timr->it_clock);
+       if (WARN_ON_ONCE(!kc || !kc->timer_set))
+               error = -EINVAL;
+       else
+               error = kc->timer_set(timr, flags, &new_spec, rtn);
 
        unlock_timer(timr, flag);
        if (error == TIMER_RETRY) {
@@ -844,7 +857,7 @@ retry:
        return error;
 }
 
-static inline int common_timer_del(struct k_itimer *timer)
+static int common_timer_del(struct k_itimer *timer)
 {
        timer->it.real.interval.tv64 = 0;
 
@@ -855,7 +868,11 @@ static inline int common_timer_del(struct k_itimer *timer)
 
 static inline int timer_delete_hook(struct k_itimer *timer)
 {
-       return CLOCK_DISPATCH(timer->it_clock, timer_del, (timer));
+       struct k_clock *kc = clockid_to_kclock(timer->it_clock);
+
+       if (WARN_ON_ONCE(!kc || !kc->timer_del))
+               return -EINVAL;
+       return kc->timer_del(timer);
 }
 
 /* Delete a POSIX.1b interval timer. */
@@ -927,69 +944,76 @@ void exit_itimers(struct signal_struct *sig)
        }
 }
 
-/* Not available / possible... functions */
-int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
-{
-       return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);
-
-int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
-                              struct timespec *t, struct timespec __user *r)
-{
-#ifndef ENOTSUP
-       return -EOPNOTSUPP;     /* aka ENOTSUP in userland for POSIX */
-#else  /*  parisc does define it separately.  */
-       return -ENOTSUP;
-#endif
-}
-EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
-
 SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
                const struct timespec __user *, tp)
 {
+       struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec new_tp;
 
-       if (invalid_clockid(which_clock))
+       if (!kc || !kc->clock_set)
                return -EINVAL;
+
        if (copy_from_user(&new_tp, tp, sizeof (*tp)))
                return -EFAULT;
 
-       return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
+       return kc->clock_set(which_clock, &new_tp);
 }
 
 SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
                struct timespec __user *,tp)
 {
+       struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec kernel_tp;
        int error;
 
-       if (invalid_clockid(which_clock))
+       if (!kc)
                return -EINVAL;
-       error = CLOCK_DISPATCH(which_clock, clock_get,
-                              (which_clock, &kernel_tp));
+
+       error = kc->clock_get(which_clock, &kernel_tp);
+
        if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
                error = -EFAULT;
 
        return error;
+}
+
+SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
+               struct timex __user *, utx)
+{
+       struct k_clock *kc = clockid_to_kclock(which_clock);
+       struct timex ktx;
+       int err;
+
+       if (!kc)
+               return -EINVAL;
+       if (!kc->clock_adj)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&ktx, utx, sizeof(ktx)))
+               return -EFAULT;
+
+       err = kc->clock_adj(which_clock, &ktx);
+
+       if (!err && copy_to_user(utx, &ktx, sizeof(ktx)))
+               return -EFAULT;
 
+       return err;
 }
 
 SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
                struct timespec __user *, tp)
 {
+       struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec rtn_tp;
        int error;
 
-       if (invalid_clockid(which_clock))
+       if (!kc)
                return -EINVAL;
 
-       error = CLOCK_DISPATCH(which_clock, clock_getres,
-                              (which_clock, &rtn_tp));
+       error = kc->clock_getres(which_clock, &rtn_tp);
 
-       if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp))) {
+       if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
                error = -EFAULT;
-       }
 
        return error;
 }
@@ -1009,10 +1033,13 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
                const struct timespec __user *, rqtp,
                struct timespec __user *, rmtp)
 {
+       struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec t;
 
-       if (invalid_clockid(which_clock))
+       if (!kc)
                return -EINVAL;
+       if (!kc->nsleep)
+               return -ENANOSLEEP_NOTSUP;
 
        if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
                return -EFAULT;
@@ -1020,27 +1047,20 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
        if (!timespec_valid(&t))
                return -EINVAL;
 
-       return CLOCK_DISPATCH(which_clock, nsleep,
-                             (which_clock, flags, &t, rmtp));
-}
-
-/*
- * nanosleep_restart for monotonic and realtime clocks
- */
-static int common_nsleep_restart(struct restart_block *restart_block)
-{
-       return hrtimer_nanosleep_restart(restart_block);
+       return kc->nsleep(which_clock, flags, &t, rmtp);
 }
 
 /*
  * This will restart clock_nanosleep. This is required only by
  * compat_clock_nanosleep_restart for now.
  */
-long
-clock_nanosleep_restart(struct restart_block *restart_block)
+long clock_nanosleep_restart(struct restart_block *restart_block)
 {
-       clockid_t which_clock = restart_block->arg0;
+       clockid_t which_clock = restart_block->nanosleep.index;
+       struct k_clock *kc = clockid_to_kclock(which_clock);
+
+       if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
+               return -EINVAL;
 
-       return CLOCK_DISPATCH(which_clock, nsleep_restart,
-                             (restart_block));
+       return kc->nsleep_restart(restart_block);
 }
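The posix-timers changes end with the plumbing for the new clock_adjtime() syscall added above: copy the timex in, call the per-clock clock_adj() hook, copy the possibly updated timex back out. A hedged userspace sketch of invoking it; this assumes the syscall number is already wired up (done elsewhere in this series) and goes through syscall(2) since a libc wrapper may not exist yet:

	#include <stdio.h>
	#include <sys/syscall.h>	/* SYS_clock_adjtime assumed present */
	#include <sys/timex.h>
	#include <time.h>
	#include <unistd.h>

	int main(void)
	{
		struct timex tx = { .modes = 0 };	/* read-only query */
		long ret = syscall(SYS_clock_adjtime, CLOCK_REALTIME, &tx);

		if (ret < 0)
			perror("clock_adjtime");
		else
			printf("freq: %ld, offset: %ld\n", tx.freq, tx.offset);
		return 0;
	}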
index 1708b1e2972d60df5ad7e461a57b4e8dbf0f103d..e2302e40b360006d671b4fe5cad9baf5de7419e3 100644 kernel/ptrace.c
@@ -163,7 +163,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
        return !err;
 }
 
-int ptrace_attach(struct task_struct *task)
+static int ptrace_attach(struct task_struct *task)
 {
        int retval;
 
@@ -219,7 +219,7 @@ out:
  * Performs checks and sets PT_PTRACED.
  * Should be used by all ptrace implementations for PTRACE_TRACEME.
  */
-int ptrace_traceme(void)
+static int ptrace_traceme(void)
 {
        int ret = -EPERM;
 
@@ -293,7 +293,7 @@ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
        return false;
 }
 
-int ptrace_detach(struct task_struct *child, unsigned int data)
+static int ptrace_detach(struct task_struct *child, unsigned int data)
 {
        bool dead = false;
 
index ddabb54bb5c81c70958a441d302e80543cbcae37..3c7cbc2c33befdd3c35fab2c2748862d21e7525d 100644 kernel/rtmutex-debug.c
@@ -215,7 +215,6 @@ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
        put_pid(waiter->deadlock_task_pid);
        TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
        TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
-       TRACE_WARN_ON(waiter->task);
        memset(waiter, 0x22, sizeof(*waiter));
 }
 
index 66cb89bc5ef1203f08b5d17d140b0786a8f80bbd..5c9ccd3809668b48b1f9bcdfd3098eef726101ed 100644 kernel/rtmutex-tester.c
@@ -9,7 +9,6 @@
 #include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/sched.h>
-#include <linux/smp_lock.h>
 #include <linux/spinlock.h>
 #include <linux/sysdev.h>
 #include <linux/timer.h>
@@ -27,7 +26,6 @@ struct test_thread_data {
        int                     opcode;
        int                     opdata;
        int                     mutexes[MAX_RT_TEST_MUTEXES];
-       int                     bkl;
        int                     event;
        struct sys_device       sysdev;
 };
@@ -46,9 +44,8 @@ enum test_opcodes {
        RTTEST_LOCKINTNOWAIT,   /* 6 Lock interruptible no wait in wakeup, data = lockindex */
        RTTEST_LOCKCONT,        /* 7 Continue locking after the wakeup delay */
        RTTEST_UNLOCK,          /* 8 Unlock, data = lockindex */
-       RTTEST_LOCKBKL,         /* 9 Lock BKL */
-       RTTEST_UNLOCKBKL,       /* 10 Unlock BKL */
-       RTTEST_SIGNAL,          /* 11 Signal other test thread, data = thread id */
+       /* 9, 10 - reserved for BKL commemoration */
+       RTTEST_SIGNAL = 11,     /* 11 Signal other test thread, data = thread id */
        RTTEST_RESETEVENT = 98, /* 98 Reset event counter */
        RTTEST_RESET = 99,      /* 99 Reset all pending operations */
 };
@@ -74,13 +71,6 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
                                td->mutexes[i] = 0;
                        }
                }
-
-               if (!lockwakeup && td->bkl == 4) {
-#ifdef CONFIG_LOCK_KERNEL
-                       unlock_kernel();
-#endif
-                       td->bkl = 0;
-               }
                return 0;
 
        case RTTEST_RESETEVENT:
@@ -131,25 +121,6 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
                td->mutexes[id] = 0;
                return 0;
 
-       case RTTEST_LOCKBKL:
-               if (td->bkl)
-                       return 0;
-               td->bkl = 1;
-#ifdef CONFIG_LOCK_KERNEL
-               lock_kernel();
-#endif
-               td->bkl = 4;
-               return 0;
-
-       case RTTEST_UNLOCKBKL:
-               if (td->bkl != 4)
-                       break;
-#ifdef CONFIG_LOCK_KERNEL
-               unlock_kernel();
-#endif
-               td->bkl = 0;
-               return 0;
-
        default:
                break;
        }
@@ -196,7 +167,6 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
                td->event = atomic_add_return(1, &rttest_event);
                break;
 
-       case RTTEST_LOCKBKL:
        default:
                break;
        }
@@ -229,8 +199,6 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
                td->event = atomic_add_return(1, &rttest_event);
                return;
 
-       case RTTEST_LOCKBKL:
-               return;
        default:
                return;
        }
@@ -380,11 +348,11 @@ static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute
        spin_lock(&rttest_lock);
 
        curr += sprintf(curr,
-               "O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, K: %d, M:",
+               "O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, M:",
                td->opcode, td->event, tsk->state,
                        (MAX_RT_PRIO - 1) - tsk->prio,
                        (MAX_RT_PRIO - 1) - tsk->normal_prio,
-               tsk->pi_blocked_on, td->bkl);
+               tsk->pi_blocked_on);
 
        for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--)
                curr += sprintf(curr, "%d", td->mutexes[i]);
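With the BKL gone, the two BKL opcodes are deleted from the tester, but RTTEST_SIGNAL is pinned to 11 so userspace test scripts that write raw opcode numbers into the sysfs command file keep their meaning. The same pattern in isolation, with hypothetical names:

	/* hedged sketch: retire an entry without renumbering the ABI */
	enum demo_cmd {
		DEMO_LOCK   = 1,
		/* 2 retired -- callers pass raw numbers, keep it reserved */
		DEMO_UNLOCK = 3,
	};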
index a9604815786afcf781b0dc5787335aee4fd54e7f..ab449117aaf293e5135a58bc7b66b6cd8b1d669e 100644 kernel/rtmutex.c
 /*
  * lock->owner state tracking:
  *
- * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
- * are used to keep track of the "owner is pending" and "lock has
- * waiters" state.
+ * lock->owner holds the task_struct pointer of the owner. Bit 0
+ * is used to keep track of the "lock has waiters" state.
  *
- * owner       bit1    bit0
- * NULL                0       0       lock is free (fast acquire possible)
- * NULL                0       1       invalid state
- * NULL                1       0       Transitional State*
- * NULL                1       1       invalid state
- * taskpointer 0       0       lock is held (fast release possible)
- * taskpointer 0       1       task is pending owner
- * taskpointer 1       0       lock is held and has waiters
- * taskpointer 1       1       task is pending owner and lock has more waiters
- *
- * Pending ownership is assigned to the top (highest priority)
- * waiter of the lock, when the lock is released. The thread is woken
- * up and can now take the lock. Until the lock is taken (bit 0
- * cleared) a competing higher priority thread can steal the lock
- * which puts the woken up thread back on the waiters list.
+ * owner       bit0
+ * NULL                0       lock is free (fast acquire possible)
+ * NULL                1       lock is free and has waiters and the top waiter
+ *                             is going to take the lock*
+ * taskpointer 0       lock is held (fast release possible)
+ * taskpointer 1       lock is held and has waiters**
  *
  * The fast atomic compare exchange based acquire and release is only
- * possible when bit 0 and 1 of lock->owner are 0.
+ * possible when bit 0 of lock->owner is 0.
+ *
+ * (*) It can also be a transitional state while grabbing the lock
+ * with ->wait_lock held. To prevent any fast path cmpxchg on the lock,
+ * we need to set bit 0 before looking at the lock, and the owner may
+ * be NULL during this small window, hence this can be a transitional
+ * state.
  *
- * (*) There's a small time where the owner can be NULL and the
- * "lock has waiters" bit is set.  This can happen when grabbing the lock.
- * To prevent a cmpxchg of the owner releasing the lock, we need to set this
- * bit before looking at the lock, hence the reason this is a transitional
- * state.
+ * (**) There is a small window in which bit 0 is set but there are no
+ * waiters. This can happen when grabbing the lock in the slow path.
+ * To prevent a cmpxchg of the owner releasing the lock, we need to
+ * set this bit before looking at the lock.
  */
 
 static void
-rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
-                  unsigned long mask)
+rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
 {
-       unsigned long val = (unsigned long)owner | mask;
+       unsigned long val = (unsigned long)owner;
 
        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;
@@ -203,15 +196,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
-       if (!waiter || !waiter->task)
+       if (!waiter)
                goto out_unlock_pi;
 
        /*
         * Check the orig_waiter state. After we dropped the locks,
-        * the previous owner of the lock might have released the lock
-        * and made us the pending owner:
+        * the previous owner of the lock might have released the lock.
         */
-       if (orig_waiter && !orig_waiter->task)
+       if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;
 
        /*
@@ -254,6 +246,17 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 
        /* Release the task */
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       if (!rt_mutex_owner(lock)) {
+               /*
+                * If the requeue above changed the top waiter, then we need
+                * to wake the new top waiter up to try to get the lock.
+                */
+
+               if (top_waiter != rt_mutex_top_waiter(lock))
+                       wake_up_process(rt_mutex_top_waiter(lock)->task);
+               raw_spin_unlock(&lock->wait_lock);
+               goto out_put_task;
+       }
        put_task_struct(task);
 
        /* Grab the next task */
@@ -295,79 +298,17 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        return ret;
 }
 
-/*
- * Optimization: check if we can steal the lock from the
- * assigned pending owner [which might not have taken the
- * lock yet]:
- */
-static inline int try_to_steal_lock(struct rt_mutex *lock,
-                                   struct task_struct *task)
-{
-       struct task_struct *pendowner = rt_mutex_owner(lock);
-       struct rt_mutex_waiter *next;
-       unsigned long flags;
-
-       if (!rt_mutex_owner_pending(lock))
-               return 0;
-
-       if (pendowner == task)
-               return 1;
-
-       raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
-       if (task->prio >= pendowner->prio) {
-               raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
-               return 0;
-       }
-
-       /*
-        * Check if a waiter is enqueued on the pending owners
-        * pi_waiters list. Remove it and readjust pending owners
-        * priority.
-        */
-       if (likely(!rt_mutex_has_waiters(lock))) {
-               raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
-               return 1;
-       }
-
-       /* No chain handling, pending owner is not blocked on anything: */
-       next = rt_mutex_top_waiter(lock);
-       plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
-       __rt_mutex_adjust_prio(pendowner);
-       raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
-
-       /*
-        * We are going to steal the lock and a waiter was
-        * enqueued on the pending owners pi_waiters queue. So
-        * we have to enqueue this waiter into
-        * task->pi_waiters list. This covers the case,
-        * where task is boosted because it holds another
-        * lock and gets unboosted because the booster is
-        * interrupted, so we would delay a waiter with higher
-        * priority as task->normal_prio.
-        *
-        * Note: in the rare case of a SCHED_OTHER task changing
-        * its priority and thus stealing the lock, next->task
-        * might be task:
-        */
-       if (likely(next->task != task)) {
-               raw_spin_lock_irqsave(&task->pi_lock, flags);
-               plist_add(&next->pi_list_entry, &task->pi_waiters);
-               __rt_mutex_adjust_prio(task);
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-       }
-       return 1;
-}
-
 /*
  * Try to take an rt-mutex
  *
- * This fails
- * - when the lock has a real owner
- * - when a different pending owner exists and has higher priority than current
- *
  * Must be called with lock->wait_lock held.
+ *
+ * @lock:   the lock to be acquired.
+ * @task:   the task which wants to acquire the lock
+ * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
  */
-static int try_to_take_rt_mutex(struct rt_mutex *lock)
+static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+               struct rt_mutex_waiter *waiter)
 {
        /*
         * We have to be careful here if the atomic speedups are
@@ -390,15 +331,52 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock)
         */
        mark_rt_mutex_waiters(lock);
 
-       if (rt_mutex_owner(lock) && !try_to_steal_lock(lock, current))
+       if (rt_mutex_owner(lock))
                return 0;
 
+       /*
+        * The task will get the lock when one of these conditions holds:
+        * 1) there is no waiter
+        * 2) it has higher priority than all waiters
+        * 3) it is the top waiter
+        */
+       if (rt_mutex_has_waiters(lock)) {
+               if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
+                       if (!waiter || waiter != rt_mutex_top_waiter(lock))
+                               return 0;
+               }
+       }
+
+       if (waiter || rt_mutex_has_waiters(lock)) {
+               unsigned long flags;
+               struct rt_mutex_waiter *top;
+
+               raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+               /* remove the queued waiter. */
+               if (waiter) {
+                       plist_del(&waiter->list_entry, &lock->wait_list);
+                       task->pi_blocked_on = NULL;
+               }
+
+               /*
+                * We have to enqueue the top waiter (if it exists) into
+                * task->pi_waiters list.
+                */
+               if (rt_mutex_has_waiters(lock)) {
+                       top = rt_mutex_top_waiter(lock);
+                       top->pi_list_entry.prio = top->list_entry.prio;
+                       plist_add(&top->pi_list_entry, &task->pi_waiters);
+               }
+               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       }
+
        /* We got the lock. */
        debug_rt_mutex_lock(lock);
 
-       rt_mutex_set_owner(lock, current, 0);
+       rt_mutex_set_owner(lock, task);
 
-       rt_mutex_deadlock_account_lock(lock, current);
+       rt_mutex_deadlock_account_lock(lock, task);
 
        return 1;
 }
@@ -436,6 +414,9 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
+       if (!owner)
+               return 0;
+
        if (waiter == rt_mutex_top_waiter(lock)) {
                raw_spin_lock_irqsave(&owner->pi_lock, flags);
                plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
@@ -472,21 +453,18 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 /*
  * Wake up the next waiter on the lock.
  *
- * Remove the top waiter from the current tasks waiter list and from
- * the lock waiter list. Set it as pending owner. Then wake it up.
+ * Remove the top waiter from the current tasks waiter list and wake it up.
  *
  * Called with lock->wait_lock held.
  */
 static void wakeup_next_waiter(struct rt_mutex *lock)
 {
        struct rt_mutex_waiter *waiter;
-       struct task_struct *pendowner;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&current->pi_lock, flags);
 
        waiter = rt_mutex_top_waiter(lock);
-       plist_del(&waiter->list_entry, &lock->wait_list);
 
        /*
         * Remove it from current->pi_waiters. We do not adjust a
@@ -495,43 +473,19 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
         * lock->wait_lock.
         */
        plist_del(&waiter->pi_list_entry, &current->pi_waiters);
-       pendowner = waiter->task;
-       waiter->task = NULL;
 
-       rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
+       rt_mutex_set_owner(lock, NULL);
 
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
-       /*
-        * Clear the pi_blocked_on variable and enqueue a possible
-        * waiter into the pi_waiters list of the pending owner. This
-        * prevents that in case the pending owner gets unboosted a
-        * waiter with higher priority than pending-owner->normal_prio
-        * is blocked on the unboosted (pending) owner.
-        */
-       raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
-
-       WARN_ON(!pendowner->pi_blocked_on);
-       WARN_ON(pendowner->pi_blocked_on != waiter);
-       WARN_ON(pendowner->pi_blocked_on->lock != lock);
-
-       pendowner->pi_blocked_on = NULL;
-
-       if (rt_mutex_has_waiters(lock)) {
-               struct rt_mutex_waiter *next;
-
-               next = rt_mutex_top_waiter(lock);
-               plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
-       }
-       raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
-
-       wake_up_process(pendowner);
+       wake_up_process(waiter->task);
 }
 
 /*
- * Remove a waiter from a lock
+ * Remove a waiter from a lock and give up
  *
- * Must be called with lock->wait_lock held
+ * Must be called with lock->wait_lock held, and only after
+ * try_to_take_rt_mutex() has just failed.
  */
 static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
@@ -543,11 +497,13 @@ static void remove_waiter(struct rt_mutex *lock,
 
        raw_spin_lock_irqsave(&current->pi_lock, flags);
        plist_del(&waiter->list_entry, &lock->wait_list);
-       waiter->task = NULL;
        current->pi_blocked_on = NULL;
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
-       if (first && owner != current) {
+       if (!owner)
+               return;
+
+       if (first) {
 
                raw_spin_lock_irqsave(&owner->pi_lock, flags);
 
@@ -614,21 +570,19 @@ void rt_mutex_adjust_pi(struct task_struct *task)
  *                      or TASK_UNINTERRUPTIBLE)
  * @timeout:            the pre-initialized and started timer, or NULL for none
  * @waiter:             the pre-initialized rt_mutex_waiter
- * @detect_deadlock:    passed to task_blocks_on_rt_mutex
  *
  * lock->wait_lock must be held by the caller.
  */
 static int __sched
 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
                    struct hrtimer_sleeper *timeout,
-                   struct rt_mutex_waiter *waiter,
-                   int detect_deadlock)
+                   struct rt_mutex_waiter *waiter)
 {
        int ret = 0;
 
        for (;;) {
                /* Try to acquire the lock: */
-               if (try_to_take_rt_mutex(lock))
+               if (try_to_take_rt_mutex(lock, current, waiter))
                        break;
 
                /*
@@ -645,39 +599,11 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
                                break;
                }
 
-               /*
-                * waiter->task is NULL the first time we come here and
-                * when we have been woken up by the previous owner
-                * but the lock got stolen by a higher prio task.
-                */
-               if (!waiter->task) {
-                       ret = task_blocks_on_rt_mutex(lock, waiter, current,
-                                                     detect_deadlock);
-                       /*
-                        * If we got woken up by the owner then start loop
-                        * all over without going into schedule to try
-                        * to get the lock now:
-                        */
-                       if (unlikely(!waiter->task)) {
-                               /*
-                                * Reset the return value. We might
-                                * have returned with -EDEADLK and the
-                                * owner released the lock while we
-                                * were walking the pi chain.
-                                */
-                               ret = 0;
-                               continue;
-                       }
-                       if (unlikely(ret))
-                               break;
-               }
-
                raw_spin_unlock(&lock->wait_lock);
 
                debug_rt_mutex_print_deadlock(waiter);
 
-               if (waiter->task)
-                       schedule_rt_mutex(lock);
+               schedule_rt_mutex(lock);
 
                raw_spin_lock(&lock->wait_lock);
                set_current_state(state);
@@ -698,12 +624,11 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
        int ret = 0;
 
        debug_rt_mutex_init_waiter(&waiter);
-       waiter.task = NULL;
 
        raw_spin_lock(&lock->wait_lock);
 
        /* Try to acquire the lock again: */
-       if (try_to_take_rt_mutex(lock)) {
+       if (try_to_take_rt_mutex(lock, current, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 0;
        }
@@ -717,12 +642,14 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
                        timeout->task = NULL;
        }
 
-       ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
-                                 detect_deadlock);
+       ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
+
+       if (likely(!ret))
+               ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
 
        set_current_state(TASK_RUNNING);
 
-       if (unlikely(waiter.task))
+       if (unlikely(ret))
                remove_waiter(lock, &waiter);
 
        /*
@@ -737,14 +664,6 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);
 
-       /*
-        * Readjust priority, when we did not get the lock. We might
-        * have been the pending owner and boosted. Since we did not
-        * take the lock, the PI boost has to go.
-        */
-       if (unlikely(ret))
-               rt_mutex_adjust_prio(current);
-
        debug_rt_mutex_free_waiter(&waiter);
 
        return ret;
@@ -762,7 +681,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
 
        if (likely(rt_mutex_owner(lock) != current)) {
 
-               ret = try_to_take_rt_mutex(lock);
+               ret = try_to_take_rt_mutex(lock, current, NULL);
                /*
                 * try_to_take_rt_mutex() sets the lock waiters
                 * bit unconditionally. Clean this up.
@@ -992,7 +911,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 {
        __rt_mutex_init(lock, NULL);
        debug_rt_mutex_proxy_lock(lock, proxy_owner);
-       rt_mutex_set_owner(lock, proxy_owner, 0);
+       rt_mutex_set_owner(lock, proxy_owner);
        rt_mutex_deadlock_account_lock(lock, proxy_owner);
 }
 
@@ -1008,7 +927,7 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                           struct task_struct *proxy_owner)
 {
        debug_rt_mutex_proxy_unlock(lock);
-       rt_mutex_set_owner(lock, NULL, 0);
+       rt_mutex_set_owner(lock, NULL);
        rt_mutex_deadlock_account_unlock(proxy_owner);
 }
 
@@ -1034,20 +953,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 
        raw_spin_lock(&lock->wait_lock);
 
-       mark_rt_mutex_waiters(lock);
-
-       if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
-               /* We got the lock for task. */
-               debug_rt_mutex_lock(lock);
-               rt_mutex_set_owner(lock, task, 0);
+       if (try_to_take_rt_mutex(lock, task, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
-               rt_mutex_deadlock_account_lock(lock, task);
                return 1;
        }
 
        ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
 
-       if (ret && !waiter->task) {
+       if (ret && !rt_mutex_owner(lock)) {
                /*
                 * Reset the return value. We might have
                 * returned with -EDEADLK and the owner
@@ -1056,6 +969,10 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                 */
                ret = 0;
        }
+
+       if (unlikely(ret))
+               remove_waiter(lock, waiter);
+
        raw_spin_unlock(&lock->wait_lock);
 
        debug_rt_mutex_print_deadlock(waiter);
@@ -1110,12 +1027,11 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 
        set_current_state(TASK_INTERRUPTIBLE);
 
-       ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter,
-                                 detect_deadlock);
+       ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
 
        set_current_state(TASK_RUNNING);
 
-       if (unlikely(waiter->task))
+       if (unlikely(ret))
                remove_waiter(lock, waiter);
 
        /*
@@ -1126,13 +1042,5 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 
        raw_spin_unlock(&lock->wait_lock);
 
-       /*
-        * Readjust priority, when we did not get the lock. We might have been
-        * the pending owner and boosted. Since we did not take the lock, the
-        * PI boost has to go.
-        */
-       if (unlikely(ret))
-               rt_mutex_adjust_prio(current);
-
        return ret;
 }
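Taken together, the rtmutex changes delete the "pending owner" handoff: wakeup_next_waiter() now leaves the lock owner NULL and simply wakes the top waiter, which must win try_to_take_rt_mutex() itself, and lock stealing reduces to the priority test inside that function. A condensed control-flow sketch of the resulting slow path (task-state, signal and timeout handling omitted):

	raw_spin_lock(&lock->wait_lock);
	ret = 0;
	if (!try_to_take_rt_mutex(lock, current, NULL)) {
		ret = task_blocks_on_rt_mutex(lock, &waiter, current,
					      detect_deadlock);
		while (!ret && !try_to_take_rt_mutex(lock, current, &waiter)) {
			raw_spin_unlock(&lock->wait_lock);
			schedule();	/* woken by wakeup_next_waiter() */
			raw_spin_lock(&lock->wait_lock);
		}
	}
	raw_spin_unlock(&lock->wait_lock);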
index 97a2f81866afdb6507607c4a30348f3f74564e0e..53a66c85261bffc91142709f65610154a011541e 100644 kernel/rtmutex_common.h
@@ -91,9 +91,8 @@ task_top_pi_waiter(struct task_struct *p)
 /*
  * lock->owner state tracking:
  */
-#define RT_MUTEX_OWNER_PENDING 1UL
-#define RT_MUTEX_HAS_WAITERS   2UL
-#define RT_MUTEX_OWNER_MASKALL 3UL
+#define RT_MUTEX_HAS_WAITERS   1UL
+#define RT_MUTEX_OWNER_MASKALL 1UL
 
 static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
 {
@@ -101,17 +100,6 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
                ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
 }
 
-static inline struct task_struct *rt_mutex_real_owner(struct rt_mutex *lock)
-{
-       return (struct task_struct *)
-               ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
-}
-
-static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
-{
-       return (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING;
-}
-
 /*
  * PI-futex support (proxy locking functions, etc.):
  */
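With RT_MUTEX_OWNER_PENDING gone, lock->owner is a plain task_struct pointer whose bit 0 doubles as the "has waiters" flag; this works because task_struct pointers are at least word-aligned, leaving the low bits free. The pointer-tagging idiom in isolation, with hypothetical helper names:

	/* hedged sketch: tag bit 0 of an aligned pointer */
	#define HAS_WAITERS	1UL

	static inline unsigned long pack_owner(struct task_struct *owner,
					       int has_waiters)
	{
		return (unsigned long)owner | (has_waiters ? HAS_WAITERS : 0);
	}

	static inline struct task_struct *unpack_owner(unsigned long word)
	{
		return (struct task_struct *)(word & ~HAS_WAITERS);
	}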
index 18d38e4ec7ba249ba46079beab5a773700a7eb3b..c8e40b7005c0c2da5ae92d966cd52d8458ba82d1 100644 kernel/sched.c
@@ -324,7 +324,7 @@ struct cfs_rq {
         * 'curr' points to currently running entity on this cfs_rq.
         * It is set to NULL otherwise (i.e when none are currently running).
         */
-       struct sched_entity *curr, *next, *last;
+       struct sched_entity *curr, *next, *last, *skip;
 
        unsigned int nr_spread_over;
 
@@ -606,9 +606,6 @@ static inline struct task_group *task_group(struct task_struct *p)
        struct task_group *tg;
        struct cgroup_subsys_state *css;
 
-       if (p->flags & PF_EXITING)
-               return &root_task_group;
-
        css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
                        lockdep_is_held(&task_rq(p)->lock));
        tg = container_of(css, struct task_group, css);
@@ -1686,6 +1683,39 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
                __release(rq2->lock);
 }
 
+#else /* CONFIG_SMP */
+
+/*
+ * double_rq_lock - safely lock two runqueues
+ *
+ * Note this does not disable interrupts like task_rq_lock,
+ * you need to do so manually before calling.
+ */
+static void double_rq_lock(struct rq *rq1, struct rq *rq2)
+       __acquires(rq1->lock)
+       __acquires(rq2->lock)
+{
+       BUG_ON(!irqs_disabled());
+       BUG_ON(rq1 != rq2);
+       raw_spin_lock(&rq1->lock);
+       __acquire(rq2->lock);   /* Fake it out ;) */
+}
+
+/*
+ * double_rq_unlock - safely unlock two runqueues
+ *
+ * Note this does not restore interrupts like task_rq_unlock,
+ * you need to do so manually after calling.
+ */
+static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+       __releases(rq1->lock)
+       __releases(rq2->lock)
+{
+       BUG_ON(rq1 != rq2);
+       raw_spin_unlock(&rq1->lock);
+       __release(rq2->lock);
+}
+
 #endif
 
 static void calc_load_account_idle(struct rq *this_rq);
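The new UP variants exist because yield_to() below locks two runqueues; on !SMP both arguments alias the single runqueue, so the lock is taken once and __acquire()/__release() only keep sparse's acquire/release bookkeeping balanced with the SMP versions. The annotation idiom in isolation (a generalized sketch, not kernel API beyond the sparse macros):

	/* hedged sketch: balanced sparse annotations around aliased locks */
	static void lock_pair(spinlock_t *a, spinlock_t *b)
		__acquires(a)
		__acquires(b)
	{
		spin_lock(a);
		if (a == b)
			__acquire(b);	/* annotation only; no second lock */
		else
			spin_lock(b);
	}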
@@ -1880,7 +1910,7 @@ void account_system_vtime(struct task_struct *curr)
         */
        if (hardirq_count())
                __this_cpu_add(cpu_hardirq_time, delta);
-       else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
+       else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
                __this_cpu_add(cpu_softirq_time, delta);
 
        irq_time_write_end();
@@ -1920,8 +1950,40 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
                sched_rt_avg_update(rq, irq_delta);
 }
 
+static int irqtime_account_hi_update(void)
+{
+       struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+       unsigned long flags;
+       u64 latest_ns;
+       int ret = 0;
+
+       local_irq_save(flags);
+       latest_ns = this_cpu_read(cpu_hardirq_time);
+       if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
+               ret = 1;
+       local_irq_restore(flags);
+       return ret;
+}
+
+static int irqtime_account_si_update(void)
+{
+       struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+       unsigned long flags;
+       u64 latest_ns;
+       int ret = 0;
+
+       local_irq_save(flags);
+       latest_ns = this_cpu_read(cpu_softirq_time);
+       if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
+               ret = 1;
+       local_irq_restore(flags);
+       return ret;
+}
+
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 
+#define sched_clock_irqtime    (0)
+
 static void update_rq_clock_task(struct rq *rq, s64 delta)
 {
        rq->clock_task += delta;
@@ -2025,14 +2087,14 @@ inline int task_curr(const struct task_struct *p)
 
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                                       const struct sched_class *prev_class,
-                                      int oldprio, int running)
+                                      int oldprio)
 {
        if (prev_class != p->sched_class) {
                if (prev_class->switched_from)
-                       prev_class->switched_from(rq, p, running);
-               p->sched_class->switched_to(rq, p, running);
-       } else
-               p->sched_class->prio_changed(rq, p, oldprio, running);
+                       prev_class->switched_from(rq, p);
+               p->sched_class->switched_to(rq, p);
+       } else if (oldprio != p->prio)
+               p->sched_class->prio_changed(rq, p, oldprio);
 }
 
 static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
@@ -2224,7 +2286,10 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                 * yield - it could be a while.
                 */
                if (unlikely(on_rq)) {
-                       schedule_timeout_uninterruptible(1);
+                       ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
+
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       schedule_hrtimeout(&to, HRTIMER_MODE_REL);
                        continue;
                }
 
@@ -2265,27 +2330,6 @@ void kick_process(struct task_struct *p)
 EXPORT_SYMBOL_GPL(kick_process);
 #endif /* CONFIG_SMP */
 
-/**
- * task_oncpu_function_call - call a function on the cpu on which a task runs
- * @p:         the task to evaluate
- * @func:      the function to be called
- * @info:      the function call argument
- *
- * Calls the function @func when the task is currently running. This might
- * be on the current CPU, which just calls the function directly
- */
-void task_oncpu_function_call(struct task_struct *p,
-                             void (*func) (void *info), void *info)
-{
-       int cpu;
-
-       preempt_disable();
-       cpu = task_cpu(p);
-       if (task_curr(p))
-               smp_call_function_single(cpu, func, info, 1);
-       preempt_enable();
-}
-
 #ifdef CONFIG_SMP
 /*
  * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
@@ -2566,6 +2610,7 @@ static void __sched_fork(struct task_struct *p)
        p->se.sum_exec_runtime          = 0;
        p->se.prev_sum_exec_runtime     = 0;
        p->se.nr_migrations             = 0;
+       p->se.vruntime                  = 0;
 
 #ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@ -2776,9 +2821,12 @@ static inline void
 prepare_task_switch(struct rq *rq, struct task_struct *prev,
                    struct task_struct *next)
 {
+       sched_info_switch(prev, next);
+       perf_event_task_sched_out(prev, next);
        fire_sched_out_preempt_notifiers(prev, next);
        prepare_lock_switch(rq, next);
        prepare_arch_switch(next);
+       trace_sched_switch(prev, next);
 }
 
 /**
@@ -2911,7 +2959,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
        struct mm_struct *mm, *oldmm;
 
        prepare_task_switch(rq, prev, next);
-       trace_sched_switch(prev, next);
+
        mm = next->mm;
        oldmm = prev->active_mm;
        /*
@@ -3567,6 +3615,32 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
        }
 }
 
+/*
+ * Account system cpu time to a process and desired cpustat field
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in kernel space since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
+ * @target_cputime64: pointer to cpustat field that has to be updated
+ */
+static inline
+void __account_system_time(struct task_struct *p, cputime_t cputime,
+                       cputime_t cputime_scaled, cputime64_t *target_cputime64)
+{
+       cputime64_t tmp = cputime_to_cputime64(cputime);
+
+       /* Add system time to process. */
+       p->stime = cputime_add(p->stime, cputime);
+       p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
+       account_group_system_time(p, cputime);
+
+       /* Add system time to cpustat. */
+       *target_cputime64 = cputime64_add(*target_cputime64, tmp);
+       cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
+
+       /* Account for system time used */
+       acct_update_integrals(p);
+}
+
 /*
  * Account system cpu time to a process.
  * @p: the process that the cpu time gets accounted to
@@ -3578,36 +3652,26 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
                         cputime_t cputime, cputime_t cputime_scaled)
 {
        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-       cputime64_t tmp;
+       cputime64_t *target_cputime64;
 
        if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
                account_guest_time(p, cputime, cputime_scaled);
                return;
        }
 
-       /* Add system time to process. */
-       p->stime = cputime_add(p->stime, cputime);
-       p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
-       account_group_system_time(p, cputime);
-
-       /* Add system time to cpustat. */
-       tmp = cputime_to_cputime64(cputime);
        if (hardirq_count() - hardirq_offset)
-               cpustat->irq = cputime64_add(cpustat->irq, tmp);
+               target_cputime64 = &cpustat->irq;
        else if (in_serving_softirq())
-               cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+               target_cputime64 = &cpustat->softirq;
        else
-               cpustat->system = cputime64_add(cpustat->system, tmp);
+               target_cputime64 = &cpustat->system;
 
-       cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
-
-       /* Account for system time used */
-       acct_update_integrals(p);
+       __account_system_time(p, cputime, cputime_scaled, target_cputime64);
 }
 
 /*
  * Account for involuntary wait time.
- * @steal: the cpu time spent in involuntary wait
+ * @cputime: the cpu time spent in involuntary wait
  */
 void account_steal_time(cputime_t cputime)
 {
@@ -3635,6 +3699,73 @@ void account_idle_time(cputime_t cputime)
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * Account a tick to a process and cpustat
+ * @p: the process that the cpu time gets accounted to
+ * @user_tick: is the tick from userspace
+ * @rq: the pointer to rq
+ *
+ * Tick demultiplexing follows the order
+ * - pending hardirq update
+ * - pending softirq update
+ * - user_time
+ * - idle_time
+ * - system time
+ *   - check for guest_time
+ *   - else account as system_time
+ *
+ * The check for hardirq is done for both system and user time, as no
+ * timer goes off while we are in a hardirq, so we may never get an
+ * opportunity to update it solely on a system-time tick.
+ * p->stime and friends are only updated on system time, not on irq or
+ * softirq time, as those no longer count in task exec_runtime.
+ */
+static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
+                                               struct rq *rq)
+{
+       cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+       cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
+       struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+
+       if (irqtime_account_hi_update()) {
+               cpustat->irq = cputime64_add(cpustat->irq, tmp);
+       } else if (irqtime_account_si_update()) {
+               cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+       } else if (this_cpu_ksoftirqd() == p) {
+               /*
+                * ksoftirqd time does not get accounted in cpu_softirq_time,
+                * so we have to handle it separately here.
+                * Also, p->stime needs to be updated for ksoftirqd.
+                */
+               __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
+                                       &cpustat->softirq);
+       } else if (user_tick) {
+               account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+       } else if (p == rq->idle) {
+               account_idle_time(cputime_one_jiffy);
+       } else if (p->flags & PF_VCPU) { /* System time or guest time */
+               account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
+       } else {
+               __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
+                                       &cpustat->system);
+       }
+}
+
+static void irqtime_account_idle_ticks(int ticks)
+{
+       int i;
+       struct rq *rq = this_rq();
+
+       for (i = 0; i < ticks; i++)
+               irqtime_account_process_tick(current, 0, rq);
+}
+#else /* CONFIG_IRQ_TIME_ACCOUNTING */
+static void irqtime_account_idle_ticks(int ticks) {}
+static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
+                                               struct rq *rq) {}
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
 /*
  * Account a single tick of cpu time.
  * @p: the process that the cpu time gets accounted to
@@ -3645,6 +3776,11 @@ void account_process_tick(struct task_struct *p, int user_tick)
        cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
        struct rq *rq = this_rq();
 
+       if (sched_clock_irqtime) {
+               irqtime_account_process_tick(p, user_tick, rq);
+               return;
+       }
+
        if (user_tick)
                account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
@@ -3670,6 +3806,12 @@ void account_steal_ticks(unsigned long ticks)
  */
 void account_idle_ticks(unsigned long ticks)
 {
+
+       if (sched_clock_irqtime) {
+               irqtime_account_idle_ticks(ticks);
+               return;
+       }
+
        account_idle_time(jiffies_to_cputime(ticks));
 }
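Under CONFIG_IRQ_TIME_ACCOUNTING each tick is classified in the fixed order documented above: pending hardirq time, pending softirq time, ksoftirqd, user, idle, guest, then plain system time; sched_clock_irqtime gates the whole path at run time and compiles to 0 when the feature is off. The demultiplexing order as a pure function, with the predicates passed in (names here are illustrative, not kernel symbols):

	/* hedged sketch: per-tick classification order from the hunk above */
	enum tick_bucket { B_IRQ, B_SOFTIRQ, B_USER, B_IDLE, B_GUEST, B_SYSTEM };

	static enum tick_bucket classify(int hi_pending, int si_pending,
					 int is_ksoftirqd, int user_tick,
					 int is_idle, int is_guest)
	{
		if (hi_pending)		return B_IRQ;
		if (si_pending)		return B_SOFTIRQ;
		if (is_ksoftirqd)	return B_SOFTIRQ; /* plus p->stime */
		if (user_tick)		return B_USER;
		if (is_idle)		return B_IDLE;
		if (is_guest)		return B_GUEST;
		return B_SYSTEM;
	}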
 
@@ -3989,9 +4131,6 @@ need_resched_nonpreemptible:
        rq->skip_clock_update = 0;
 
        if (likely(prev != next)) {
-               sched_info_switch(prev, next);
-               perf_event_task_sched_out(prev, next);
-
                rq->nr_switches++;
                rq->curr = next;
                ++*switch_count;
@@ -4213,6 +4352,7 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
 {
        __wake_up_common(q, mode, 1, 0, key);
 }
+EXPORT_SYMBOL_GPL(__wake_up_locked_key);
 
 /**
  * __wake_up_sync_key - wake up threads blocked on a waitqueue.
@@ -4570,11 +4710,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq) {
+       if (on_rq)
                enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
 
-               check_class_changed(rq, p, prev_class, oldprio, running);
-       }
+       check_class_changed(rq, p, prev_class, oldprio);
        task_rq_unlock(rq, &flags);
 }
 
@@ -4822,12 +4961,15 @@ recheck:
                            param->sched_priority > rlim_rtprio)
                                return -EPERM;
                }
+
                /*
-                * Like positive nice levels, dont allow tasks to
-                * move out of SCHED_IDLE either:
+                * Treat SCHED_IDLE as nice 20. Only allow a switch to
+                * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
                 */
-               if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
-                       return -EPERM;
+               if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
+                       if (!can_nice(p, TASK_NICE(p)))
+                               return -EPERM;
+               }
 
                /* can't change other user's priorities */
                if (!check_same_owner(p))
@@ -4902,11 +5044,10 @@ recheck:
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq) {
+       if (on_rq)
                activate_task(rq, p, 0);
 
-               check_class_changed(rq, p, prev_class, oldprio, running);
-       }
+       check_class_changed(rq, p, prev_class, oldprio);
        __task_rq_unlock(rq);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
@@ -5323,6 +5464,65 @@ void __sched yield(void)
 }
 EXPORT_SYMBOL(yield);
 
+/**
+ * yield_to - yield the current processor to another thread in
+ * your thread group, or accelerate that thread toward the
+ * processor it's on.
+ *
+ * It's the caller's job to ensure that the target task struct
+ * can't go away on us before we can do any checks.
+ *
+ * Returns true if we indeed boosted the target task.
+ */
+bool __sched yield_to(struct task_struct *p, bool preempt)
+{
+       struct task_struct *curr = current;
+       struct rq *rq, *p_rq;
+       unsigned long flags;
+       bool yielded = 0;
+
+       local_irq_save(flags);
+       rq = this_rq();
+
+again:
+       p_rq = task_rq(p);
+       double_rq_lock(rq, p_rq);
+       while (task_rq(p) != p_rq) {
+               double_rq_unlock(rq, p_rq);
+               goto again;
+       }
+
+       if (!curr->sched_class->yield_to_task)
+               goto out;
+
+       if (curr->sched_class != p->sched_class)
+               goto out;
+
+       if (task_running(p_rq, p) || p->state)
+               goto out;
+
+       yielded = curr->sched_class->yield_to_task(rq, p, preempt);
+       if (yielded) {
+               schedstat_inc(rq, yld_count);
+               /*
+                * Make p's CPU reschedule; pick_next_entity takes care of
+                * fairness.
+                */
+               if (preempt && rq != p_rq)
+                       resched_task(p_rq->curr);
+       }
+
+out:
+       double_rq_unlock(rq, p_rq);
+       local_irq_restore(flags);
+
+       if (yielded)
+               schedule();
+
+       return yielded;
+}
+EXPORT_SYMBOL_GPL(yield_to);
+
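yield_to() is exported for callers, with a virtualization layer as the motivating user, that know which thread holds a contended resource and want to hand it the CPU rather than spin. A hedged sketch of such a caller; the function name and the surrounding reference counting are assumptions, not shown in this patch:

	/* hedged sketch: hypothetical caller boosting a known lock holder */
	static void boost_lock_holder(struct task_struct *holder)
	{
		/* caller must already hold a reference on 'holder' */
		if (!yield_to(holder, true))
			cpu_relax();	/* nothing yielded; back off briefly */
	}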
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
@@ -5571,7 +5771,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         * The idle tasks have their own, simple scheduling class:
         */
        idle->sched_class = &idle_sched_class;
-       ftrace_graph_init_task(idle);
+       ftrace_graph_init_idle_task(idle, cpu);
 }
 
 /*
@@ -7796,6 +7996,10 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
        INIT_LIST_HEAD(&cfs_rq->tasks);
 #ifdef CONFIG_FAIR_GROUP_SCHED
        cfs_rq->rq = rq;
+       /* allow initial update_cfs_load() to truncate */
+#ifdef CONFIG_SMP
+       cfs_rq->load_stamp = 1;
+#endif
 #endif
        cfs_rq->min_vruntime = (u64)(-(1LL << 20));
 }
@@ -8109,6 +8313,8 @@ EXPORT_SYMBOL(__might_sleep);
 #ifdef CONFIG_MAGIC_SYSRQ
 static void normalize_task(struct rq *rq, struct task_struct *p)
 {
+       const struct sched_class *prev_class = p->sched_class;
+       int old_prio = p->prio;
        int on_rq;
 
        on_rq = p->se.on_rq;
@@ -8119,6 +8325,8 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
                activate_task(rq, p, 0);
                resched_task(rq->curr);
        }
+
+       check_class_changed(rq, p, prev_class, old_prio);
 }
 
 void normalize_rt_tasks(void)
@@ -8510,7 +8718,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
                /* Propagate contribution to hierarchy */
                raw_spin_lock_irqsave(&rq->lock, flags);
                for_each_sched_entity(se)
-                       update_cfs_shares(group_cfs_rq(se), 0);
+                       update_cfs_shares(group_cfs_rq(se));
                raw_spin_unlock_irqrestore(&rq->lock, flags);
        }
 
@@ -8884,7 +9092,8 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 }
 
 static void
-cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
+cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
+               struct cgroup *old_cgrp, struct task_struct *task)
 {
        /*
         * cgroup_exit() is called in the copy_process() failure path.
index 9fb65628315709b36d4bb6d9bd100644d2a00992..5946ac51560244c9443be785b652886405de1888 100644 kernel/sched_autogroup.c
@@ -12,7 +12,6 @@ static atomic_t autogroup_seq_nr;
 static void __init autogroup_init(struct task_struct *init_task)
 {
        autogroup_default.tg = &root_task_group;
-       root_task_group.autogroup = &autogroup_default;
        kref_init(&autogroup_default.kref);
        init_rwsem(&autogroup_default.lock);
        init_task->signal->autogroup = &autogroup_default;
@@ -130,7 +129,7 @@ task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 
 static inline bool task_group_is_autogroup(struct task_group *tg)
 {
-       return tg != &root_task_group && tg->autogroup;
+       return !!tg->autogroup;
 }
 
 static inline struct task_group *
@@ -161,11 +160,15 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 
        p->signal->autogroup = autogroup_kref_get(ag);
 
+       if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
+               goto out;
+
        t = p;
        do {
                sched_move_task(t);
        } while_each_thread(p, t);
 
+out:
        unlock_task_sighand(p, &flags);
        autogroup_kref_put(prev);
 }
@@ -247,10 +250,14 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
 {
        struct autogroup *ag = autogroup_task_get(p);
 
+       if (!task_group_is_autogroup(ag->tg))
+               goto out;
+
        down_read(&ag->lock);
        seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
        up_read(&ag->lock);
 
+out:
        autogroup_kref_put(ag);
 }
 #endif /* CONFIG_PROC_FS */
@@ -258,9 +265,7 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
 #ifdef CONFIG_SCHED_DEBUG
 static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
 {
-       int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
-
-       if (!enabled || !tg->autogroup)
+       if (!task_group_is_autogroup(tg))
                return 0;
 
        return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
index 7b859ffe5dadd677cce89f833ee43a242ea3a992..05577055cfcaa4dc7ac055cb495f8e4ac97d96c4 100644 kernel/sched_autogroup.h
@@ -1,6 +1,11 @@
 #ifdef CONFIG_SCHED_AUTOGROUP
 
 struct autogroup {
+       /*
+        * The kref does not count how many threads are attached
+        * to this autogroup right now; it is the number of tasks
+        * that could still use this autogroup.
+        */
        struct kref             kref;
        struct task_group       *tg;
        struct rw_semaphore     lock;
index eb6cb8edd075d9372b0547162a2930b06d14cacc..7bacd83a4158ca7157ae60e5e0fcec2fe8ff1e82 100644 kernel/sched_debug.c
@@ -179,7 +179,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 
        raw_spin_lock_irqsave(&rq->lock, flags);
        if (cfs_rq->rb_leftmost)
-               MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
+               MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
index 0c26e2df450ee534e79f1265851100245b30a9cd..3f7ec9e27ee1a259f223549211ab9fe6ec75c3fa 100644 kernel/sched_fair.c
@@ -68,14 +68,6 @@ static unsigned int sched_nr_latency = 8;
  */
 unsigned int sysctl_sched_child_runs_first __read_mostly;
 
-/*
- * sys_sched_yield() compat mode
- *
- * This option switches the agressive yield implementation of the
- * old scheduler back on.
- */
-unsigned int __read_mostly sysctl_sched_compat_yield;
-
 /*
  * SCHED_OTHER wake-up granularity.
  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
@@ -419,7 +411,7 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 }
 
-static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
+static struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
 {
        struct rb_node *left = cfs_rq->rb_leftmost;
 
@@ -429,6 +421,17 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
        return rb_entry(left, struct sched_entity, run_node);
 }
 
+static struct sched_entity *__pick_next_entity(struct sched_entity *se)
+{
+       struct rb_node *next = rb_next(&se->run_node);
+
+       if (!next)
+               return NULL;
+
+       return rb_entry(next, struct sched_entity, run_node);
+}
+
+#ifdef CONFIG_SCHED_DEBUG
 static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
@@ -443,7 +446,6 @@ static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  * Scheduling class statistics methods:
  */
 
-#ifdef CONFIG_SCHED_DEBUG
 int sched_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
@@ -540,7 +542,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
-static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
+static void update_cfs_shares(struct cfs_rq *cfs_rq);
 
 /*
  * Update the current task's runtime statistics. Skip current tasks that
@@ -733,6 +735,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
            now - cfs_rq->load_last > 4 * period) {
                cfs_rq->load_period = 0;
                cfs_rq->load_avg = 0;
+               delta = period - 1;
        }
 
        cfs_rq->load_stamp = now;
@@ -763,16 +766,15 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
                list_del_leaf_cfs_rq(cfs_rq);
 }
 
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
-                               long weight_delta)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
        long load_weight, load, shares;
 
-       load = cfs_rq->load.weight + weight_delta;
+       load = cfs_rq->load.weight;
 
        load_weight = atomic_read(&tg->load_weight);
-       load_weight -= cfs_rq->load_contribution;
        load_weight += load;
+       load_weight -= cfs_rq->load_contribution;
 
        shares = (tg->shares * load);
        if (load_weight)
@@ -790,7 +792,7 @@ static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
 {
        if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
                update_cfs_load(cfs_rq, 0);
-               update_cfs_shares(cfs_rq, 0);
+               update_cfs_shares(cfs_rq);
        }
 }
 # else /* CONFIG_SMP */
@@ -798,8 +800,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 }
 
-static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
-                               long weight_delta)
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
        return tg->shares;
 }
@@ -824,7 +825,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                account_entity_enqueue(cfs_rq, se);
 }
 
-static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
+static void update_cfs_shares(struct cfs_rq *cfs_rq)
 {
        struct task_group *tg;
        struct sched_entity *se;
@@ -838,7 +839,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
        if (likely(se->load.weight == tg->shares))
                return;
 #endif
-       shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
+       shares = calc_cfs_shares(cfs_rq, tg);
 
        reweight_entity(cfs_rq_of(se), se, shares);
 }
@@ -847,7 +848,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 }
 
-static inline void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
+static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
 {
 }
 
@@ -978,8 +979,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         */
        update_curr(cfs_rq);
        update_cfs_load(cfs_rq, 0);
-       update_cfs_shares(cfs_rq, se->load.weight);
        account_entity_enqueue(cfs_rq, se);
+       update_cfs_shares(cfs_rq);
 
        if (flags & ENQUEUE_WAKEUP) {
                place_entity(cfs_rq, se, 0);
@@ -996,19 +997,49 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
                list_add_leaf_cfs_rq(cfs_rq);
 }
 
-static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __clear_buddies_last(struct sched_entity *se)
+{
+       for_each_sched_entity(se) {
+               struct cfs_rq *cfs_rq = cfs_rq_of(se);
+               if (cfs_rq->last == se)
+                       cfs_rq->last = NULL;
+               else
+                       break;
+       }
+}
+
+static void __clear_buddies_next(struct sched_entity *se)
 {
-       if (!se || cfs_rq->last == se)
-               cfs_rq->last = NULL;
+       for_each_sched_entity(se) {
+               struct cfs_rq *cfs_rq = cfs_rq_of(se);
+               if (cfs_rq->next == se)
+                       cfs_rq->next = NULL;
+               else
+                       break;
+       }
+}
 
-       if (!se || cfs_rq->next == se)
-               cfs_rq->next = NULL;
+static void __clear_buddies_skip(struct sched_entity *se)
+{
+       for_each_sched_entity(se) {
+               struct cfs_rq *cfs_rq = cfs_rq_of(se);
+               if (cfs_rq->skip == se)
+                       cfs_rq->skip = NULL;
+               else
+                       break;
+       }
 }
 
 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       for_each_sched_entity(se)
-               __clear_buddies(cfs_rq_of(se), se);
+       if (cfs_rq->last == se)
+               __clear_buddies_last(se);
+
+       if (cfs_rq->next == se)
+               __clear_buddies_next(se);
+
+       if (cfs_rq->skip == se)
+               __clear_buddies_skip(se);
 }
 
 static void
@@ -1041,7 +1072,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
        update_cfs_load(cfs_rq, 0);
        account_entity_dequeue(cfs_rq, se);
        update_min_vruntime(cfs_rq);
-       update_cfs_shares(cfs_rq, 0);
+       update_cfs_shares(cfs_rq);
 
        /*
         * Normalize the entity after updating the min_vruntime because the
@@ -1084,7 +1115,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
                return;
 
        if (cfs_rq->nr_running > 1) {
-               struct sched_entity *se = __pick_next_entity(cfs_rq);
+               struct sched_entity *se = __pick_first_entity(cfs_rq);
                s64 delta = curr->vruntime - se->vruntime;
 
                if (delta < 0)
@@ -1128,13 +1159,27 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static int
 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 
+/*
+ * Pick the next process, keeping these things in mind, in this order:
+ * 1) keep things fair between processes/task groups
+ * 2) pick the "next" process, since someone really wants that to run
+ * 3) pick the "last" process, for cache locality
+ * 4) do not run the "skip" process, if something else is available
+ */
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
-       struct sched_entity *se = __pick_next_entity(cfs_rq);
+       struct sched_entity *se = __pick_first_entity(cfs_rq);
        struct sched_entity *left = se;
 
-       if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
-               se = cfs_rq->next;
+       /*
+        * Avoid running the skip buddy, if running something else can
+        * be done without getting too unfair.
+        */
+       if (cfs_rq->skip == se) {
+               struct sched_entity *second = __pick_next_entity(se);
+               if (second && wakeup_preempt_entity(second, left) < 1)
+                       se = second;
+       }
 
        /*
         * Prefer last buddy, try to return the CPU to a preempted task.
@@ -1142,6 +1187,12 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
        if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
                se = cfs_rq->last;
 
+       /*
+        * Someone really wants this to run. If it's not unfair, run it.
+        */
+       if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
+               se = cfs_rq->next;
+
        clear_buddies(cfs_rq, se);
 
        return se;
@@ -1282,7 +1333,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
                update_cfs_load(cfs_rq, 0);
-               update_cfs_shares(cfs_rq, 0);
+               update_cfs_shares(cfs_rq);
        }
 
        hrtick_update(rq);
@@ -1312,58 +1363,12 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
                update_cfs_load(cfs_rq, 0);
-               update_cfs_shares(cfs_rq, 0);
+               update_cfs_shares(cfs_rq);
        }
 
        hrtick_update(rq);
 }
 
-/*
- * sched_yield() support is very simple - we dequeue and enqueue.
- *
- * If compat_yield is turned on then we requeue to the end of the tree.
- */
-static void yield_task_fair(struct rq *rq)
-{
-       struct task_struct *curr = rq->curr;
-       struct cfs_rq *cfs_rq = task_cfs_rq(curr);
-       struct sched_entity *rightmost, *se = &curr->se;
-
-       /*
-        * Are we the only task in the tree?
-        */
-       if (unlikely(cfs_rq->nr_running == 1))
-               return;
-
-       clear_buddies(cfs_rq, se);
-
-       if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
-               update_rq_clock(rq);
-               /*
-                * Update run-time statistics of the 'current'.
-                */
-               update_curr(cfs_rq);
-
-               return;
-       }
-       /*
-        * Find the rightmost entry in the rbtree:
-        */
-       rightmost = __pick_last_entity(cfs_rq);
-       /*
-        * Already in the rightmost position?
-        */
-       if (unlikely(!rightmost || entity_before(rightmost, se)))
-               return;
-
-       /*
-        * Minimally necessary key value to be last in the tree:
-        * Upon rescheduling, sched_class::put_prev_task() will place
-        * 'current' within the tree based on its new key value.
-        */
-       se->vruntime = rightmost->vruntime + 1;
-}
-
 #ifdef CONFIG_SMP
 
 static void task_waking_fair(struct rq *rq, struct task_struct *p)
@@ -1834,6 +1839,14 @@ static void set_next_buddy(struct sched_entity *se)
        }
 }
 
+static void set_skip_buddy(struct sched_entity *se)
+{
+       if (likely(task_of(se)->policy != SCHED_IDLE)) {
+               for_each_sched_entity(se)
+                       cfs_rq_of(se)->skip = se;
+       }
+}
+
 /*
  * Preempt the current task with a newly woken task if needed:
  */
@@ -1857,16 +1870,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
        if (test_tsk_need_resched(curr))
                return;
 
+       /* Idle tasks are by definition preempted by non-idle tasks. */
+       if (unlikely(curr->policy == SCHED_IDLE) &&
+           likely(p->policy != SCHED_IDLE))
+               goto preempt;
+
        /*
-        * Batch and idle tasks do not preempt (their preemption is driven by
-        * the tick):
+        * Batch and idle tasks do not preempt non-idle tasks (their preemption
+        * is driven by the tick):
         */
        if (unlikely(p->policy != SCHED_NORMAL))
                return;
 
-       /* Idle tasks are by definition preempted by everybody. */
-       if (unlikely(curr->policy == SCHED_IDLE))
-               goto preempt;
 
        if (!sched_feat(WAKEUP_PREEMPT))
                return;
@@ -1932,6 +1947,51 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
        }
 }
 
+/*
+ * sched_yield() is very simple
+ *
+ * The magic of dealing with the ->skip buddy is in pick_next_entity.
+ */
+static void yield_task_fair(struct rq *rq)
+{
+       struct task_struct *curr = rq->curr;
+       struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+       struct sched_entity *se = &curr->se;
+
+       /*
+        * Are we the only task in the tree?
+        */
+       if (unlikely(rq->nr_running == 1))
+               return;
+
+       clear_buddies(cfs_rq, se);
+
+       if (curr->policy != SCHED_BATCH) {
+               update_rq_clock(rq);
+               /*
+                * Update run-time statistics of the 'current'.
+                */
+               update_curr(cfs_rq);
+       }
+
+       set_skip_buddy(se);
+}
+
+static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
+{
+       struct sched_entity *se = &p->se;
+
+       if (!se->on_rq)
+               return false;
+
+       /* Tell the scheduler that we'd really like pse to run next. */
+       set_next_buddy(se);
+
+       yield_task_fair(rq);
+
+       return true;
+}
+
 #ifdef CONFIG_SMP
 /**************************************************
  * Fair scheduling class load-balancing methods:
@@ -2123,7 +2183,7 @@ static int update_shares_cpu(struct task_group *tg, int cpu)
         * We need to update shares after updating tg->load_weight in
         * order to adjust the weight of groups with long running tasks.
         */
-       update_cfs_shares(cfs_rq, 0);
+       update_cfs_shares(cfs_rq);
 
        raw_spin_unlock_irqrestore(&rq->lock, flags);
 
@@ -2610,7 +2670,6 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
- * @sd_idle: Idle status of the sched_domain containing group.
  * @local_group: Does group contain this_cpu.
  * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
@@ -2618,7 +2677,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  */
 static inline void update_sg_lb_stats(struct sched_domain *sd,
                        struct sched_group *group, int this_cpu,
-                       enum cpu_idle_type idle, int load_idx, int *sd_idle,
+                       enum cpu_idle_type idle, int load_idx,
                        int local_group, const struct cpumask *cpus,
                        int *balance, struct sg_lb_stats *sgs)
 {
@@ -2638,9 +2697,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        for_each_cpu_and(i, sched_group_cpus(group), cpus) {
                struct rq *rq = cpu_rq(i);
 
-               if (*sd_idle && rq->nr_running)
-                       *sd_idle = 0;
-
                /* Bias balancing toward cpus of our domain */
                if (local_group) {
                        if (idle_cpu(i) && !first_idle_cpu) {
@@ -2685,7 +2741,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 
        /*
         * Consider the group unbalanced when the imbalance is larger
-        * than the average weight of two tasks.
+        * than the average weight of a task.
         *
         * APZ: with cgroup the avg task weight can vary wildly and
         *      might not be a suitable number - should we keep a
@@ -2695,7 +2751,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        if (sgs->sum_nr_running)
                avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
-       if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1)
+       if ((max_cpu_load - min_cpu_load) > avg_load_per_task && max_nr_running > 1)
                sgs->group_imb = 1;
 
        sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
@@ -2755,15 +2811,13 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
  * @sd: sched_domain whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
- * @sd_idle: Idle status of the sched_domain containing sg.
  * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sds: variable to hold the statistics for this sched_domain.
  */
 static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
-                       enum cpu_idle_type idle, int *sd_idle,
-                       const struct cpumask *cpus, int *balance,
-                       struct sd_lb_stats *sds)
+                       enum cpu_idle_type idle, const struct cpumask *cpus,
+                       int *balance, struct sd_lb_stats *sds)
 {
        struct sched_domain *child = sd->child;
        struct sched_group *sg = sd->groups;
@@ -2781,7 +2835,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 
                local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
                memset(&sgs, 0, sizeof(sgs));
-               update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx, sd_idle,
+               update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx,
                                local_group, cpus, balance, &sgs);
 
                if (local_group && !(*balance))
@@ -3033,7 +3087,6 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
  * @imbalance: Variable which stores amount of weighted load which should
  *             be moved to restore balance/put a group to idle.
  * @idle: The idle status of this_cpu.
- * @sd_idle: The idleness of sd
  * @cpus: The set of CPUs under consideration for load-balancing.
  * @balance: Pointer to a variable indicating if this_cpu
  *     is the appropriate cpu to perform load balancing at this_level.
@@ -3046,7 +3099,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
                   unsigned long *imbalance, enum cpu_idle_type idle,
-                  int *sd_idle, const struct cpumask *cpus, int *balance)
+                  const struct cpumask *cpus, int *balance)
 {
        struct sd_lb_stats sds;
 
@@ -3056,22 +3109,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
         * Compute the various statistics relevant for load balancing at
         * this level.
         */
-       update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
-                                       balance, &sds);
-
-       /* Cases where imbalance does not exist from POV of this_cpu */
-       /* 1) this_cpu is not the appropriate cpu to perform load balancing
-        *    at this level.
-        * 2) There is no busy sibling group to pull from.
-        * 3) This group is the busiest group.
-        * 4) This group is more busy than the avg busieness at this
-        *    sched_domain.
-        * 5) The imbalance is within the specified limit.
-        *
-        * Note: when doing newidle balance, if the local group has excess
-        * capacity (i.e. nr_running < group_capacity) and the busiest group
-        * does not have any capacity, we force a load balance to pull tasks
-        * to the local group. In this case, we skip past checks 3, 4 and 5.
+       update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);
+
+       /*
+        * this_cpu is not the appropriate cpu to perform load balancing at
+        * this level.
         */
        if (!(*balance))
                goto ret;
@@ -3080,41 +3122,55 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
            check_asym_packing(sd, &sds, this_cpu, imbalance))
                return sds.busiest;
 
+       /* There is no busy sibling group to pull tasks from */
        if (!sds.busiest || sds.busiest_nr_running == 0)
                goto out_balanced;
 
-       /*  SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
+       /*
+        * If the busiest group is imbalanced the below checks don't
+        * work because they assume all things are equal, which typically
+        * isn't true due to cpus_allowed constraints and the like.
+        */
+       if (sds.group_imb)
+               goto force_balance;
+
+       /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
        if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
                        !sds.busiest_has_capacity)
                goto force_balance;
 
+       /*
+        * If the local group is more busy than the selected busiest group
+        * don't try and pull any tasks.
+        */
        if (sds.this_load >= sds.max_load)
                goto out_balanced;
 
+       /*
+        * Don't pull any tasks if this group is already above the domain
+        * average load.
+        */
        sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
-
        if (sds.this_load >= sds.avg_load)
                goto out_balanced;
 
-       /*
-        * In the CPU_NEWLY_IDLE, use imbalance_pct to be conservative.
-        * And to check for busy balance use !idle_cpu instead of
-        * CPU_NOT_IDLE. This is because HT siblings will use CPU_NOT_IDLE
-        * even when they are idle.
-        */
-       if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) {
-               if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
-                       goto out_balanced;
-       } else {
+       if (idle == CPU_IDLE) {
                /*
                 * This cpu is idle. If the busiest group load doesn't
                 * have more tasks than the number of available cpu's and
                 * there is no imbalance between this and busiest group
                 * wrt to idle cpu's, it is balanced.
                 */
-               if ((sds.this_idle_cpus  <= sds.busiest_idle_cpus + 1) &&
+               if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
                    sds.busiest_nr_running <= sds.busiest_group_weight)
                        goto out_balanced;
+       } else {
+               /*
+                * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
+                * imbalance_pct to be conservative.
+                */
+               if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+                       goto out_balanced;
        }
 
 force_balance:
@@ -3193,7 +3249,7 @@ find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
 /* Working cpumask for load_balance and load_balance_newidle. */
 static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
 
-static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle,
+static int need_active_balance(struct sched_domain *sd, int idle,
                               int busiest_cpu, int this_cpu)
 {
        if (idle == CPU_NEWLY_IDLE) {
@@ -3225,10 +3281,6 @@ static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle,
                 * move_tasks() will succeed.  ld_moved will be true and this
                 * active balance code will not be triggered.
                 */
-               if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
-                   !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-                       return 0;
-
                if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
                        return 0;
        }
@@ -3246,7 +3298,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
                        struct sched_domain *sd, enum cpu_idle_type idle,
                        int *balance)
 {
-       int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
+       int ld_moved, all_pinned = 0, active_balance = 0;
        struct sched_group *group;
        unsigned long imbalance;
        struct rq *busiest;
@@ -3255,20 +3307,10 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
        cpumask_copy(cpus, cpu_active_mask);
 
-       /*
-        * When power savings policy is enabled for the parent domain, idle
-        * sibling can pick up load irrespective of busy siblings. In this case,
-        * let the state of idle sibling percolate up as CPU_IDLE, instead of
-        * portraying it as CPU_NOT_IDLE.
-        */
-       if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
-           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-               sd_idle = 1;
-
        schedstat_inc(sd, lb_count[idle]);
 
 redo:
-       group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
+       group = find_busiest_group(sd, this_cpu, &imbalance, idle,
                                   cpus, balance);
 
        if (*balance == 0)
@@ -3330,8 +3372,7 @@ redo:
                if (idle != CPU_NEWLY_IDLE)
                        sd->nr_balance_failed++;
 
-               if (need_active_balance(sd, sd_idle, idle, cpu_of(busiest),
-                                       this_cpu)) {
+               if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) {
                        raw_spin_lock_irqsave(&busiest->lock, flags);
 
                        /* don't kick the active_load_balance_cpu_stop,
@@ -3386,10 +3427,6 @@ redo:
                        sd->balance_interval *= 2;
        }
 
-       if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
-           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-               ld_moved = -1;
-
        goto out;
 
 out_balanced:
@@ -3403,11 +3440,7 @@ out_one_pinned:
                        (sd->balance_interval < sd->max_interval))
                sd->balance_interval *= 2;
 
-       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
-           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-               ld_moved = -1;
-       else
-               ld_moved = 0;
+       ld_moved = 0;
 out:
        return ld_moved;
 }
@@ -3831,8 +3864,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
                        if (load_balance(cpu, rq, sd, idle, &balance)) {
                                /*
                                 * We've pulled tasks over so either we're no
-                                * longer idle, or one of our SMT siblings is
-                                * not idle.
+                                * longer idle.
                                 */
                                idle = CPU_NOT_IDLE;
                        }
@@ -4079,33 +4111,62 @@ static void task_fork_fair(struct task_struct *p)
  * Priority of the task has changed. Check to see if we preempt
  * the current task.
  */
-static void prio_changed_fair(struct rq *rq, struct task_struct *p,
-                             int oldprio, int running)
+static void
+prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 {
+       if (!p->se.on_rq)
+               return;
+
        /*
         * Reschedule if we are currently running on this runqueue and
         * our priority decreased, or if we are not currently running on
         * this runqueue and our priority is higher than the current's
         */
-       if (running) {
+       if (rq->curr == p) {
                if (p->prio > oldprio)
                        resched_task(rq->curr);
        } else
                check_preempt_curr(rq, p, 0);
 }
 
+static void switched_from_fair(struct rq *rq, struct task_struct *p)
+{
+       struct sched_entity *se = &p->se;
+       struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+       /*
+        * Ensure the task's vruntime is normalized, so that when its
+        * switched back to the fair class the enqueue_entity(.flags=0) will
+        * do the right thing.
+        *
+        * If it was on_rq, then the dequeue_entity(.flags=0) will already
+        * have normalized the vruntime, if it was !on_rq, then only when
+        * the task is sleeping will it still have non-normalized vruntime.
+        */
+       if (!se->on_rq && p->state != TASK_RUNNING) {
+               /*
+                * Fix up our vruntime so that the current sleep doesn't
+                * cause 'unlimited' sleep bonus.
+                */
+               place_entity(cfs_rq, se, 0);
+               se->vruntime -= cfs_rq->min_vruntime;
+       }
+}
+
 /*
  * We switched to the sched_fair class.
  */
-static void switched_to_fair(struct rq *rq, struct task_struct *p,
-                            int running)
+static void switched_to_fair(struct rq *rq, struct task_struct *p)
 {
+       if (!p->se.on_rq)
+               return;
+
        /*
         * We were most likely switched from sched_rt, so
         * kick off the schedule if running, otherwise just see
         * if we can still preempt the current task.
         */
-       if (running)
+       if (rq->curr == p)
                resched_task(rq->curr);
        else
                check_preempt_curr(rq, p, 0);
@@ -4171,6 +4232,7 @@ static const struct sched_class fair_sched_class = {
        .enqueue_task           = enqueue_task_fair,
        .dequeue_task           = dequeue_task_fair,
        .yield_task             = yield_task_fair,
+       .yield_to_task          = yield_to_task_fair,
 
        .check_preempt_curr     = check_preempt_wakeup,
 
@@ -4191,6 +4253,7 @@ static const struct sched_class fair_sched_class = {
        .task_fork              = task_fork_fair,
 
        .prio_changed           = prio_changed_fair,
+       .switched_from          = switched_from_fair,
        .switched_to            = switched_to_fair,
 
        .get_rr_interval        = get_rr_interval_fair,
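
The reworked pick_next_entity() above encodes a precedence among the buddies:
start from the leftmost entity, dodge the skip buddy when a fair alternative
exists, then let the last buddy and finally the next buddy override the
choice. A compact user-space sketch of that decision order, with a stand-in
for wakeup_preempt_entity() (the 1000-unit threshold and all names here are
illustrative, not the kernel implementation):

#include <stdio.h>

struct ent { const char *name; long vruntime; };

/* stand-in: a buddy may run if it isn't too far behind the fair choice */
static int preempt_ok(const struct ent *buddy, const struct ent *left)
{
        return buddy && buddy->vruntime - left->vruntime < 1000;
}

static const struct ent *pick(const struct ent *leftmost,
                              const struct ent *second,
                              const struct ent *skip,
                              const struct ent *last,
                              const struct ent *next)
{
        const struct ent *se = leftmost;

        if (skip == se && preempt_ok(second, leftmost))
                se = second;            /* avoid running the skip buddy */
        if (preempt_ok(last, leftmost))
                se = last;              /* cache-warm preempted task */
        if (preempt_ok(next, leftmost))
                se = next;              /* checked last, so it wins */
        return se;
}

int main(void)
{
        struct ent a = {"leftmost", 0}, b = {"second", 100};
        struct ent l = {"last", 200}, n = {"next", 300};

        /* leftmost is also the skip buddy: fall back, then prefer next */
        printf("picked: %s\n", pick(&a, &b, &a, &l, &n)->name);
        return 0;
}
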
index 9fa0f402c87c2aa2bf8be7f404c6cfc27b64a865..c82f26c1b7c358b6c06fc35eddc079c6aacf31bb 100644 (file)
@@ -52,31 +52,15 @@ static void set_curr_task_idle(struct rq *rq)
 {
 }
 
-static void switched_to_idle(struct rq *rq, struct task_struct *p,
-                            int running)
+static void switched_to_idle(struct rq *rq, struct task_struct *p)
 {
-       /* Can this actually happen?? */
-       if (running)
-               resched_task(rq->curr);
-       else
-               check_preempt_curr(rq, p, 0);
+       BUG();
 }
 
-static void prio_changed_idle(struct rq *rq, struct task_struct *p,
-                             int oldprio, int running)
+static void
+prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
 {
-       /* This can happen for hot plug CPUS */
-
-       /*
-        * Reschedule if we are currently running on this runqueue and
-        * our priority decreased, or if we are not currently running on
-        * this runqueue and our priority is higher than the current's
-        */
-       if (running) {
-               if (p->prio > oldprio)
-                       resched_task(rq->curr);
-       } else
-               check_preempt_curr(rq, p, 0);
+       BUG();
 }
 
 static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
index ad6267714c840b2ee53154faaece04b2f2caee8a..db308cb08b75051ab459c61efea28f52c736ea7b 100644 (file)
@@ -210,11 +210,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-       int this_cpu = smp_processor_id();
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
        struct sched_rt_entity *rt_se;
 
-       rt_se = rt_rq->tg->rt_se[this_cpu];
+       int cpu = cpu_of(rq_of_rt_rq(rt_rq));
+
+       rt_se = rt_rq->tg->rt_se[cpu];
 
        if (rt_rq->rt_nr_running) {
                if (rt_se && !on_rt_rq(rt_se))
@@ -226,10 +227,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 
 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-       int this_cpu = smp_processor_id();
        struct sched_rt_entity *rt_se;
+       int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 
-       rt_se = rt_rq->tg->rt_se[this_cpu];
+       rt_se = rt_rq->tg->rt_se[cpu];
 
        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
@@ -565,8 +566,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
-               } else if (rt_rq->rt_nr_running)
+               } else if (rt_rq->rt_nr_running) {
                        idle = 0;
+                       if (!rt_rq_throttled(rt_rq))
+                               enqueue = 1;
+               }
 
                if (enqueue)
                        sched_rt_rq_enqueue(rt_rq);
@@ -1595,8 +1599,7 @@ static void rq_offline_rt(struct rq *rq)
  * When switch from the rt queue, we bring ourselves to a position
  * that we might want to pull RT tasks from other runqueues.
  */
-static void switched_from_rt(struct rq *rq, struct task_struct *p,
-                          int running)
+static void switched_from_rt(struct rq *rq, struct task_struct *p)
 {
        /*
         * If there are other RT tasks then we will reschedule
@@ -1605,7 +1608,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p,
         * we may need to handle the pulling of RT tasks
         * now.
         */
-       if (!rq->rt.rt_nr_running)
+       if (p->se.on_rq && !rq->rt.rt_nr_running)
                pull_rt_task(rq);
 }
 
@@ -1624,8 +1627,7 @@ static inline void init_sched_rt_class(void)
  * with RT tasks. In this case we try to push them off to
  * other runqueues.
  */
-static void switched_to_rt(struct rq *rq, struct task_struct *p,
-                          int running)
+static void switched_to_rt(struct rq *rq, struct task_struct *p)
 {
        int check_resched = 1;
 
@@ -1636,7 +1638,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
         * If that current running task is also an RT task
         * then see if we can move to another run queue.
         */
-       if (!running) {
+       if (p->se.on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
                if (rq->rt.overloaded && push_rt_task(rq) &&
                    /* Don't resched if we changed runqueues */
@@ -1652,10 +1654,13 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
  * Priority of the task has changed. This may cause
  * us to initiate a push or pull.
  */
-static void prio_changed_rt(struct rq *rq, struct task_struct *p,
-                           int oldprio, int running)
+static void
+prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-       if (running) {
+       if (!p->se.on_rq)
+               return;
+
+       if (rq->curr == p) {
 #ifdef CONFIG_SMP
                /*
                 * If our priority decreases while running, we
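
The sched_rt_rq_enqueue()/sched_rt_rq_dequeue() hunks above replace
smp_processor_id() with cpu_of(rq_of_rt_rq(rt_rq)): these helpers can operate
on a remote runqueue, so per-CPU data must be indexed by the runqueue's own
CPU, not by whichever CPU the code happens to execute on. A toy sketch of the
bug class (NR_CPUS, toy_rq and both lookup helpers are invented for
illustration):

#include <stdio.h>

#define NR_CPUS 4

struct toy_rq { int cpu; };

static int rt_se_of[NR_CPUS] = { 10, 11, 12, 13 };

static int this_cpu(void) { return 0; } /* pretend we run on CPU 0 */

static int lookup_buggy(struct toy_rq *rq)
{
        (void)rq;
        return rt_se_of[this_cpu()];    /* wrong for a remote rq */
}

static int lookup_fixed(struct toy_rq *rq)
{
        return rt_se_of[rq->cpu];       /* index by the rq's own CPU */
}

int main(void)
{
        struct toy_rq remote = { .cpu = 2 };

        printf("buggy: %d, fixed: %d\n",
               lookup_buggy(&remote), lookup_fixed(&remote));
        return 0;
}
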
index 2bf6b47058c19f79a326a6b4f82d4baf7950cb1a..84ec9bcf82d92758021f6b398bceee83bb0d0d0c 100644 (file)
@@ -59,14 +59,13 @@ static void set_curr_task_stop(struct rq *rq)
 {
 }
 
-static void switched_to_stop(struct rq *rq, struct task_struct *p,
-                            int running)
+static void switched_to_stop(struct rq *rq, struct task_struct *p)
 {
        BUG(); /* it's impossible to change to this class */
 }
 
-static void prio_changed_stop(struct rq *rq, struct task_struct *p,
-                             int oldprio, int running)
+static void
+prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
 {
        BUG(); /* how!?, what priority? */
 }
index 68eb5efec388759b95fa9842576b4f0448fbd195..56e5dec837f05bd28f90221e2f0b666af23378c9 100644 (file)
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(irq_stat);
 
 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
-static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
@@ -311,9 +311,21 @@ void irq_enter(void)
 }
 
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
-# define invoke_softirq()      __do_softirq()
+static inline void invoke_softirq(void)
+{
+       if (!force_irqthreads)
+               __do_softirq();
+       else
+               wakeup_softirqd();
+}
 #else
-# define invoke_softirq()      do_softirq()
+static inline void invoke_softirq(void)
+{
+       if (!force_irqthreads)
+               do_softirq();
+       else
+               wakeup_softirqd();
+}
 #endif
 
 /*
@@ -721,7 +733,6 @@ static int run_ksoftirqd(void * __bind_cpu)
 {
        set_current_state(TASK_INTERRUPTIBLE);
 
-       current->flags |= PF_KSOFTIRQD;
        while (!kthread_should_stop()) {
                preempt_disable();
                if (!local_softirq_pending()) {
@@ -738,7 +749,10 @@ static int run_ksoftirqd(void * __bind_cpu)
                           don't process */
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
-                       do_softirq();
+                       local_irq_disable();
+                       if (local_softirq_pending())
+                               __do_softirq();
+                       local_irq_enable();
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
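
The invoke_softirq() rework above either runs softirqs inline or, when
force_irqthreads is set, defers them to ksoftirqd; the run_ksoftirqd() hunk
likewise re-checks the pending mask with interrupts disabled before
processing. A rough user-space analogue of that inline-vs-threaded handoff,
using a condition variable in place of the wakeup (all names are
illustrative, not the kernel mechanism):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool pending, stop;
static bool force_threads = true;       /* the boot-time switch */

static void do_work(void) { printf("softirq work runs\n"); }

static void *worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
                if (pending) {          /* re-check under the lock */
                        pending = false;
                        do_work();
                } else if (stop) {
                        break;          /* drain pending work first */
                } else {
                        pthread_cond_wait(&cond, &lock);
                }
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

static void invoke(void)
{
        if (!force_threads) {
                do_work();              /* inline, as __do_softirq() */
                return;
        }
        pthread_mutex_lock(&lock);      /* threaded, as wakeup_softirqd() */
        pending = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        invoke();
        pthread_mutex_lock(&lock);
        stop = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        pthread_join(&t, NULL);
        return 0;
}
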
index c782fe9924c79f052e1b81b7d12bdf53f9991a69..25cc41cd8f3314c75becbf6a668445583bf98582 100644 (file)
@@ -186,3 +186,8 @@ cond_syscall(sys_perf_event_open);
 /* fanotify! */
 cond_syscall(sys_fanotify_init);
 cond_syscall(sys_fanotify_mark);
+
+/* open by handle */
+cond_syscall(sys_name_to_handle_at);
+cond_syscall(sys_open_by_handle_at);
+cond_syscall(compat_sys_open_by_handle_at);
index 0f1bd83db98523333b9fabde37d200512b20b77e..51054fea5d999877a5865c71d1da34d23bbce70d 100644 (file)
@@ -194,9 +194,9 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
 static struct ctl_table root_table[];
 static struct ctl_table_root sysctl_table_root;
 static struct ctl_table_header root_table_header = {
-       .count = 1,
+       {{.count = 1,
        .ctl_table = root_table,
-       .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),
+       .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}},
        .root = &sysctl_table_root,
        .set = &sysctl_table_root.default_set,
 };
@@ -361,20 +361,13 @@ static struct ctl_table kern_table[] = {
                .mode           = 0644,
                .proc_handler   = sched_rt_handler,
        },
-       {
-               .procname       = "sched_compat_yield",
-               .data           = &sysctl_sched_compat_yield,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
 #ifdef CONFIG_SCHED_AUTOGROUP
        {
                .procname       = "sched_autogroup_enabled",
                .data           = &sysctl_sched_autogroup_enabled,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = &zero,
                .extra2         = &one,
        },
@@ -948,7 +941,7 @@ static struct ctl_table kern_table[] = {
                .data           = &sysctl_perf_event_sample_rate,
                .maxlen         = sizeof(sysctl_perf_event_sample_rate),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = perf_proc_update_handler,
        },
 #endif
 #ifdef CONFIG_KMEMCHECK
@@ -1567,11 +1560,16 @@ void sysctl_head_get(struct ctl_table_header *head)
        spin_unlock(&sysctl_lock);
 }
 
+static void free_head(struct rcu_head *rcu)
+{
+       kfree(container_of(rcu, struct ctl_table_header, rcu));
+}
+
 void sysctl_head_put(struct ctl_table_header *head)
 {
        spin_lock(&sysctl_lock);
        if (!--head->count)
-               kfree(head);
+               call_rcu(&head->rcu, free_head);
        spin_unlock(&sysctl_lock);
 }
 
@@ -1948,10 +1946,10 @@ void unregister_sysctl_table(struct ctl_table_header * header)
        start_unregistering(header);
        if (!--header->parent->count) {
                WARN_ON(1);
-               kfree(header->parent);
+               call_rcu(&header->parent->rcu, free_head);
        }
        if (!--header->count)
-               kfree(header);
+               call_rcu(&header->rcu, free_head);
        spin_unlock(&sysctl_lock);
 }
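
The sysctl hunks above stop kfree()ing headers directly and instead defer the
free past an RCU grace period: free_head() recovers the enclosing
ctl_table_header from the embedded rcu_head via container_of(). A
self-contained sketch of that embedded-callback pattern (the toy call_rcu
here invokes the callback immediately; a real grace period would elapse
first, and all toy names are invented):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct rcu_head_toy { void (*func)(struct rcu_head_toy *); };

struct header {
        int count;
        struct rcu_head_toy rcu;        /* embedded, as in the kernel */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void free_head(struct rcu_head_toy *rcu)
{
        /* recover the enclosing object from the embedded member */
        struct header *h = container_of(rcu, struct header, rcu);

        printf("freeing header with count=%d\n", h->count);
        free(h);
}

/* stand-in for call_rcu(): runs the callback without waiting */
static void call_rcu_toy(struct rcu_head_toy *rcu,
                         void (*func)(struct rcu_head_toy *))
{
        rcu->func = func;
        rcu->func(rcu);
}

int main(void)
{
        struct header *h = malloc(sizeof(*h));

        h->count = 0;
        call_rcu_toy(&h->rcu, free_head);
        return 0;
}
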
 
index b875bedf7c9abd2efd068cd67a84a4f359356cc9..3b8e028b96014a088b6227859b9163e8bceddb5a 100644 (file)
@@ -1321,13 +1321,11 @@ static ssize_t binary_sysctl(const int *name, int nlen,
        void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
 {
        const struct bin_table *table = NULL;
-       struct nameidata nd;
        struct vfsmount *mnt;
        struct file *file;
        ssize_t result;
        char *pathname;
        int flags;
-       int acc_mode;
 
        pathname = sysctl_getname(name, nlen, &table);
        result = PTR_ERR(pathname);
@@ -1337,28 +1335,17 @@ static ssize_t binary_sysctl(const int *name, int nlen,
        /* How should the sysctl be accessed? */
        if (oldval && oldlen && newval && newlen) {
                flags = O_RDWR;
-               acc_mode = MAY_READ | MAY_WRITE;
        } else if (newval && newlen) {
                flags = O_WRONLY;
-               acc_mode = MAY_WRITE;
        } else if (oldval && oldlen) {
                flags = O_RDONLY;
-               acc_mode = MAY_READ;
        } else {
                result = 0;
                goto out_putname;
        }
 
        mnt = current->nsproxy->pid_ns->proc_mnt;
-       result = vfs_path_lookup(mnt->mnt_root, mnt, pathname, 0, &nd);
-       if (result)
-               goto out_putname;
-
-       result = may_open(&nd.path, acc_mode, flags);
-       if (result)
-               goto out_putpath;
-
-       file = dentry_open(nd.path.dentry, nd.path.mnt, flags, current_cred());
+       file = file_open_root(mnt->mnt_root, mnt, pathname, flags);
        result = PTR_ERR(file);
        if (IS_ERR(file))
                goto out_putname;
@@ -1370,10 +1357,6 @@ out_putname:
        putname(pathname);
 out:
        return result;
-
-out_putpath:
-       path_put(&nd.path);
-       goto out_putname;
 }
 
 
index 32174359576fa075a520de56ab30aacc51af08ef..8e8dc6d705c93126c8b971e6eb88529b3924f678 100644 (file)
@@ -150,7 +150,7 @@ static inline void warp_clock(void)
  * various programs will get confused when the clock gets warped.
  */
 
-int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
+int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
 {
        static int firsttime = 1;
        int error = 0;
@@ -645,7 +645,7 @@ u64 nsec_to_clock_t(u64 x)
 }
 
 /**
- * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
+ * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
  *
  * @n: nsecs in u64
  *
@@ -657,7 +657,7 @@ u64 nsec_to_clock_t(u64 x)
  *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
  *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
  */
-unsigned long nsecs_to_jiffies(u64 n)
+u64 nsecs_to_jiffies64(u64 n)
 {
 #if (NSEC_PER_SEC % HZ) == 0
        /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
@@ -674,22 +674,23 @@ unsigned long nsecs_to_jiffies(u64 n)
 #endif
 }
 
-#if (BITS_PER_LONG < 64)
-u64 get_jiffies_64(void)
+/**
+ * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
+ *
+ * @n: nsecs in u64
+ *
+ * Unlike {m,u}secs_to_jiffies, the input type is not unsigned int but u64.
+ * This doesn't return MAX_JIFFY_OFFSET since the function is designed
+ * for the scheduler, not for use in device drivers to calculate timeout values.
+ *
+ * note:
+ *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
+ *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
+ */
+unsigned long nsecs_to_jiffies(u64 n)
 {
-       unsigned long seq;
-       u64 ret;
-
-       do {
-               seq = read_seqbegin(&xtime_lock);
-               ret = jiffies_64;
-       } while (read_seqretry(&xtime_lock, seq));
-       return ret;
+       return (unsigned long)nsecs_to_jiffies64(n);
 }
-EXPORT_SYMBOL(get_jiffies_64);
-#endif
-
-EXPORT_SYMBOL(jiffies);
 
 /*
  * Add two timespec values and do a safety check for overflow.
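
The nsecs_to_jiffies64() conversion above reduces, in the common case where
NSEC_PER_SEC is divisible by HZ, to dividing by the tick length in
nanoseconds. A worked example, assuming HZ = 250 purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define HZ 250ULL
#define NSEC_PER_SEC 1000000000ULL

static uint64_t nsecs_to_jiffies64_toy(uint64_t n)
{
        /* NSEC_PER_SEC % HZ == 0 here, so divide by the tick length */
        return n / (NSEC_PER_SEC / HZ);
}

int main(void)
{
        /* 1.5 s at HZ=250 -> one tick is 4,000,000 ns -> 375 jiffies */
        printf("%llu\n",
               (unsigned long long)nsecs_to_jiffies64_toy(1500000000ULL));
        return 0;
}
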
index ee266620b06ca336a9246489ff9d01d1dd7cd91d..b0425991e9acbc5278379171a7c759c7e773b2b0 100644 (file)
@@ -1,4 +1,5 @@
-obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o timeconv.o
+obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
+obj-y += timeconv.o posix-clock.o
 
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD)                += clockevents.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS)              += tick-common.o
index d7395fdfb9f38f931761e29d14534143dbe3799c..0d74b9ba90c84e0474b28a71ea9d43b76e4263f9 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/notifier.h>
 #include <linux/smp.h>
 #include <linux/sysdev.h>
-#include <linux/tick.h>
 
 #include "tick-internal.h"
 
index 5404a84569094f3cef796e903ad6a4cb92a8b3e8..b2fa506667c0ab0091365052b5411b3d8335ab8c 100644 (file)
 ************************************************************************/
 #include <linux/clocksource.h>
 #include <linux/jiffies.h>
+#include <linux/module.h>
 #include <linux/init.h>
 
+#include "tick-internal.h"
+
 /* The Jiffies based clocksource is the lowest common
  * denominator clock source which should function on
  * all systems. It has the same coarse resolution as
@@ -64,6 +67,23 @@ struct clocksource clocksource_jiffies = {
        .shift          = JIFFIES_SHIFT,
 };
 
+#if (BITS_PER_LONG < 64)
+u64 get_jiffies_64(void)
+{
+       unsigned long seq;
+       u64 ret;
+
+       do {
+               seq = read_seqbegin(&xtime_lock);
+               ret = jiffies_64;
+       } while (read_seqretry(&xtime_lock, seq));
+       return ret;
+}
+EXPORT_SYMBOL(get_jiffies_64);
+#endif
+
+EXPORT_SYMBOL(jiffies);
+
 static int __init init_jiffies_clocksource(void)
 {
        return clocksource_register(&clocksource_jiffies);
index 5c00242fa921cca007ee2f366493903e246d4769..5f1bb8e2008fddb3de06ccc9c57eb2dc9fbc11f2 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 
+#include "tick-internal.h"
+
 /*
  * NTP timekeeping variables:
  */
@@ -646,6 +648,17 @@ int do_adjtimex(struct timex *txc)
                        hrtimer_cancel(&leap_timer);
        }
 
+       if (txc->modes & ADJ_SETOFFSET) {
+               struct timespec delta;
+               delta.tv_sec  = txc->time.tv_sec;
+               delta.tv_nsec = txc->time.tv_usec;
+               if (!(txc->modes & ADJ_NANO))
+                       delta.tv_nsec *= 1000;
+               result = timekeeping_inject_offset(&delta);
+               if (result)
+                       return result;
+       }
+
        getnstimeofday(&ts);
 
        write_seqlock_irq(&xtime_lock);
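
The do_adjtimex() hunk above adds ADJ_SETOFFSET: a signed offset is injected
into the timekeeper in one atomic call, with tv_usec interpreted as
nanoseconds when ADJ_NANO is also set. A usage sketch from user space; it
assumes a kernel and libc new enough to expose ADJ_SETOFFSET, and requires
CAP_SYS_TIME:

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
        struct timex tx = {
                .modes = ADJ_SETOFFSET | ADJ_NANO,
                /* with ADJ_NANO, tv_usec carries nanoseconds: +0.5 s */
                .time  = { .tv_sec = 0, .tv_usec = 500000000 },
        };

        if (adjtimex(&tx) < 0) {
                perror("adjtimex");
                return 1;
        }
        printf("clock stepped by 0.5 s in one call\n");
        return 0;
}
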
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
new file mode 100644 (file)
index 0000000..25028dd
--- /dev/null
@@ -0,0 +1,451 @@
+/*
+ * posix-clock.c - support for dynamic clock devices
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/mutex.h>
+#include <linux/posix-clock.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+static void delete_clock(struct kref *kref);
+
+/*
+ * Returns NULL if the posix_clock instance attached to 'fp' is old and stale.
+ */
+static struct posix_clock *get_posix_clock(struct file *fp)
+{
+       struct posix_clock *clk = fp->private_data;
+
+       mutex_lock(&clk->mutex);
+
+       if (!clk->zombie)
+               return clk;
+
+       mutex_unlock(&clk->mutex);
+
+       return NULL;
+}
+
+static void put_posix_clock(struct posix_clock *clk)
+{
+       mutex_unlock(&clk->mutex);
+}
+
+static ssize_t posix_clock_read(struct file *fp, char __user *buf,
+                               size_t count, loff_t *ppos)
+{
+       struct posix_clock *clk = get_posix_clock(fp);
+       int err = -EINVAL;
+
+       if (!clk)
+               return -ENODEV;
+
+       if (clk->ops.read)
+               err = clk->ops.read(clk, fp->f_flags, buf, count);
+
+       put_posix_clock(clk);
+
+       return err;
+}
+
+static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
+{
+       struct posix_clock *clk = get_posix_clock(fp);
+       int result = 0;
+
+       if (!clk)
+               return -ENODEV;
+
+       if (clk->ops.poll)
+               result = clk->ops.poll(clk, fp, wait);
+
+       put_posix_clock(clk);
+
+       return result;
+}
+
+static int posix_clock_fasync(int fd, struct file *fp, int on)
+{
+       struct posix_clock *clk = get_posix_clock(fp);
+       int err = 0;
+
+       if (!clk)
+               return -ENODEV;
+
+       if (clk->ops.fasync)
+               err = clk->ops.fasync(clk, fd, fp, on);
+
+       put_posix_clock(clk);
+
+       return err;
+}
+
+static int posix_clock_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+       struct posix_clock *clk = get_posix_clock(fp);
+       int err = -ENODEV;
+
+       if (!clk)
+               return -ENODEV;
+
+       if (clk->ops.mmap)
+               err = clk->ops.mmap(clk, vma);
+
+       put_posix_clock(clk);
+
+       return err;
+}
+
+static long posix_clock_ioctl(struct file *fp,
+                             unsigned int cmd, unsigned long arg)
+{
+       struct posix_clock *clk = get_posix_clock(fp);
+       int err = -ENOTTY;
+
+       if (!clk)
+               return -ENODEV;
+
+       if (clk->ops.ioctl)
+               err = clk->ops.ioctl(clk, cmd, arg);
+
+       put_posix_clock(clk);
+
+       return err;
+}
+
+#ifdef CONFIG_COMPAT
+static long posix_clock_compat_ioctl(struct file *fp,
+                                    unsigned int cmd, unsigned long arg)
+{
+       struct posix_clock *clk = get_posix_clock(fp);
+       int err = -ENOTTY;
+
+       if (!clk)
+               return -ENODEV;
+
+       if (clk->ops.ioctl)
+               err = clk->ops.ioctl(clk, cmd, arg);
+
+       put_posix_clock(clk);
+
+       return err;
+}
+#endif
+
+static int posix_clock_open(struct inode *inode, struct file *fp)
+{
+       int err;
+       struct posix_clock *clk =
+               container_of(inode->i_cdev, struct posix_clock, cdev);
+
+       mutex_lock(&clk->mutex);
+
+       if (clk->zombie) {
+               err = -ENODEV;
+               goto out;
+       }
+       if (clk->ops.open)
+               err = clk->ops.open(clk, fp->f_mode);
+       else
+               err = 0;
+
+       if (!err) {
+               kref_get(&clk->kref);
+               fp->private_data = clk;
+       }
+out:
+       mutex_unlock(&clk->mutex);
+       return err;
+}
+
+static int posix_clock_release(struct inode *inode, struct file *fp)
+{
+       struct posix_clock *clk = fp->private_data;
+       int err = 0;
+
+       if (clk->ops.release)
+               err = clk->ops.release(clk);
+
+       kref_put(&clk->kref, delete_clock);
+
+       fp->private_data = NULL;
+
+       return err;
+}
+
+static const struct file_operations posix_clock_file_operations = {
+       .owner          = THIS_MODULE,
+       .llseek         = no_llseek,
+       .read           = posix_clock_read,
+       .poll           = posix_clock_poll,
+       .unlocked_ioctl = posix_clock_ioctl,
+       .open           = posix_clock_open,
+       .release        = posix_clock_release,
+       .fasync         = posix_clock_fasync,
+       .mmap           = posix_clock_mmap,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = posix_clock_compat_ioctl,
+#endif
+};
+
+int posix_clock_register(struct posix_clock *clk, dev_t devid)
+{
+       int err;
+
+       kref_init(&clk->kref);
+       mutex_init(&clk->mutex);
+
+       cdev_init(&clk->cdev, &posix_clock_file_operations);
+       clk->cdev.owner = clk->ops.owner;
+       err = cdev_add(&clk->cdev, devid, 1);
+       if (err)
+               goto no_cdev;
+
+       return err;
+no_cdev:
+       mutex_destroy(&clk->mutex);
+       return err;
+}
+EXPORT_SYMBOL_GPL(posix_clock_register);
+
+static void delete_clock(struct kref *kref)
+{
+       struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
+       mutex_destroy(&clk->mutex);
+       if (clk->release)
+               clk->release(clk);
+}
+
+void posix_clock_unregister(struct posix_clock *clk)
+{
+       cdev_del(&clk->cdev);
+
+       mutex_lock(&clk->mutex);
+       clk->zombie = true;
+       mutex_unlock(&clk->mutex);
+
+       kref_put(&clk->kref, delete_clock);
+}
+EXPORT_SYMBOL_GPL(posix_clock_unregister);
+
+struct posix_clock_desc {
+       struct file *fp;
+       struct posix_clock *clk;
+};
+
+static int get_clock_desc(const clockid_t id, struct posix_clock_desc *cd)
+{
+       struct file *fp = fget(CLOCKID_TO_FD(id));
+       int err = -EINVAL;
+
+       if (!fp)
+               return err;
+
+       if (fp->f_op->open != posix_clock_open || !fp->private_data)
+               goto out;
+
+       cd->fp = fp;
+       cd->clk = get_posix_clock(fp);
+
+       err = cd->clk ? 0 : -ENODEV;
+out:
+       if (err)
+               fput(fp);
+       return err;
+}
+
+static void put_clock_desc(struct posix_clock_desc *cd)
+{
+       put_posix_clock(cd->clk);
+       fput(cd->fp);
+}
+
+static int pc_clock_adjtime(clockid_t id, struct timex *tx)
+{
+       struct posix_clock_desc cd;
+       int err;
+
+       err = get_clock_desc(id, &cd);
+       if (err)
+               return err;
+
+       if ((cd.fp->f_mode & FMODE_WRITE) == 0) {
+               err = -EACCES;
+               goto out;
+       }
+
+       if (cd.clk->ops.clock_adjtime)
+               err = cd.clk->ops.clock_adjtime(cd.clk, tx);
+       else
+               err = -EOPNOTSUPP;
+out:
+       put_clock_desc(&cd);
+
+       return err;
+}
+
+static int pc_clock_gettime(clockid_t id, struct timespec *ts)
+{
+       struct posix_clock_desc cd;
+       int err;
+
+       err = get_clock_desc(id, &cd);
+       if (err)
+               return err;
+
+       if (cd.clk->ops.clock_gettime)
+               err = cd.clk->ops.clock_gettime(cd.clk, ts);
+       else
+               err = -EOPNOTSUPP;
+
+       put_clock_desc(&cd);
+
+       return err;
+}
+
+static int pc_clock_getres(clockid_t id, struct timespec *ts)
+{
+       struct posix_clock_desc cd;
+       int err;
+
+       err = get_clock_desc(id, &cd);
+       if (err)
+               return err;
+
+       if (cd.clk->ops.clock_getres)
+               err = cd.clk->ops.clock_getres(cd.clk, ts);
+       else
+               err = -EOPNOTSUPP;
+
+       put_clock_desc(&cd);
+
+       return err;
+}
+
+static int pc_clock_settime(clockid_t id, const struct timespec *ts)
+{
+       struct posix_clock_desc cd;
+       int err;
+
+       err = get_clock_desc(id, &cd);
+       if (err)
+               return err;
+
+       if ((cd.fp->f_mode & FMODE_WRITE) == 0) {
+               err = -EACCES;
+               goto out;
+       }
+
+       if (cd.clk->ops.clock_settime)
+               err = cd.clk->ops.clock_settime(cd.clk, ts);
+       else
+               err = -EOPNOTSUPP;
+out:
+       put_clock_desc(&cd);
+
+       return err;
+}
+
+static int pc_timer_create(struct k_itimer *kit)
+{
+       clockid_t id = kit->it_clock;
+       struct posix_clock_desc cd;
+       int err;
+
+       err = get_clock_desc(id, &cd);
+       if (err)
+               return err;
+
+       if (cd.clk->ops.timer_create)
+               err = cd.clk->ops.timer_create(cd.clk, kit);
+       else
+               err = -EOPNOTSUPP;
+
+       put_clock_desc(&cd);
+
+       return err;
+}
+
+static int pc_timer_delete(struct k_itimer *kit)
+{
+       clockid_t id = kit->it_clock;
+       struct posix_clock_desc cd;
+       int err;
+
+       err = get_clock_desc(id, &cd);
+       if (err)
+               return err;
+
+       if (cd.clk->ops.timer_delete)
+               err = cd.clk->ops.timer_delete(cd.clk, kit);
+       else
+               err = -EOPNOTSUPP;
+
+       put_clock_desc(&cd);
+
+       return err;
+}
+
+static void pc_timer_gettime(struct k_itimer *kit, struct itimerspec *ts)
+{
+       clockid_t id = kit->it_clock;
+       struct posix_clock_desc cd;
+
+       if (get_clock_desc(id, &cd))
+               return;
+
+       if (cd.clk->ops.timer_gettime)
+               cd.clk->ops.timer_gettime(cd.clk, kit, ts);
+
+       put_clock_desc(&cd);
+}
+
+static int pc_timer_settime(struct k_itimer *kit, int flags,
+                           struct itimerspec *ts, struct itimerspec *old)
+{
+       clockid_t id = kit->it_clock;
+       struct posix_clock_desc cd;
+       int err;
+
+       err = get_clock_desc(id, &cd);
+       if (err)
+               return err;
+
+       if (cd.clk->ops.timer_settime)
+               err = cd.clk->ops.timer_settime(cd.clk, kit, flags, ts, old);
+       else
+               err = -EOPNOTSUPP;
+
+       put_clock_desc(&cd);
+
+       return err;
+}
+
+struct k_clock clock_posix_dynamic = {
+       .clock_getres   = pc_clock_getres,
+       .clock_set      = pc_clock_settime,
+       .clock_get      = pc_clock_gettime,
+       .clock_adj      = pc_clock_adjtime,
+       .timer_create   = pc_timer_create,
+       .timer_set      = pc_timer_settime,
+       .timer_del      = pc_timer_delete,
+       .timer_get      = pc_timer_gettime,
+};
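
This table is what the posix-timers core hands negative clockids to; a sketch of that dispatch, assuming the clockid_to_kclock() shape this series gives kernel/posix-timers.c:

static struct k_clock *clockid_to_kclock(const clockid_t id)
{
	if (id < 0)	/* negative ids encode an fd (dynamic) or a pid (cpu) */
		return (id & CLOCKFD_MASK) == CLOCKFD ?
			&clock_posix_dynamic : &clock_posix_cpu;

	if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
		return NULL;
	return &posix_clocks[id];
}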
index 48b2761b5668119bc932b6278289bf8a481be050..da800ffa810c2f248ebcb6e8d1aa2395343bbd0a 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
-#include <linux/tick.h>
 
 #include "tick-internal.h"
 
@@ -600,4 +599,14 @@ int tick_broadcast_oneshot_active(void)
        return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
 }
 
+/*
+ * Check whether the broadcast device supports oneshot.
+ */
+bool tick_broadcast_oneshot_available(void)
+{
+       struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+       return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
+}
+
 #endif
index 051bc80a0c435cf47a8dfb8a0f5d2d49d20e188c..119528de82359c2ff913c02584aeb31a9ae7265b 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
-#include <linux/tick.h>
 
 #include <asm/irq_regs.h>
 
@@ -51,7 +50,11 @@ int tick_is_oneshot_available(void)
 {
        struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
-       return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
+       if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+               return 0;
+       if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
+               return 1;
+       return tick_broadcast_oneshot_available();
 }
 
 /*
index 290eefbc1f608ec4d9c957fb5798eb53490f9e7c..1009b06d6f897ac2acab87e70b61c8d00d6542b5 100644 (file)
@@ -1,6 +1,10 @@
 /*
  * tick internal variable and functions used by low/high res code
  */
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD
 
 #define TICK_DO_TIMER_NONE     -1
 #define TICK_DO_TIMER_BOOT     -2
@@ -36,6 +40,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
 extern void tick_check_oneshot_broadcast(int cpu);
+bool tick_broadcast_oneshot_available(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -46,6 +51,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
 static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline bool tick_broadcast_oneshot_available(void) { return true; }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
@@ -76,6 +82,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
        return 0;
 }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline bool tick_broadcast_oneshot_available(void) { return false; }
 #endif /* !TICK_ONESHOT */
 
 /*
@@ -132,3 +139,8 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
 {
        return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
 }
+
+#endif
+
+extern void do_timer(unsigned long ticks);
+extern seqlock_t xtime_lock;
index 5cbc101f908b8483938c0153fc1ac023bcd1c784..2d04411a5f05882ce41097e0ab607ffef38d318a 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
-#include <linux/tick.h>
 
 #include "tick-internal.h"
 
index c55ea2433471c2952bd65283cd72eb165d720bbe..d5097c44b407e25a1acae0f5d85c520eda555ae5 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
-#include <linux/tick.h>
 #include <linux/module.h>
 
 #include <asm/irq_regs.h>
index d27c7562902cbe3aa2472292bd813c1eee8c1ecb..3bd7e3d5c6325a82563f03b8124b847ae6432623 100644 (file)
@@ -353,7 +353,7 @@ EXPORT_SYMBOL(do_gettimeofday);
  *
  * Sets the time of day to the new time and update NTP and notify hrtimers
  */
-int do_settimeofday(struct timespec *tv)
+int do_settimeofday(const struct timespec *tv)
 {
        struct timespec ts_delta;
        unsigned long flags;
@@ -387,6 +387,42 @@ int do_settimeofday(struct timespec *tv)
 
 EXPORT_SYMBOL(do_settimeofday);
 
+
+/**
+ * timekeeping_inject_offset - Adds or subtracts from the current time.
+ * @tv:                pointer to the timespec variable containing the offset
+ *
+ * Adds or subtracts an offset value from the current time.
+ */
+int timekeeping_inject_offset(struct timespec *ts)
+{
+       unsigned long flags;
+
+       if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+               return -EINVAL;
+
+       write_seqlock_irqsave(&xtime_lock, flags);
+
+       timekeeping_forward_now();
+
+       xtime = timespec_add(xtime, *ts);
+       wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
+
+       timekeeper.ntp_error = 0;
+       ntp_clear();
+
+       update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+                               timekeeper.mult);
+
+       write_sequnlock_irqrestore(&xtime_lock, flags);
+
+       /* signal hrtimers about time change */
+       clock_was_set();
+
+       return 0;
+}
+EXPORT_SYMBOL(timekeeping_inject_offset);
+
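
timekeeping_inject_offset() backs a one-shot clock step from adjtimex(); a hedged user-space sketch, assuming the ADJ_SETOFFSET mode bit (0x0100) that the NTP side of this series wires up:

#include <stdio.h>
#include <sys/timex.h>

#ifndef ADJ_SETOFFSET
#define ADJ_SETOFFSET 0x0100	/* assumed value from this series */
#endif

int main(void)
{
	struct timex tx = {
		.modes = ADJ_SETOFFSET,
		.time  = { .tv_sec = 0, .tv_usec = 500000 },	/* step +0.5s */
	};

	if (adjtimex(&tx) < 0)
		perror("adjtimex");
	return 0;
}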
 /**
  * change_clocksource - Swaps clocksources if a new one is available
  *
@@ -779,7 +815,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
  *
  * Called from the timer interrupt, must hold a write on xtime_lock.
  */
-void update_wall_time(void)
+static void update_wall_time(void)
 {
        struct clocksource *clock;
        cycle_t offset;
@@ -871,7 +907,7 @@ void update_wall_time(void)
  * getboottime - Return the real time of system boot.
  * @ts:                pointer to the timespec to be set
  *
- * Returns the time of day in a timespec.
+ * Returns the wall-time of boot in a timespec.
  *
  * This is based on the wall_to_monotonic offset and the total suspend
  * time. Calls to settimeofday will affect the value returned (which
@@ -889,6 +925,55 @@ void getboottime(struct timespec *ts)
 }
 EXPORT_SYMBOL_GPL(getboottime);
 
+
+/**
+ * get_monotonic_boottime - Returns monotonic time since boot
+ * @ts:                pointer to the timespec to be set
+ *
+ * Returns the monotonic time since boot in a timespec.
+ *
+ * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
+ * includes the time spent in suspend.
+ */
+void get_monotonic_boottime(struct timespec *ts)
+{
+       struct timespec tomono, sleep;
+       unsigned int seq;
+       s64 nsecs;
+
+       WARN_ON(timekeeping_suspended);
+
+       do {
+               seq = read_seqbegin(&xtime_lock);
+               *ts = xtime;
+               tomono = wall_to_monotonic;
+               sleep = total_sleep_time;
+               nsecs = timekeeping_get_ns();
+
+       } while (read_seqretry(&xtime_lock, seq));
+
+       set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
+                       ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
+}
+EXPORT_SYMBOL_GPL(get_monotonic_boottime);
+
+/**
+ * ktime_get_boottime - Returns monotonic time since boot in a ktime
+ *
+ * Returns the monotonic time since boot in a ktime
+ *
+ * This is similar to CLOCK_MONOTONIC/ktime_get, but also
+ * includes the time spent in suspend.
+ */
+ktime_t ktime_get_boottime(void)
+{
+       struct timespec ts;
+
+       get_monotonic_boottime(&ts);
+       return timespec_to_ktime(ts);
+}
+EXPORT_SYMBOL_GPL(ktime_get_boottime);
+
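
These two are the kernel backends for a suspend-aware monotonic clock; a hedged user-space sketch, assuming the CLOCK_BOOTTIME id (7) this series exposes on top of them:

#include <stdio.h>
#include <time.h>

#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7	/* assumed value; older libc headers lack it */
#endif

int main(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_BOOTTIME, &ts))
		perror("clock_gettime");
	else
		printf("up (incl. suspend): %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}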
 /**
  * monotonic_to_bootbased - Convert the monotonic time to boot based.
  * @ts:                pointer to the timespec to be converted
@@ -910,11 +995,6 @@ struct timespec __current_kernel_time(void)
        return xtime;
 }
 
-struct timespec __get_wall_to_monotonic(void)
-{
-       return wall_to_monotonic;
-}
-
 struct timespec current_kernel_time(void)
 {
        struct timespec now;
@@ -946,3 +1026,48 @@ struct timespec get_monotonic_coarse(void)
                                now.tv_nsec + mono.tv_nsec);
        return now;
 }
+
+/*
+ * The 64-bit jiffies value is not atomic - you MUST NOT read it
+ * without sampling the sequence number in xtime_lock.
+ * jiffies is defined in the linker script...
+ */
+void do_timer(unsigned long ticks)
+{
+       jiffies_64 += ticks;
+       update_wall_time();
+       calc_global_load(ticks);
+}
+
+/**
+ * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
+ *    and sleep offsets.
+ * @xtim:      pointer to timespec to be set with xtime
+ * @wtom:      pointer to timespec to be set with wall_to_monotonic
+ * @sleep:     pointer to timespec to be set with time in suspend
+ */
+void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
+                               struct timespec *wtom, struct timespec *sleep)
+{
+       unsigned long seq;
+
+       do {
+               seq = read_seqbegin(&xtime_lock);
+               *xtim = xtime;
+               *wtom = wall_to_monotonic;
+               *sleep = total_sleep_time;
+       } while (read_seqretry(&xtime_lock, seq));
+}
+
+/**
+ * xtime_update() - advances the timekeeping infrastructure
+ * @ticks:     number of ticks that have elapsed since the last call.
+ *
+ * Must be called with interrupts disabled.
+ */
+void xtime_update(unsigned long ticks)
+{
+       write_seqlock(&xtime_lock);
+       do_timer(ticks);
+       write_sequnlock(&xtime_lock);
+}
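
With update_wall_time() now static and do_timer() owned by the timekeeping core, arch tick handlers are expected to call xtime_update() rather than take xtime_lock themselves. A hedged sketch of the converted call-site pattern:

#include <linux/interrupt.h>

/* before: write_seqlock(&xtime_lock); do_timer(1); write_sequnlock(&xtime_lock);
 * after:  one call; the lock is handled internally (irqs are already off here) */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	xtime_update(1);	/* one tick elapsed */
	return IRQ_HANDLED;
}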
index d6459923d2452bd4c0b6c71f48c45ee8d8f505da..fd6198692b57b16e47132d1eb5183bafd98b050e 100644 (file)
@@ -404,6 +404,11 @@ static void timer_stats_account_timer(struct timer_list *timer) {}
 
 static struct debug_obj_descr timer_debug_descr;
 
+static void *timer_debug_hint(void *addr)
+{
+       return ((struct timer_list *) addr)->function;
+}
+
 /*
  * fixup_init is called when:
  * - an active object is initialized
@@ -477,6 +482,7 @@ static int timer_fixup_free(void *addr, enum debug_obj_state state)
 
 static struct debug_obj_descr timer_debug_descr = {
        .name           = "timer_list",
+       .debug_hint     = timer_debug_hint,
        .fixup_init     = timer_fixup_init,
        .fixup_activate = timer_fixup_activate,
        .fixup_free     = timer_fixup_free,
@@ -964,6 +970,25 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
  * add_timer_on(). Upon exit the timer is not queued and the handler is
  * not running on any CPU.
  *
+ * Note: You must not hold locks that are held in interrupt context
+ *   while calling this function, even if the lock has nothing to do
+ *   with the timer in question. Here's why:
+ *
+ *    CPU0                             CPU1
+ *    ----                             ----
+ *                                   <SOFTIRQ>
+ *                                   call_timer_fn();
+ *                                     base->running_timer = mytimer;
+ *  spin_lock_irq(somelock);
+ *                                     <IRQ>
+ *                                        spin_lock(somelock);
+ *  del_timer_sync(mytimer);
+ *   while (base->running_timer == mytimer);
+ *
+ * Now del_timer_sync() will never return and never release somelock.
+ * The interrupt on the other CPU is waiting to grab somelock but
+ * it has interrupted the softirq that CPU0 is waiting to finish.
+ *
  * The function returns whether it has deactivated a pending timer or not.
  */
 int del_timer_sync(struct timer_list *timer)
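
A sketch of the pattern the note above forbids (names illustrative):

#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(somelock);	/* also taken from an interrupt handler */
static struct timer_list mytimer;

static void broken_teardown(void)
{
	spin_lock_irq(&somelock);
	del_timer_sync(&mytimer);	/* may spin forever, per the scenario above */
	spin_unlock_irq(&somelock);
}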
@@ -971,6 +996,10 @@ int del_timer_sync(struct timer_list *timer)
 #ifdef CONFIG_LOCKDEP
        unsigned long flags;
 
+       /*
+        * If lockdep gives a backtrace here, please reference
+        * the synchronization rules above.
+        */
        local_irq_save(flags);
        lock_map_acquire(&timer->lockdep_map);
        lock_map_release(&timer->lockdep_map);
@@ -1295,19 +1324,6 @@ void run_local_timers(void)
        raise_softirq(TIMER_SOFTIRQ);
 }
 
-/*
- * The 64-bit jiffies value is not atomic - you MUST NOT read it
- * without sampling the sequence number in xtime_lock.
- * jiffies is defined in the linker script...
- */
-
-void do_timer(unsigned long ticks)
-{
-       jiffies_64 += ticks;
-       update_wall_time();
-       calc_global_load(ticks);
-}
-
 #ifdef __ARCH_WANT_SYS_ALARM
 
 /*
index d95721f33702352008e2bf7946cf0bd1fc50cd9c..cbafed7d4f386c77816abb4ffe9d7141af29f446 100644 (file)
@@ -1827,21 +1827,5 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
        rwbs[i] = '\0';
 }
 
-void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
-{
-       int rw = rq->cmd_flags & 0x03;
-       int bytes;
-
-       if (rq->cmd_flags & REQ_DISCARD)
-               rw |= REQ_DISCARD;
-
-       if (rq->cmd_flags & REQ_SECURE)
-               rw |= REQ_SECURE;
-
-       bytes = blk_rq_bytes(rq);
-
-       blk_fill_rwbs(rwbs, rw, bytes);
-}
-
 #endif /* CONFIG_EVENT_TRACING */
 
index f3dadae83883e89c7f48439531bfc6ac62c0ea99..888b611897d3737fddb0c890084cacfbe2a359b3 100644 (file)
@@ -3328,7 +3328,7 @@ static int start_graph_tracing(void)
        /* The cpu_boot init_task->ret_stack will never be freed */
        for_each_online_cpu(cpu) {
                if (!idle_task(cpu)->ret_stack)
-                       ftrace_graph_init_task(idle_task(cpu));
+                       ftrace_graph_init_idle_task(idle_task(cpu), cpu);
        }
 
        do {
@@ -3418,6 +3418,49 @@ void unregister_ftrace_graph(void)
        mutex_unlock(&ftrace_lock);
 }
 
+static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
+
+static void
+graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
+{
+       atomic_set(&t->tracing_graph_pause, 0);
+       atomic_set(&t->trace_overrun, 0);
+       t->ftrace_timestamp = 0;
+       /* make curr_ret_stack visible before we add the ret_stack */
+       smp_wmb();
+       t->ret_stack = ret_stack;
+}
+
+/*
+ * Allocate a return stack for the idle task. May be the first
+ * time through, or it may be done by CPU hotplug online.
+ */
+void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
+{
+       t->curr_ret_stack = -1;
+       /*
+        * The idle task has no parent, it either has its own
+        * stack or no stack at all.
+        */
+       if (t->ret_stack)
+               WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
+
+       if (ftrace_graph_active) {
+               struct ftrace_ret_stack *ret_stack;
+
+               ret_stack = per_cpu(idle_ret_stack, cpu);
+               if (!ret_stack) {
+                       ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+                                           * sizeof(struct ftrace_ret_stack),
+                                           GFP_KERNEL);
+                       if (!ret_stack)
+                               return;
+                       per_cpu(idle_ret_stack, cpu) = ret_stack;
+               }
+               graph_init_task(t, ret_stack);
+       }
+}
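
The per-cpu idle_ret_stack lets repeated CPU onlining reuse one allocation instead of leaking a fresh ret_stack each time; a hedged sketch of the scheduler-side caller this pairs with (assuming the companion init_idle() change in kernel/sched.c):

/* kernel/sched.c (sketch): idle tasks are seeded per cpu, everyone else
 * still goes through ftrace_graph_init_task() at fork time */
void init_idle(struct task_struct *idle, int cpu)
{
	/* ... existing idle setup elided ... */
	ftrace_graph_init_idle_task(idle, cpu);
}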
+
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
@@ -3433,12 +3476,7 @@ void ftrace_graph_init_task(struct task_struct *t)
                                GFP_KERNEL);
                if (!ret_stack)
                        return;
-               atomic_set(&t->tracing_graph_pause, 0);
-               atomic_set(&t->trace_overrun, 0);
-               t->ftrace_timestamp = 0;
-               /* make curr_ret_stack visable before we add the ret_stack */
-               smp_wmb();
-               t->ret_stack = ret_stack;
+               graph_init_task(t, ret_stack);
        }
 }
 
index bd1c35a4fbccf31c0531f0667545011f704c561f..db7b439d23ee90dbde74c82dbd84b492f9cb298f 100644 (file)
@@ -5,7 +5,6 @@
  */
 #include <linux/ring_buffer.h>
 #include <linux/trace_clock.h>
-#include <linux/ftrace_irq.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
@@ -1429,6 +1428,17 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
+void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
+{
+       mutex_lock(&buffer->mutex);
+       if (val)
+               buffer->flags |= RB_FL_OVERWRITE;
+       else
+               buffer->flags &= ~RB_FL_OVERWRITE;
+       mutex_unlock(&buffer->mutex);
+}
+EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
+
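
ring_buffer_change_overwrite() lets a tracer flip producer behaviour at run time; a minimal kernel-side sketch (mirroring the set_tracer_flags() hook later in this merge, where it backs the new "overwrite" trace option):

#include <linux/ring_buffer.h>

static void set_overwrite(struct ring_buffer *buffer, int enabled)
{
	/* 1: overwrite the oldest events when full; 0: drop the newest */
	ring_buffer_change_overwrite(buffer, enabled);
}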
 static inline void *
 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
 {
@@ -2162,11 +2172,19 @@ rb_reserve_next_event(struct ring_buffer *buffer,
        if (likely(ts >= cpu_buffer->write_stamp)) {
                delta = diff;
                if (unlikely(test_time_stamp(delta))) {
+                       int local_clock_stable = 1;
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+                       local_clock_stable = sched_clock_stable;
+#endif
                        WARN_ONCE(delta > (1ULL << 59),
-                                 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
+                                 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
                                  (unsigned long long)delta,
                                  (unsigned long long)ts,
-                                 (unsigned long long)cpu_buffer->write_stamp);
+                                 (unsigned long long)cpu_buffer->write_stamp,
+                                 local_clock_stable ? "" :
+                                 "If you just came from a suspend/resume,\n"
+                                 "please switch to the trace global clock:\n"
+                                 "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
                        add_timestamp = 1;
                }
        }
index dc53ecb8058919ed329b02c5644c7f23449cce7b..9541c27c1cf2a8da8e75cbed00d2c09596dabe38 100644 (file)
@@ -41,8 +41,6 @@
 #include "trace.h"
 #include "trace_output.h"
 
-#define TRACE_BUFFER_FLAGS     (RB_FL_OVERWRITE)
-
 /*
  * On boot up, the ring buffer is set to the minimum size, so that
  * we do not waste memory on systems that are not using tracing.
@@ -340,7 +338,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
-       TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD;
+       TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
 
 static int trace_stop_count;
 static DEFINE_SPINLOCK(tracing_start_lock);
@@ -425,6 +423,7 @@ static const char *trace_options[] = {
        "sleep-time",
        "graph-time",
        "record-cmd",
+       "overwrite",
        NULL
 };
 
@@ -780,6 +779,11 @@ __acquires(kernel_lock)
                tracing_reset_online_cpus(tr);
 
                current_trace = type;
+
+               /* If we expanded the buffers, make sure the max is expanded too */
+               if (ring_buffer_expanded && type->use_max_tr)
+                       ring_buffer_resize(max_tr.buffer, trace_buf_size);
+
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
                ret = type->selftest(type, tr);
@@ -792,6 +796,10 @@ __acquires(kernel_lock)
                /* Only reset on passing, to avoid touching corrupted buffers */
                tracing_reset_online_cpus(tr);
 
+               /* Shrink the max buffer again */
+               if (ring_buffer_expanded && type->use_max_tr)
+                       ring_buffer_resize(max_tr.buffer, 1);
+
                printk(KERN_CONT "PASSED\n");
        }
 #endif
@@ -1102,7 +1110,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
        entry->preempt_count            = pc & 0xff;
        entry->pid                      = (tsk) ? tsk->pid : 0;
-       entry->lock_depth               = (tsk) ? tsk->lock_depth : 0;
        entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -1749,10 +1756,9 @@ static void print_lat_help_header(struct seq_file *m)
        seq_puts(m, "#                | / _----=> need-resched    \n");
        seq_puts(m, "#                || / _---=> hardirq/softirq \n");
        seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
-       seq_puts(m, "#                |||| /_--=> lock-depth       \n");
-       seq_puts(m, "#                |||||/     delay             \n");
-       seq_puts(m, "#  cmd     pid   |||||| time  |   caller      \n");
-       seq_puts(m, "#     \\   /      ||||||   \\   |   /           \n");
+       seq_puts(m, "#                |||| /     delay             \n");
+       seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
+       seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
 }
 
 static void print_func_help_header(struct seq_file *m)
@@ -2529,6 +2535,9 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 
        if (mask == TRACE_ITER_RECORD_CMD)
                trace_event_enable_cmd_record(enabled);
+
+       if (mask == TRACE_ITER_OVERWRITE)
+               ring_buffer_change_overwrite(global_trace.buffer, enabled);
 }
 
 static ssize_t
@@ -2710,6 +2719,10 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
 
        mutex_lock(&trace_types_lock);
        if (tracer_enabled ^ val) {
+
+               /* Only need to warn if this is used to change the state */
+               WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
+
                if (val) {
                        tracer_enabled = 1;
                        if (current_trace->start)
@@ -4551,9 +4564,11 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 __init static int tracer_alloc_buffers(void)
 {
        int ring_buf_size;
+       enum ring_buffer_flags rb_flags;
        int i;
        int ret = -ENOMEM;
 
        if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
                goto out;
 
@@ -4566,12 +4581,13 @@ __init static int tracer_alloc_buffers(void)
        else
                ring_buf_size = 1;
 
+       rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
+
        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
        cpumask_copy(tracing_cpumask, cpu_all_mask);
 
        /* TODO: make the number of buffers hot pluggable with CPUS */
-       global_trace.buffer = ring_buffer_alloc(ring_buf_size,
-                                                  TRACE_BUFFER_FLAGS);
+       global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
        if (!global_trace.buffer) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
                WARN_ON(1);
@@ -4581,7 +4597,7 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-       max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS);
+       max_tr.buffer = ring_buffer_alloc(1, rb_flags);
        if (!max_tr.buffer) {
                printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
                WARN_ON(1);
index 9021f8c0c0c3e379edbd8f39770bd9345794e266..5e9dfc6286dd0eafed900906cccee1c83cf4f28c 100644 (file)
@@ -272,8 +272,8 @@ struct tracer {
        /* If you handled the flag setting, return 0 */
        int                     (*set_flag)(u32 old_flags, u32 bit, int set);
        struct tracer           *next;
-       int                     print_max;
        struct tracer_flags     *flags;
+       int                     print_max;
        int                     use_max_tr;
 };
 
@@ -606,6 +606,7 @@ enum trace_iterator_flags {
        TRACE_ITER_SLEEP_TIME           = 0x40000,
        TRACE_ITER_GRAPH_TIME           = 0x80000,
        TRACE_ITER_RECORD_CMD           = 0x100000,
+       TRACE_ITER_OVERWRITE            = 0x200000,
 };
 
 /*
@@ -661,8 +662,10 @@ struct ftrace_event_field {
 };
 
 struct event_filter {
-       int                     n_preds;
-       struct filter_pred      **preds;
+       int                     n_preds;        /* number assigned */
+       int                     a_preds;        /* number allocated */
+       struct filter_pred      *preds;
+       struct filter_pred      *root;
        char                    *filter_string;
 };
 
@@ -674,11 +677,23 @@ struct event_subsystem {
        int                     nr_events;
 };
 
+#define FILTER_PRED_INVALID    ((unsigned short)-1)
+#define FILTER_PRED_IS_RIGHT   (1 << 15)
+#define FILTER_PRED_FOLD       (1 << 15)
+
+/*
+ * The max preds is limited by the size of an unsigned short minus
+ * two flag bits at the MSBs. One bit is used for both the IS_RIGHT
+ * and FOLD flags; the other is reserved.
+ *
+ * 2^14 preds is way more than enough.
+ */
+#define MAX_FILTER_PRED                16384
+
 struct filter_pred;
 struct regex;
 
-typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
-                                int val1, int val2);
+typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
 
 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
 
@@ -700,11 +715,23 @@ struct filter_pred {
        filter_pred_fn_t        fn;
        u64                     val;
        struct regex            regex;
-       char                    *field_name;
+       /*
+        * Leaf nodes use field_name, ops is used by AND and OR
+        * nodes. The field_name is always freed when freeing a pred.
+        * We can overload field_name for ops and have it freed
+        * as well.
+        */
+       union {
+               char            *field_name;
+               unsigned short  *ops;
+       };
        int                     offset;
        int                     not;
        int                     op;
-       int                     pop_n;
+       unsigned short          index;
+       unsigned short          parent;
+       unsigned short          left;
+       unsigned short          right;
 };
 
 extern struct list_head ftrace_common_fields;
index 6cf223764be8aa72489567b90f44fb03d3fb7249..1516cb3ec549a1bd090df147cc156686df3fcd6a 100644 (file)
@@ -109,12 +109,12 @@ FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry,
  */
 #define FTRACE_CTX_FIELDS                                      \
        __field(        unsigned int,   prev_pid        )       \
+       __field(        unsigned int,   next_pid        )       \
+       __field(        unsigned int,   next_cpu        )       \
        __field(        unsigned char,  prev_prio       )       \
        __field(        unsigned char,  prev_state      )       \
-       __field(        unsigned int,   next_pid        )       \
        __field(        unsigned char,  next_prio       )       \
-       __field(        unsigned char,  next_state      )       \
-       __field(        unsigned int,   next_cpu        )
+       __field(        unsigned char,  next_state      )
 
 FTRACE_ENTRY(context_switch, ctx_switch_entry,
 
index 5f499e0438a4f9137b66055331cb8c3b048138ed..e88f74fe1d4ce02fedf3e6052feaf2cbdd2017da 100644 (file)
@@ -116,7 +116,6 @@ static int trace_define_common_fields(void)
        __common_field(unsigned char, flags);
        __common_field(unsigned char, preempt_count);
        __common_field(int, pid);
-       __common_field(int, lock_depth);
 
        return ret;
 }
@@ -326,6 +325,7 @@ int trace_set_clr_event(const char *system, const char *event, int set)
 {
        return __ftrace_set_clr_event(NULL, system, event, set);
 }
+EXPORT_SYMBOL_GPL(trace_set_clr_event);
 
 /* 128 should be much more than enough */
 #define EVENT_BUF_SIZE         127
index 36d40104b17f6d53cdc89b45841cff2679551c56..3249b4f77ef083fdec2a5bba0a52c3f17a482b23 100644 (file)
@@ -123,9 +123,13 @@ struct filter_parse_state {
        } operand;
 };
 
+struct pred_stack {
+       struct filter_pred      **preds;
+       int                     index;
+};
+
 #define DEFINE_COMPARISON_PRED(type)                                   \
-static int filter_pred_##type(struct filter_pred *pred, void *event,   \
-                             int val1, int val2)                       \
+static int filter_pred_##type(struct filter_pred *pred, void *event)   \
 {                                                                      \
        type *addr = (type *)(event + pred->offset);                    \
        type val = (type)pred->val;                                     \
@@ -152,8 +156,7 @@ static int filter_pred_##type(struct filter_pred *pred, void *event,        \
 }
 
 #define DEFINE_EQUALITY_PRED(size)                                     \
-static int filter_pred_##size(struct filter_pred *pred, void *event,   \
-                             int val1, int val2)                       \
+static int filter_pred_##size(struct filter_pred *pred, void *event)   \
 {                                                                      \
        u##size *addr = (u##size *)(event + pred->offset);              \
        u##size val = (u##size)pred->val;                               \
@@ -178,23 +181,8 @@ DEFINE_EQUALITY_PRED(32);
 DEFINE_EQUALITY_PRED(16);
 DEFINE_EQUALITY_PRED(8);
 
-static int filter_pred_and(struct filter_pred *pred __attribute((unused)),
-                          void *event __attribute((unused)),
-                          int val1, int val2)
-{
-       return val1 && val2;
-}
-
-static int filter_pred_or(struct filter_pred *pred __attribute((unused)),
-                         void *event __attribute((unused)),
-                         int val1, int val2)
-{
-       return val1 || val2;
-}
-
 /* Filter predicate for fixed sized arrays of characters */
-static int filter_pred_string(struct filter_pred *pred, void *event,
-                             int val1, int val2)
+static int filter_pred_string(struct filter_pred *pred, void *event)
 {
        char *addr = (char *)(event + pred->offset);
        int cmp, match;
@@ -207,8 +195,7 @@ static int filter_pred_string(struct filter_pred *pred, void *event,
 }
 
 /* Filter predicate for char * pointers */
-static int filter_pred_pchar(struct filter_pred *pred, void *event,
-                            int val1, int val2)
+static int filter_pred_pchar(struct filter_pred *pred, void *event)
 {
        char **addr = (char **)(event + pred->offset);
        int cmp, match;
@@ -231,8 +218,7 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event,
  * and add it to the address of the entry, and at last we have
  * the address of the string.
  */
-static int filter_pred_strloc(struct filter_pred *pred, void *event,
-                             int val1, int val2)
+static int filter_pred_strloc(struct filter_pred *pred, void *event)
 {
        u32 str_item = *(u32 *)(event + pred->offset);
        int str_loc = str_item & 0xffff;
@@ -247,8 +233,7 @@ static int filter_pred_strloc(struct filter_pred *pred, void *event,
        return match;
 }
 
-static int filter_pred_none(struct filter_pred *pred, void *event,
-                           int val1, int val2)
+static int filter_pred_none(struct filter_pred *pred, void *event)
 {
        return 0;
 }
@@ -377,32 +362,147 @@ static void filter_build_regex(struct filter_pred *pred)
        pred->not ^= not;
 }
 
+enum move_type {
+       MOVE_DOWN,
+       MOVE_UP_FROM_LEFT,
+       MOVE_UP_FROM_RIGHT
+};
+
+static struct filter_pred *
+get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
+               int index, enum move_type *move)
+{
+       if (pred->parent & FILTER_PRED_IS_RIGHT)
+               *move = MOVE_UP_FROM_RIGHT;
+       else
+               *move = MOVE_UP_FROM_LEFT;
+       pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
+
+       return pred;
+}
+
+/*
+ * A series of ANDs or ORs were found together. Instead of
+ * climbing up and down the tree branches, an array of the
+ * ops was made in the order of checks. We can just move across
+ * the array and short circuit if needed.
+ */
+static int process_ops(struct filter_pred *preds,
+                      struct filter_pred *op, void *rec)
+{
+       struct filter_pred *pred;
+       int type;
+       int match;
+       int i;
+
+       /*
+        * Micro-optimization: We set type to true if op
+        * is an OR and false otherwise (AND). Then we
+        * just need to test if the match is equal to
+        * the type, and if it is, we can short circuit the
+        * rest of the checks:
+        *
+        * if ((match && op->op == OP_OR) ||
+        *     (!match && op->op == OP_AND))
+        *        return match;
+        */
+       type = op->op == OP_OR;
+
+       for (i = 0; i < op->val; i++) {
+               pred = &preds[op->ops[i]];
+               match = pred->fn(pred, rec);
+               if (!!match == type)
+                       return match;
+       }
+       return match;
+}
+
 /* return 1 if event matches, 0 otherwise (discard) */
 int filter_match_preds(struct event_filter *filter, void *rec)
 {
-       int match, top = 0, val1 = 0, val2 = 0;
-       int stack[MAX_FILTER_PRED];
+       int match = -1;
+       enum move_type move = MOVE_DOWN;
+       struct filter_pred *preds;
        struct filter_pred *pred;
-       int i;
+       struct filter_pred *root;
+       int n_preds;
+       int done = 0;
+
+       /* no filter is considered a match */
+       if (!filter)
+               return 1;
+
+       n_preds = filter->n_preds;
+
+       if (!n_preds)
+               return 1;
+
+       /*
+        * n_preds, root and filter->preds are protected by disabled preemption.
+        */
+       preds = rcu_dereference_sched(filter->preds);
+       root = rcu_dereference_sched(filter->root);
+       if (!root)
+               return 1;
+
+       pred = root;
 
-       for (i = 0; i < filter->n_preds; i++) {
-               pred = filter->preds[i];
-               if (!pred->pop_n) {
-                       match = pred->fn(pred, rec, val1, val2);
-                       stack[top++] = match;
+       /* match is currently meaningless */
+       match = -1;
+
+       do {
+               switch (move) {
+               case MOVE_DOWN:
+                       /* only AND and OR have children */
+                       if (pred->left != FILTER_PRED_INVALID) {
+                               /* If ops is set, then it was folded. */
+                               if (!pred->ops) {
+                                       /* keep going down the left side */
+                                       pred = &preds[pred->left];
+                                       continue;
+                               }
+                               /* We can treat folded ops as a leaf node */
+                               match = process_ops(preds, pred, rec);
+                       } else
+                               match = pred->fn(pred, rec);
+                       /* If this pred is the only pred */
+                       if (pred == root)
+                               break;
+                       pred = get_pred_parent(pred, preds,
+                                              pred->parent, &move);
+                       continue;
+               case MOVE_UP_FROM_LEFT:
+                       /*
+                        * Check for short circuits.
+                        *
+                        * Optimization: !!match == (pred->op == OP_OR)
+                        *   is the same as:
+                        * if ((match && pred->op == OP_OR) ||
+                        *     (!match && pred->op == OP_AND))
+                        */
+                       if (!!match == (pred->op == OP_OR)) {
+                               if (pred == root)
+                                       break;
+                               pred = get_pred_parent(pred, preds,
+                                                      pred->parent, &move);
+                               continue;
+                       }
+                       /* now go down the right side of the tree. */
+                       pred = &preds[pred->right];
+                       move = MOVE_DOWN;
+                       continue;
+               case MOVE_UP_FROM_RIGHT:
+                       /* We finished this equation. */
+                       if (pred == root)
+                               break;
+                       pred = get_pred_parent(pred, preds,
+                                              pred->parent, &move);
                        continue;
                }
-               if (pred->pop_n > top) {
-                       WARN_ON_ONCE(1);
-                       return 0;
-               }
-               val1 = stack[--top];
-               val2 = stack[--top];
-               match = pred->fn(pred, rec, val1, val2);
-               stack[top++] = match;
-       }
+               done = 1;
+       } while (!done);
 
-       return stack[--top];
+       return match;
 }
 EXPORT_SYMBOL_GPL(filter_match_preds);
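
The walk plus process_ops() replaces the old postfix stack machine; a toy user-space model of the folded-ops short circuit (not kernel code, just the control flow):

#include <stdbool.h>
#include <stdio.h>

typedef bool (*pred_fn)(int rec);

static bool is_one(int rec) { return rec == 1; }
static bool is_two(int rec) { return rec == 2; }

/* A folded run of ORs (type == true) or ANDs (type == false): walk the
 * array and stop at the first result equal to the op type. */
static bool process_ops_toy(const pred_fn *ops, int n, bool type, int rec)
{
	bool match = false;
	int i;

	for (i = 0; i < n; i++) {
		match = ops[i](rec);
		if (match == type)
			return match;	/* short circuit */
	}
	return match;
}

int main(void)
{
	const pred_fn ors[] = { is_one, is_two };

	printf("%d\n", process_ops_toy(ors, 2, true, 2));	/* 1 */
	printf("%d\n", process_ops_toy(ors, 2, true, 5));	/* 0 */
	return 0;
}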
 
@@ -414,6 +514,9 @@ static void parse_error(struct filter_parse_state *ps, int err, int pos)
 
 static void remove_filter_string(struct event_filter *filter)
 {
+       if (!filter)
+               return;
+
        kfree(filter->filter_string);
        filter->filter_string = NULL;
 }
@@ -473,9 +576,10 @@ static void append_filter_err(struct filter_parse_state *ps,
 
 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
 {
-       struct event_filter *filter = call->filter;
+       struct event_filter *filter;
 
        mutex_lock(&event_mutex);
+       filter = call->filter;
        if (filter && filter->filter_string)
                trace_seq_printf(s, "%s\n", filter->filter_string);
        else
@@ -486,9 +590,10 @@ void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
 void print_subsystem_event_filter(struct event_subsystem *system,
                                  struct trace_seq *s)
 {
-       struct event_filter *filter = system->filter;
+       struct event_filter *filter;
 
        mutex_lock(&event_mutex);
+       filter = system->filter;
        if (filter && filter->filter_string)
                trace_seq_printf(s, "%s\n", filter->filter_string);
        else
@@ -539,10 +644,58 @@ static void filter_clear_pred(struct filter_pred *pred)
        pred->regex.len = 0;
 }
 
-static int filter_set_pred(struct filter_pred *dest,
+static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
+{
+       stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL);
+       if (!stack->preds)
+               return -ENOMEM;
+       stack->index = n_preds;
+       return 0;
+}
+
+static void __free_pred_stack(struct pred_stack *stack)
+{
+       kfree(stack->preds);
+       stack->index = 0;
+}
+
+static int __push_pred_stack(struct pred_stack *stack,
+                            struct filter_pred *pred)
+{
+       int index = stack->index;
+
+       if (WARN_ON(index == 0))
+               return -ENOSPC;
+
+       stack->preds[--index] = pred;
+       stack->index = index;
+       return 0;
+}
+
+static struct filter_pred *
+__pop_pred_stack(struct pred_stack *stack)
+{
+       struct filter_pred *pred;
+       int index = stack->index;
+
+       pred = stack->preds[index++];
+       if (!pred)
+               return NULL;
+
+       stack->index = index;
+       return pred;
+}
+
+static int filter_set_pred(struct event_filter *filter,
+                          int idx,
+                          struct pred_stack *stack,
                           struct filter_pred *src,
                           filter_pred_fn_t fn)
 {
+       struct filter_pred *dest = &filter->preds[idx];
+       struct filter_pred *left;
+       struct filter_pred *right;
+
        *dest = *src;
        if (src->field_name) {
                dest->field_name = kstrdup(src->field_name, GFP_KERNEL);
@@ -550,116 +703,140 @@ static int filter_set_pred(struct filter_pred *dest,
                        return -ENOMEM;
        }
        dest->fn = fn;
+       dest->index = idx;
 
-       return 0;
+       if (dest->op == OP_OR || dest->op == OP_AND) {
+               right = __pop_pred_stack(stack);
+               left = __pop_pred_stack(stack);
+               if (!left || !right)
+                       return -EINVAL;
+               /*
+                * If both children can be folded
+                * and they are the same op as this op or a leaf,
+                * then this op can be folded.
+                */
+               if (left->index & FILTER_PRED_FOLD &&
+                   (left->op == dest->op ||
+                    left->left == FILTER_PRED_INVALID) &&
+                   right->index & FILTER_PRED_FOLD &&
+                   (right->op == dest->op ||
+                    right->left == FILTER_PRED_INVALID))
+                       dest->index |= FILTER_PRED_FOLD;
+
+               dest->left = left->index & ~FILTER_PRED_FOLD;
+               dest->right = right->index & ~FILTER_PRED_FOLD;
+               left->parent = dest->index & ~FILTER_PRED_FOLD;
+               right->parent = dest->index | FILTER_PRED_IS_RIGHT;
+       } else {
+               /*
+                * Make dest->left invalid to be used as a quick
+                * way to know this is a leaf node.
+                */
+               dest->left = FILTER_PRED_INVALID;
+
+               /* All leafs allow folding the parent ops. */
+               dest->index |= FILTER_PRED_FOLD;
+       }
+
+       return __push_pred_stack(stack, dest);
 }
 
-static void filter_disable_preds(struct ftrace_event_call *call)
+static void __free_preds(struct event_filter *filter)
 {
-       struct event_filter *filter = call->filter;
        int i;
 
-       call->flags &= ~TRACE_EVENT_FL_FILTERED;
+       if (filter->preds) {
+               for (i = 0; i < filter->a_preds; i++)
+                       kfree(filter->preds[i].field_name);
+               kfree(filter->preds);
+               filter->preds = NULL;
+       }
+       filter->a_preds = 0;
        filter->n_preds = 0;
-
-       for (i = 0; i < MAX_FILTER_PRED; i++)
-               filter->preds[i]->fn = filter_pred_none;
 }
 
-static void __free_preds(struct event_filter *filter)
+static void filter_disable(struct ftrace_event_call *call)
 {
-       int i;
+       call->flags &= ~TRACE_EVENT_FL_FILTERED;
+}
 
+static void __free_filter(struct event_filter *filter)
+{
        if (!filter)
                return;
 
-       for (i = 0; i < MAX_FILTER_PRED; i++) {
-               if (filter->preds[i])
-                       filter_free_pred(filter->preds[i]);
-       }
-       kfree(filter->preds);
+       __free_preds(filter);
        kfree(filter->filter_string);
        kfree(filter);
 }
 
+/*
+ * Called when destroying the ftrace_event_call.
+ * The call is being freed, so we do not need to worry about
+ * the call being currently used. This is for module code removing
+ * the tracepoints from within it.
+ */
 void destroy_preds(struct ftrace_event_call *call)
 {
-       __free_preds(call->filter);
+       __free_filter(call->filter);
        call->filter = NULL;
-       call->flags &= ~TRACE_EVENT_FL_FILTERED;
 }
 
-static struct event_filter *__alloc_preds(void)
+static struct event_filter *__alloc_filter(void)
 {
        struct event_filter *filter;
+
+       filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+       return filter;
+}
+
+static int __alloc_preds(struct event_filter *filter, int n_preds)
+{
        struct filter_pred *pred;
        int i;
 
-       filter = kzalloc(sizeof(*filter), GFP_KERNEL);
-       if (!filter)
-               return ERR_PTR(-ENOMEM);
+       if (filter->preds)
+               __free_preds(filter);
 
-       filter->n_preds = 0;
+       filter->preds =
+               kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL);
 
-       filter->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred), GFP_KERNEL);
        if (!filter->preds)
-               goto oom;
+               return -ENOMEM;
 
-       for (i = 0; i < MAX_FILTER_PRED; i++) {
-               pred = kzalloc(sizeof(*pred), GFP_KERNEL);
-               if (!pred)
-                       goto oom;
+       filter->a_preds = n_preds;
+       filter->n_preds = 0;
+
+       for (i = 0; i < n_preds; i++) {
+               pred = &filter->preds[i];
                pred->fn = filter_pred_none;
-               filter->preds[i] = pred;
        }
 
-       return filter;
-
-oom:
-       __free_preds(filter);
-       return ERR_PTR(-ENOMEM);
-}
-
-static int init_preds(struct ftrace_event_call *call)
-{
-       if (call->filter)
-               return 0;
-
-       call->flags &= ~TRACE_EVENT_FL_FILTERED;
-       call->filter = __alloc_preds();
-       if (IS_ERR(call->filter))
-               return PTR_ERR(call->filter);
-
        return 0;
 }
 
-static int init_subsystem_preds(struct event_subsystem *system)
+static void filter_free_subsystem_preds(struct event_subsystem *system)
 {
        struct ftrace_event_call *call;
-       int err;
 
        list_for_each_entry(call, &ftrace_events, list) {
                if (strcmp(call->class->system, system->name) != 0)
                        continue;
 
-               err = init_preds(call);
-               if (err)
-                       return err;
+               filter_disable(call);
+               remove_filter_string(call->filter);
        }
-
-       return 0;
 }
 
-static void filter_free_subsystem_preds(struct event_subsystem *system)
+static void filter_free_subsystem_filters(struct event_subsystem *system)
 {
        struct ftrace_event_call *call;
 
        list_for_each_entry(call, &ftrace_events, list) {
                if (strcmp(call->class->system, system->name) != 0)
                        continue;
-
-               filter_disable_preds(call);
-               remove_filter_string(call->filter);
+               __free_filter(call->filter);
+               call->filter = NULL;
        }
 }
 
@@ -667,18 +844,19 @@ static int filter_add_pred_fn(struct filter_parse_state *ps,
                              struct ftrace_event_call *call,
                              struct event_filter *filter,
                              struct filter_pred *pred,
+                             struct pred_stack *stack,
                              filter_pred_fn_t fn)
 {
        int idx, err;
 
-       if (filter->n_preds == MAX_FILTER_PRED) {
+       if (WARN_ON(filter->n_preds == filter->a_preds)) {
                parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
                return -ENOSPC;
        }
 
        idx = filter->n_preds;
-       filter_clear_pred(filter->preds[idx]);
-       err = filter_set_pred(filter->preds[idx], pred, fn);
+       filter_clear_pred(&filter->preds[idx]);
+       err = filter_set_pred(filter, idx, stack, pred, fn);
        if (err)
                return err;
 
@@ -763,6 +941,7 @@ static int filter_add_pred(struct filter_parse_state *ps,
                           struct ftrace_event_call *call,
                           struct event_filter *filter,
                           struct filter_pred *pred,
+                          struct pred_stack *stack,
                           bool dry_run)
 {
        struct ftrace_event_field *field;
@@ -770,17 +949,12 @@ static int filter_add_pred(struct filter_parse_state *ps,
        unsigned long long val;
        int ret;
 
-       pred->fn = filter_pred_none;
+       fn = pred->fn = filter_pred_none;
 
-       if (pred->op == OP_AND) {
-               pred->pop_n = 2;
-               fn = filter_pred_and;
+       if (pred->op == OP_AND)
                goto add_pred_fn;
-       } else if (pred->op == OP_OR) {
-               pred->pop_n = 2;
-               fn = filter_pred_or;
+       else if (pred->op == OP_OR)
                goto add_pred_fn;
-       }
 
        field = find_event_field(call, pred->field_name);
        if (!field) {
@@ -829,7 +1003,7 @@ static int filter_add_pred(struct filter_parse_state *ps,
 
 add_pred_fn:
        if (!dry_run)
-               return filter_add_pred_fn(ps, call, filter, pred, fn);
+               return filter_add_pred_fn(ps, call, filter, pred, stack, fn);
        return 0;
 }
 
@@ -1187,6 +1361,234 @@ static int check_preds(struct filter_parse_state *ps)
        return 0;
 }
 
+static int count_preds(struct filter_parse_state *ps)
+{
+       struct postfix_elt *elt;
+       int n_preds = 0;
+
+       list_for_each_entry(elt, &ps->postfix, list) {
+               if (elt->op == OP_NONE)
+                       continue;
+               n_preds++;
+       }
+
+       return n_preds;
+}
+
+/*
+ * The tree is walked when filtering an event. If the tree is not correctly
+ * built, it may cause an infinite loop. Check here that the tree does
+ * indeed terminate.
+ */
+static int check_pred_tree(struct event_filter *filter,
+                          struct filter_pred *root)
+{
+       struct filter_pred *preds;
+       struct filter_pred *pred;
+       enum move_type move = MOVE_DOWN;
+       int count = 0;
+       int done = 0;
+       int max;
+
+       /*
+        * A node can be hit at most three times:
+        * once going down, once coming up from the left, and
+        * once coming up from the right. This is more than enough
+        * since leafs are only hit a single time.
+        */
+       max = 3 * filter->n_preds;
+
+       preds = filter->preds;
+       if (!preds)
+               return -EINVAL;
+       pred = root;
+
+       do {
+               if (WARN_ON(count++ > max))
+                       return -EINVAL;
+
+               switch (move) {
+               case MOVE_DOWN:
+                       if (pred->left != FILTER_PRED_INVALID) {
+                               pred = &preds[pred->left];
+                               continue;
+                       }
+                       /* A leaf at the root is just a leaf in the tree */
+                       if (pred == root)
+                               break;
+                       pred = get_pred_parent(pred, preds,
+                                              pred->parent, &move);
+                       continue;
+               case MOVE_UP_FROM_LEFT:
+                       pred = &preds[pred->right];
+                       move = MOVE_DOWN;
+                       continue;
+               case MOVE_UP_FROM_RIGHT:
+                       if (pred == root)
+                               break;
+                       pred = get_pred_parent(pred, preds,
+                                              pred->parent, &move);
+                       continue;
+               }
+               done = 1;
+       } while (!done);
+
+       /* We are fine. */
+       return 0;
+}
+
+static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
+{
+       struct filter_pred *pred;
+       enum move_type move = MOVE_DOWN;
+       int count = 0;
+       int done = 0;
+
+       pred = root;
+
+       do {
+               switch (move) {
+               case MOVE_DOWN:
+                       if (pred->left != FILTER_PRED_INVALID) {
+                               pred = &preds[pred->left];
+                               continue;
+                       }
+                       /* A leaf at the root is just a leaf in the tree */
+                       if (pred == root)
+                               return 1;
+                       count++;
+                       pred = get_pred_parent(pred, preds,
+                                              pred->parent, &move);
+                       continue;
+               case MOVE_UP_FROM_LEFT:
+                       pred = &preds[pred->right];
+                       move = MOVE_DOWN;
+                       continue;
+               case MOVE_UP_FROM_RIGHT:
+                       if (pred == root)
+                               break;
+                       pred = get_pred_parent(pred, preds,
+                                              pred->parent, &move);
+                       continue;
+               }
+               done = 1;
+       } while (!done);
+
+       return count;
+}
+
+static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
+{
+       struct filter_pred *pred;
+       enum move_type move = MOVE_DOWN;
+       int count = 0;
+       int children;
+       int done = 0;
+
+       /* No need to keep the fold flag */
+       root->index &= ~FILTER_PRED_FOLD;
+
+       /* If the root is a leaf then do nothing */
+       if (root->left == FILTER_PRED_INVALID)
+               return 0;
+
+       /* count the children */
+       children = count_leafs(preds, &preds[root->left]);
+       children += count_leafs(preds, &preds[root->right]);
+
+       root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL);
+       if (!root->ops)
+               return -ENOMEM;
+
+       root->val = children;
+
+       pred = root;
+       do {
+               switch (move) {
+               case MOVE_DOWN:
+                       if (pred->left != FILTER_PRED_INVALID) {
+                               pred = &preds[pred->left];
+                               continue;
+                       }
+                       if (WARN_ON(count == children))
+                               return -EINVAL;
+                       pred->index &= ~FILTER_PRED_FOLD;
+                       root->ops[count++] = pred->index;
+                       pred = get_pred_parent(pred, preds,
+                                              pred->parent, &move);
+                       continue;
+               case MOVE_UP_FROM_LEFT:
+                       pred = &preds[pred->right];
+                       move = MOVE_DOWN;
+                       continue;
+               case MOVE_UP_FROM_RIGHT:
+                       if (pred == root)
+                               break;
+                       pred = get_pred_parent(pred, preds,
+                                              pred->parent, &move);
+                       continue;
+               }
+               done = 1;
+       } while (!done);
+
+       return 0;
+}
+
+/*
+ * To optimize the processing of the ops, if we have several "ors" or
+ * "ands" together, we can put them in an array and process them all
+ * together speeding up the filter logic.
+ */
+static int fold_pred_tree(struct event_filter *filter,
+                          struct filter_pred *root)
+{
+       struct filter_pred *preds;
+       struct filter_pred *pred;
+       enum move_type move = MOVE_DOWN;
+       int done = 0;
+       int err;
+
+       preds = filter->preds;
+       if (!preds)
+               return -EINVAL;
+       pred = root;
+
+       do {
+               switch (move) {
+               case MOVE_DOWN:
+                       if (pred->index & FILTER_PRED_FOLD) {
+                               err = fold_pred(preds, pred);
+                               if (err)
+                                       return err;
+                               /* Folded nodes are like leafs */
+                       } else if (pred->left != FILTER_PRED_INVALID) {
+                               pred = &preds[pred->left];
+                               continue;
+                       }
+
+                       /* A leaf at the root is just a leaf in the tree */
+                       if (pred == root)
+                               break;
+                       pred = get_pred_parent(pred, preds,
+                                              pred->parent, &move);
+                       continue;
+               case MOVE_UP_FROM_LEFT:
+                       pred = &preds[pred->right];
+                       move = MOVE_DOWN;
+                       continue;
+               case MOVE_UP_FROM_RIGHT:
+                       if (pred == root)
+                               break;
+                       pred = get_pred_parent(pred, preds,
+                                              pred->parent, &move);
+                       continue;
+               }
+               done = 1;
+       } while (!done);
+
+       return 0;
+}
+
 static int replace_preds(struct ftrace_event_call *call,
                         struct event_filter *filter,
                         struct filter_parse_state *ps,
@@ -1195,14 +1597,32 @@ static int replace_preds(struct ftrace_event_call *call,
 {
        char *operand1 = NULL, *operand2 = NULL;
        struct filter_pred *pred;
+       struct filter_pred *root;
        struct postfix_elt *elt;
+       struct pred_stack stack = { }; /* init to NULL */
        int err;
        int n_preds = 0;
 
+       n_preds = count_preds(ps);
+       if (n_preds >= MAX_FILTER_PRED) {
+               parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
+               return -ENOSPC;
+       }
+
        err = check_preds(ps);
        if (err)
                return err;
 
+       if (!dry_run) {
+               err = __alloc_pred_stack(&stack, n_preds);
+               if (err)
+                       return err;
+               err = __alloc_preds(filter, n_preds);
+               if (err)
+                       goto fail;
+       }
+
+       n_preds = 0;
        list_for_each_entry(elt, &ps->postfix, list) {
                if (elt->op == OP_NONE) {
                        if (!operand1)
@@ -1211,14 +1631,16 @@ static int replace_preds(struct ftrace_event_call *call,
                                operand2 = elt->operand;
                        else {
                                parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
-                               return -EINVAL;
+                               err = -EINVAL;
+                               goto fail;
                        }
                        continue;
                }
 
-               if (n_preds++ == MAX_FILTER_PRED) {
+               if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
                        parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
-                       return -ENOSPC;
+                       err = -ENOSPC;
+                       goto fail;
                }
 
                if (elt->op == OP_AND || elt->op == OP_OR) {
@@ -1228,76 +1650,181 @@ static int replace_preds(struct ftrace_event_call *call,
 
                if (!operand1 || !operand2) {
                        parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
-                       return -EINVAL;
+                       err = -EINVAL;
+                       goto fail;
                }
 
                pred = create_pred(elt->op, operand1, operand2);
 add_pred:
-               if (!pred)
-                       return -ENOMEM;
-               err = filter_add_pred(ps, call, filter, pred, dry_run);
+               if (!pred) {
+                       err = -ENOMEM;
+                       goto fail;
+               }
+               err = filter_add_pred(ps, call, filter, pred, &stack, dry_run);
                filter_free_pred(pred);
                if (err)
-                       return err;
+                       goto fail;
 
                operand1 = operand2 = NULL;
        }
 
-       return 0;
+       if (!dry_run) {
+               /* We should have one item left on the stack */
+               pred = __pop_pred_stack(&stack);
+               if (!pred) {
+                       err = -EINVAL;
+                       goto fail;
+               }
+               /* This item is where we start from in matching */
+               root = pred;
+               /* Make sure the stack is empty */
+               pred = __pop_pred_stack(&stack);
+               if (WARN_ON(pred)) {
+                       err = -EINVAL;
+                       filter->root = NULL;
+                       goto fail;
+               }
+               err = check_pred_tree(filter, root);
+               if (err)
+                       goto fail;
+
+               /* Optimize the tree */
+               err = fold_pred_tree(filter, root);
+               if (err)
+                       goto fail;
+
+               /* We don't set root until we know it works */
+               barrier();
+               filter->root = root;
+       }
+
+       err = 0;
+fail:
+       __free_pred_stack(&stack);
+       return err;
 }
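+
+/*
+ * Scratch list used while a subsystem's filters are replaced: a fresh
+ * filter is built and swapped in for every matching call, and the
+ * displaced old filter is parked here so it can be freed once a
+ * synchronize_sched() grace period guarantees no tracer still reads
+ * it.
+ */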
 
+struct filter_list {
+       struct list_head        list;
+       struct event_filter     *filter;
+};
+
 static int replace_system_preds(struct event_subsystem *system,
                                struct filter_parse_state *ps,
                                char *filter_string)
 {
        struct ftrace_event_call *call;
+       struct filter_list *filter_item;
+       struct filter_list *tmp;
+       LIST_HEAD(filter_list);
        bool fail = true;
        int err;
 
        list_for_each_entry(call, &ftrace_events, list) {
-               struct event_filter *filter = call->filter;
 
                if (strcmp(call->class->system, system->name) != 0)
                        continue;
 
-               /* try to see if the filter can be applied */
-               err = replace_preds(call, filter, ps, filter_string, true);
+               /*
+                * Try to see if the filter can be applied
+                *  (filter arg is ignored on dry_run)
+                */
+               err = replace_preds(call, NULL, ps, filter_string, true);
                if (err)
+                       goto fail;
+       }
+
+       list_for_each_entry(call, &ftrace_events, list) {
+               struct event_filter *filter;
+
+               if (strcmp(call->class->system, system->name) != 0)
                        continue;
 
-               /* really apply the filter */
-               filter_disable_preds(call);
-               err = replace_preds(call, filter, ps, filter_string, false);
+               filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
+               if (!filter_item)
+                       goto fail_mem;
+
+               list_add_tail(&filter_item->list, &filter_list);
+
+               filter_item->filter = __alloc_filter();
+               if (!filter_item->filter)
+                       goto fail_mem;
+               filter = filter_item->filter;
+
+               /* Can only fail due to lack of memory */
+               err = replace_filter_string(filter, filter_string);
                if (err)
-                       filter_disable_preds(call);
-               else {
+                       goto fail_mem;
+
+               err = replace_preds(call, filter, ps, filter_string, false);
+               if (err) {
+                       filter_disable(call);
+                       parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
+                       append_filter_err(ps, filter);
+               } else
                        call->flags |= TRACE_EVENT_FL_FILTERED;
-                       replace_filter_string(filter, filter_string);
-               }
+               /*
+                * Regardless of whether this returned an error, we still
+                * replace the filter for the call.
+                */
+               filter = call->filter;
+               call->filter = filter_item->filter;
+               filter_item->filter = filter;
+
                fail = false;
        }
 
-       if (fail) {
-               parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
-               return -EINVAL;
+       if (fail)
+               goto fail;
+
+       /*
+        * The calls can still be using the old filters.
+        * Do a synchronize_sched() to ensure all calls are
+        * done with them before we free them.
+        */
+       synchronize_sched();
+       list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
+               __free_filter(filter_item->filter);
+               list_del(&filter_item->list);
+               kfree(filter_item);
        }
        return 0;
+ fail:
+       /* No call succeeded */
+       list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
+               list_del(&filter_item->list);
+               kfree(filter_item);
+       }
+       parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
+       return -EINVAL;
+ fail_mem:
+       /* If any call succeeded, we still need to sync */
+       if (!fail)
+               synchronize_sched();
+       list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
+               __free_filter(filter_item->filter);
+               list_del(&filter_item->list);
+               kfree(filter_item);
+       }
+       return -ENOMEM;
 }
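+
+/*
+ * Filter replacement in this file follows an RCU-like pattern: build
+ * a complete new event_filter, publish it with a plain pointer swap
+ * under event_mutex, then synchronize_sched() before freeing the old
+ * one, since filter_match_preds() runs in contexts with preemption
+ * disabled rather than under the mutex.
+ */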
 
 int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
 {
-       int err;
        struct filter_parse_state *ps;
+       struct event_filter *filter;
+       struct event_filter *tmp;
+       int err = 0;
 
        mutex_lock(&event_mutex);
 
-       err = init_preds(call);
-       if (err)
-               goto out_unlock;
-
        if (!strcmp(strstrip(filter_string), "0")) {
-               filter_disable_preds(call);
-               remove_filter_string(call->filter);
+               filter_disable(call);
+               filter = call->filter;
+               if (!filter)
+                       goto out_unlock;
+               call->filter = NULL;
+               /* Make sure the filter is not being used */
+               synchronize_sched();
+               __free_filter(filter);
                goto out_unlock;
        }
 
@@ -1306,22 +1833,41 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
        if (!ps)
                goto out_unlock;
 
-       filter_disable_preds(call);
-       replace_filter_string(call->filter, filter_string);
+       filter = __alloc_filter();
+       if (!filter) {
+               kfree(ps);
+               goto out_unlock;
+       }
+
+       replace_filter_string(filter, filter_string);
 
        parse_init(ps, filter_ops, filter_string);
        err = filter_parse(ps);
        if (err) {
-               append_filter_err(ps, call->filter);
+               append_filter_err(ps, filter);
                goto out;
        }
 
-       err = replace_preds(call, call->filter, ps, filter_string, false);
-       if (err)
-               append_filter_err(ps, call->filter);
-       else
+       err = replace_preds(call, filter, ps, filter_string, false);
+       if (err) {
+               filter_disable(call);
+               append_filter_err(ps, filter);
+       } else
                call->flags |= TRACE_EVENT_FL_FILTERED;
 out:
+       /*
+        * Always swap the call filter with the new filter,
+        * even if there was an error. If the filter had an
+        * error, we disable it and show the error string.
+        */
+       tmp = call->filter;
+       call->filter = filter;
+       if (tmp) {
+               /* Make sure the call is done with the filter */
+               synchronize_sched();
+               __free_filter(tmp);
+       }
        filter_opstack_clear(ps);
        postfix_clear(ps);
        kfree(ps);
@@ -1334,18 +1880,21 @@ out_unlock:
 int apply_subsystem_event_filter(struct event_subsystem *system,
                                 char *filter_string)
 {
-       int err;
        struct filter_parse_state *ps;
+       struct event_filter *filter;
+       int err = 0;
 
        mutex_lock(&event_mutex);
 
-       err = init_subsystem_preds(system);
-       if (err)
-               goto out_unlock;
-
        if (!strcmp(strstrip(filter_string), "0")) {
                filter_free_subsystem_preds(system);
                remove_filter_string(system->filter);
+               filter = system->filter;
+               system->filter = NULL;
+               /* Ensure all filters are no longer used */
+               synchronize_sched();
+               filter_free_subsystem_filters(system);
+               __free_filter(filter);
                goto out_unlock;
        }
 
@@ -1354,7 +1903,17 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
        if (!ps)
                goto out_unlock;
 
-       replace_filter_string(system->filter, filter_string);
+       filter = __alloc_filter();
+       if (!filter)
+               goto out;
+
+       replace_filter_string(filter, filter_string);
+       /*
+        * No event actually uses the system filter,
+        * so we can free it without synchronize_sched().
+        */
+       __free_filter(system->filter);
+       system->filter = filter;
 
        parse_init(ps, filter_ops, filter_string);
        err = filter_parse(ps);
@@ -1384,7 +1943,7 @@ void ftrace_profile_free_filter(struct perf_event *event)
        struct event_filter *filter = event->filter;
 
        event->filter = NULL;
-       __free_preds(filter);
+       __free_filter(filter);
 }
 
 int ftrace_profile_set_filter(struct perf_event *event, int event_id,
@@ -1410,8 +1969,8 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
        if (event->filter)
                goto out_unlock;
 
-       filter = __alloc_preds();
-       if (IS_ERR(filter)) {
+       filter = __alloc_filter();
+       if (!filter) {
-               err = PTR_ERR(filter);
+               err = -ENOMEM;
                goto out_unlock;
        }
@@ -1419,7 +1978,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
        err = -ENOMEM;
        ps = kzalloc(sizeof(*ps), GFP_KERNEL);
        if (!ps)
-               goto free_preds;
+               goto free_filter;
 
        parse_init(ps, filter_ops, filter_str);
        err = filter_parse(ps);
@@ -1435,9 +1994,9 @@ free_ps:
        postfix_clear(ps);
        kfree(ps);
 
-free_preds:
+free_filter:
        if (err)
-               __free_preds(filter);
+               __free_filter(filter);
 
 out_unlock:
        mutex_unlock(&event_mutex);
index 2dec9bcde8b495bd43d189204e2bc9099d5ec32f..8435b43b1782d5b455f80415452ac1e5dd1f6e0e 100644 (file)
@@ -353,6 +353,43 @@ static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
        kfree(data);
 }
 
+/* Bitfield fetch function */
+struct bitfield_fetch_param {
+       struct fetch_param orig;
+       unsigned char hi_shift;
+       unsigned char low_shift;
+};
+
+#define DEFINE_FETCH_bitfield(type)                                    \
+static __kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs,\
+                                           void *data, void *dest)     \
+{                                                                      \
+       struct bitfield_fetch_param *bprm = data;                       \
+       type buf = 0;                                                   \
+       call_fetch(&bprm->orig, regs, &buf);                            \
+       if (buf) {                                                      \
+               buf <<= bprm->hi_shift;                                 \
+               buf >>= bprm->low_shift;                                \
+       }                                                               \
+       *(type *)dest = buf;                                            \
+}
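+/*
+ * Each generated fetch function reads the whole container through the
+ * wrapped fetch (bprm->orig), then isolates the field with two
+ * shifts: the left shift drops the bits above the field and the
+ * logical right shift (the types are unsigned) drops the bits below
+ * it, aligning the result at bit 0.
+ */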
+DEFINE_BASIC_FETCH_FUNCS(bitfield)
+#define fetch_bitfield_string NULL
+#define fetch_bitfield_string_size NULL
+
+static __kprobes void
+free_bitfield_fetch_param(struct bitfield_fetch_param *data)
+{
+       /*
+        * Don't check the bitfield itself, because this must be the
+        * last fetch function.
+        */
+       if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
+               free_deref_fetch_param(data->orig.data);
+       else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
+               free_symbol_cache(data->orig.data);
+       kfree(data);
+}
+
 /* Default (unsigned long) fetch type */
 #define __DEFAULT_FETCH_TYPE(t) u##t
 #define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
@@ -367,6 +404,7 @@ enum {
        FETCH_MTD_memory,
        FETCH_MTD_symbol,
        FETCH_MTD_deref,
+       FETCH_MTD_bitfield,
        FETCH_MTD_END,
 };
 
@@ -387,6 +425,7 @@ ASSIGN_FETCH_FUNC(retval, ftype),                   \
 ASSIGN_FETCH_FUNC(memory, ftype),                      \
 ASSIGN_FETCH_FUNC(symbol, ftype),                      \
 ASSIGN_FETCH_FUNC(deref, ftype),                       \
+ASSIGN_FETCH_FUNC(bitfield, ftype),                    \
          }                                             \
        }
 
@@ -430,9 +469,33 @@ static const struct fetch_type *find_fetch_type(const char *type)
        if (!type)
                type = DEFAULT_FETCH_TYPE_STR;
 
+       /* Special case: bitfield */
+       if (*type == 'b') {
+               unsigned long bs;
+               type = strchr(type, '/');
+               if (!type)
+                       goto fail;
+               type++;
+               if (strict_strtoul(type, 0, &bs))
+                       goto fail;
+               switch (bs) {
+               case 8:
+                       return find_fetch_type("u8");
+               case 16:
+                       return find_fetch_type("u16");
+               case 32:
+                       return find_fetch_type("u32");
+               case 64:
+                       return find_fetch_type("u64");
+               default:
+                       goto fail;
+               }
+       }
+
        for (i = 0; i < ARRAY_SIZE(fetch_type_table); i++)
                if (strcmp(type, fetch_type_table[i].name) == 0)
                        return &fetch_type_table[i];
+fail:
        return NULL;
 }
 
@@ -586,7 +649,9 @@ error:
 
 static void free_probe_arg(struct probe_arg *arg)
 {
-       if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
+       if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
+               free_bitfield_fetch_param(arg->fetch.data);
+       else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
                free_deref_fetch_param(arg->fetch.data);
        else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
                free_symbol_cache(arg->fetch.data);
@@ -767,16 +832,15 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t,
                }
                break;
        case '+':       /* deref memory */
+               arg++;  /* Skip '+', because strict_strtol() rejects it; fall through. */
        case '-':
                tmp = strchr(arg, '(');
                if (!tmp)
                        break;
                *tmp = '\0';
-               ret = strict_strtol(arg + 1, 0, &offset);
+               ret = strict_strtol(arg, 0, &offset);
                if (ret)
                        break;
-               if (arg[0] == '-')
-                       offset = -offset;
                arg = tmp + 1;
                tmp = strrchr(arg, ')');
                if (tmp) {
@@ -807,6 +871,41 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t,
        return ret;
 }
 
+#define BYTES_TO_BITS(nb)      ((BITS_PER_LONG * (nb)) / sizeof(long))
+
+/* Bitfield type needs to be parsed into a fetch function */
+static int __parse_bitfield_probe_arg(const char *bf,
+                                     const struct fetch_type *t,
+                                     struct fetch_param *f)
+{
+       struct bitfield_fetch_param *bprm;
+       unsigned long bw, bo;
+       char *tail;
+
+       if (*bf != 'b')
+               return 0;
+
+       bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
+       if (!bprm)
+               return -ENOMEM;
+       bprm->orig = *f;
+       f->fn = t->fetch[FETCH_MTD_bitfield];
+       f->data = (void *)bprm;
+
+       bw = simple_strtoul(bf + 1, &tail, 0);  /* Use the simple, non-strict variant */
+       if (bw == 0 || *tail != '@')
+               return -EINVAL;
+
+       bf = tail + 1;
+       bo = simple_strtoul(bf, &tail, 0);
+       if (tail == bf || *tail != '/')
+               return -EINVAL;
+
+       bprm->hi_shift = BYTES_TO_BITS(t->size) - (bw + bo);
+       bprm->low_shift = bprm->hi_shift + bo;
+       return (BYTES_TO_BITS(t->size) < (bw + bo)) ? -EINVAL : 0;
+}
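+
+/*
+ * Worked example (hypothetical argument): for "b3@5/32" the container
+ * resolves to u32, so BYTES_TO_BITS(4) == 32, bw == 3 and bo == 5,
+ * giving hi_shift = 32 - (3 + 5) = 24 and low_shift = 24 + 5 = 29.
+ * Fetching the value 0x000000e0 then yields (0xe0 << 24) >> 29 == 0x7,
+ * i.e. bits 5..7 counted from the least significant bit.
+ */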
+
 /* String length checking wrapper */
 static int parse_probe_arg(char *arg, struct trace_probe *tp,
                           struct probe_arg *parg, int is_return)
@@ -836,6 +935,8 @@ static int parse_probe_arg(char *arg, struct trace_probe *tp,
        parg->offset = tp->size;
        tp->size += parg->type->size;
        ret = __parse_probe_arg(arg, parg->type, &parg->fetch, is_return);
+       if (ret >= 0 && t != NULL)
+               ret = __parse_bitfield_probe_arg(t, parg->type, &parg->fetch);
        if (ret >= 0) {
                parg->fetch_size.fn = get_fetch_size_function(parg->type,
                                                              parg->fetch.fn);
@@ -1130,7 +1231,7 @@ static int command_trace_probe(const char *buf)
        return ret;
 }
 
-#define WRITE_BUFSIZE 128
+#define WRITE_BUFSIZE 4096
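+/* Large enough that a whole probe definition fits in one parsed chunk */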
 
 static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
index 02272baa22065c14c98e1c7394d2486a4d6a473e..456be9063c2d85664b6bb480c4b5d2c4c4a88f14 100644 (file)
@@ -529,24 +529,34 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
  * @entry: The trace entry field from the ring buffer
  *
  * Prints the generic fields of irqs off, in hard or softirq, preempt
- * count and lock depth.
+ * count.
  */
 int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 {
-       int hardirq, softirq;
+       char hardsoft_irq;
+       char need_resched;
+       char irqs_off;
+       int hardirq;
+       int softirq;
        int ret;
 
        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
        softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
 
+       irqs_off =
+               (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
+               (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
+               '.';
+       need_resched =
+               (entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
+       hardsoft_irq =
+               (hardirq && softirq) ? 'H' :
+               hardirq ? 'h' :
+               softirq ? 's' :
+               '.';
+
        if (!trace_seq_printf(s, "%c%c%c",
-                             (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
-                               (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
-                                 'X' : '.',
-                             (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
-                               'N' : '.',
-                             (hardirq && softirq) ? 'H' :
-                               hardirq ? 'h' : softirq ? 's' : '.'))
+                             irqs_off, need_resched, hardsoft_irq))
                return 0;
 
        if (entry->preempt_count)
@@ -554,13 +564,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
        else
                ret = trace_seq_putc(s, '.');
 
-       if (!ret)
-               return 0;
-
-       if (entry->lock_depth < 0)
-               return trace_seq_putc(s, '.');
-
-       return trace_seq_printf(s, "%d", entry->lock_depth);
+       return ret;
 }
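+
+/*
+ * Example (illustrative): a latency field of "dNh1" means irqs were
+ * off ('d'), a reschedule was pending ('N'), the event fired in hard
+ * irq context ('h') and preempt_count was 1; '.' marks an unset flag.
+ */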
 
 static int
index 8f758d070c43777d96fb85515d3161c1eae1c63e..7e62c0a18456ebbad8fc66a83a9f2578a081e201 100644 (file)
@@ -247,51 +247,3 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr)
        ctx_trace = tr;
 }
 
-static void stop_sched_trace(struct trace_array *tr)
-{
-       tracing_stop_sched_switch_record();
-}
-
-static int sched_switch_trace_init(struct trace_array *tr)
-{
-       ctx_trace = tr;
-       tracing_reset_online_cpus(tr);
-       tracing_start_sched_switch_record();
-       return 0;
-}
-
-static void sched_switch_trace_reset(struct trace_array *tr)
-{
-       if (sched_ref)
-               stop_sched_trace(tr);
-}
-
-static void sched_switch_trace_start(struct trace_array *tr)
-{
-       sched_stopped = 0;
-}
-
-static void sched_switch_trace_stop(struct trace_array *tr)
-{
-       sched_stopped = 1;
-}
-
-static struct tracer sched_switch_trace __read_mostly =
-{
-       .name           = "sched_switch",
-       .init           = sched_switch_trace_init,
-       .reset          = sched_switch_trace_reset,
-       .start          = sched_switch_trace_start,
-       .stop           = sched_switch_trace_stop,
-       .wait_pipe      = poll_wait_pipe,
-#ifdef CONFIG_FTRACE_SELFTEST
-       .selftest    = trace_selftest_startup_sched_switch,
-#endif
-};
-
-__init static int init_sched_switch_trace(void)
-{
-       return register_tracer(&sched_switch_trace);
-}
-device_initcall(init_sched_switch_trace);
-
index 5c9fe08d209336e3e0ac1956e56c9a6910a29bfb..ee7b5a0bb9f87c1b9abb5964bfd4c54fd99ccb2d 100644 (file)
@@ -60,6 +60,19 @@ extern struct syscall_metadata *__stop_syscalls_metadata[];
 
 static struct syscall_metadata **syscalls_metadata;
 
+#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
+{
+       /*
+        * Only compare after the "sys" prefix. Archs that use
+        * syscall wrappers may have syscall symbol aliases prefixed
+        * with "SyS" instead of "sys", leading to an unwanted
+        * mismatch.
+        */
+       return !strcmp(sym + 3, name + 3);
+}
+#endif
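+
+/*
+ * E.g. on an arch using syscall wrappers, kallsyms may resolve a
+ * syscall address to "SyS_read" while the metadata records "sys_read";
+ * comparing from the fourth character on makes the two match.
+ */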
+
 static __init struct syscall_metadata *
 find_syscall_meta(unsigned long syscall)
 {
@@ -72,14 +85,11 @@ find_syscall_meta(unsigned long syscall)
        stop = __stop_syscalls_metadata;
        kallsyms_lookup(syscall, NULL, NULL, NULL, str);
 
+       if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
+               return NULL;
+
        for ( ; start < stop; start++) {
-               /*
-                * Only compare after the "sys" prefix. Archs that use
-                * syscall wrappers may have syscalls symbols aliases prefixed
-                * with "SyS" instead of "sys", leading to an unwanted
-                * mismatch.
-                */
-               if ((*start)->name && !strcmp((*start)->name + 3, str + 3))
+               if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
                        return *start;
        }
        return NULL;
@@ -359,7 +369,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call)
        int num;
 
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
-       if (num < 0 || num >= NR_syscalls)
+       if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_enter)
@@ -377,7 +387,7 @@ void unreg_event_syscall_enter(struct ftrace_event_call *call)
        int num;
 
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
-       if (num < 0 || num >= NR_syscalls)
+       if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_enter--;
@@ -393,7 +403,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call)
        int num;
 
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
-       if (num < 0 || num >= NR_syscalls)
+       if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_exit)
@@ -411,7 +421,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call)
        int num;
 
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
-       if (num < 0 || num >= NR_syscalls)
+       if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_exit--;
@@ -424,6 +434,14 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call)
 int init_syscall_trace(struct ftrace_event_call *call)
 {
        int id;
+       int num;
+
+       num = ((struct syscall_metadata *)call->data)->syscall_nr;
+       if (num < 0 || num >= NR_syscalls) {
+               pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
+                               ((struct syscall_metadata *)call->data)->name);
+               return -ENOSYS;
+       }
 
        if (set_syscall_print_fmt(call) < 0)
                return -ENOMEM;
@@ -438,7 +456,7 @@ int init_syscall_trace(struct ftrace_event_call *call)
        return id;
 }
 
-unsigned long __init arch_syscall_addr(int nr)
+unsigned long __init __weak arch_syscall_addr(int nr)
 {
        return (unsigned long)sys_call_table[nr];
 }
index ee6578b578ad3c3e4afb47cdc5fe78f0a572d3fc..b5fe4c00eb3c9de169557404444dbb4b222615f6 100644 (file)
@@ -316,6 +316,11 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 
 static struct debug_obj_descr work_debug_descr;
 
+static void *work_debug_hint(void *addr)
+{
+       return ((struct work_struct *) addr)->func;
+}
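+
+/*
+ * Returning the work callback as the debug hint lets
+ * debug_print_object() print it with %pS, so an ODEBUG warning names
+ * the function that owns the misused work_struct rather than only the
+ * object type.
+ */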
+
 /*
  * fixup_init is called when:
  * - an active object is initialized
@@ -387,6 +392,7 @@ static int work_fixup_free(void *addr, enum debug_obj_state state)
 
 static struct debug_obj_descr work_debug_descr = {
        .name           = "work_struct",
+       .debug_hint     = work_debug_hint,
        .fixup_init     = work_fixup_init,
        .fixup_activate = work_fixup_activate,
        .fixup_free     = work_fixup_free,
index deebcc57d4e6a77e334dacb3b46c703e5c673515..9d86e45086f54c06bd39f7e989593f4b3ccfa086 100644 (file)
@@ -249,14 +249,17 @@ static struct debug_bucket *get_bucket(unsigned long addr)
 
 static void debug_print_object(struct debug_obj *obj, char *msg)
 {
+       struct debug_obj_descr *descr = obj->descr;
        static int limit;
 
-       if (limit < 5 && obj->descr != descr_test) {
+       if (limit < 5 && descr != descr_test) {
+               void *hint = descr->debug_hint ?
+                       descr->debug_hint(obj->object) : NULL;
                limit++;
                WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
-                                "object type: %s\n",
+                                "object type: %s hint: %pS\n",
                        msg, obj_states[obj->state], obj->astate,
-                       obj->descr->name);
+                       descr->name, hint);
        }
        debug_objects_warnings++;
 }
index 5021cbc344115c5281cc5427a0847f43fb7d9f39..ac09f2226dc748132cd869bdf0933a5daf7509c4 100644 (file)
@@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n)
 {
        int i, len = 0;
 
-       for (i = 0; i < n; i++) {
+       for (i = 0; i < n; i++, p++) {
                if (p->len)
                        len += nla_total_size(p->len);
                else if (nla_attr_minlen[p->type])
index 1471988d9190db351355faa35a21ab10f85c262a..0ae7e64317260b7a1f8bb35d173aed24488ce433 100644 (file)
@@ -28,6 +28,8 @@
 
 #ifdef CONFIG_DEBUG_PI_LIST
 
+static struct plist_head test_head;
+
 static void plist_check_prev_next(struct list_head *t, struct list_head *p,
                                  struct list_head *n)
 {
@@ -54,12 +56,13 @@ static void plist_check_list(struct list_head *top)
 
 static void plist_check_head(struct plist_head *head)
 {
-       WARN_ON(!head->rawlock && !head->spinlock);
+       WARN_ON(head != &test_head && !head->rawlock && !head->spinlock);
        if (head->rawlock)
                WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
        if (head->spinlock)
                WARN_ON_SMP(!spin_is_locked(head->spinlock));
-       plist_check_list(&head->prio_list);
+       if (!plist_head_empty(head))
+               plist_check_list(&plist_first(head)->prio_list);
        plist_check_list(&head->node_list);
 }
 
@@ -75,25 +78,33 @@ static void plist_check_head(struct plist_head *head)
  */
 void plist_add(struct plist_node *node, struct plist_head *head)
 {
-       struct plist_node *iter;
+       struct plist_node *first, *iter, *prev = NULL;
+       struct list_head *node_next = &head->node_list;
 
        plist_check_head(head);
        WARN_ON(!plist_node_empty(node));
+       WARN_ON(!list_empty(&node->prio_list));
+
+       if (plist_head_empty(head))
+               goto ins_node;
 
-       list_for_each_entry(iter, &head->prio_list, plist.prio_list) {
-               if (node->prio < iter->prio)
-                       goto lt_prio;
-               else if (node->prio == iter->prio) {
-                       iter = list_entry(iter->plist.prio_list.next,
-                                       struct plist_node, plist.prio_list);
-                       goto eq_prio;
+       first = iter = plist_first(head);
+
+       do {
+               if (node->prio < iter->prio) {
+                       node_next = &iter->node_list;
+                       break;
                }
-       }
 
-lt_prio:
-       list_add_tail(&node->plist.prio_list, &iter->plist.prio_list);
-eq_prio:
-       list_add_tail(&node->plist.node_list, &iter->plist.node_list);
+               prev = iter;
+               iter = list_entry(iter->prio_list.next,
+                               struct plist_node, prio_list);
+       } while (iter != first);
+
+       if (!prev || prev->prio != node->prio)
+               list_add_tail(&node->prio_list, &iter->prio_list);
+ins_node:
+       list_add_tail(&node->node_list, node_next);
 
        plist_check_head(head);
 }
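+
+/*
+ * Resulting layout, for illustration: after adding nodes with prios
+ * 10, 10 and 20, node_list holds all three in order while prio_list
+ * links only the first node of each distinct priority, so the add
+ * path scans one node per priority level rather than every node.
+ */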
@@ -108,14 +119,98 @@ void plist_del(struct plist_node *node, struct plist_head *head)
 {
        plist_check_head(head);
 
-       if (!list_empty(&node->plist.prio_list)) {
-               struct plist_node *next = plist_first(&node->plist);
+       if (!list_empty(&node->prio_list)) {
+               if (node->node_list.next != &head->node_list) {
+                       struct plist_node *next;
+
+                       next = list_entry(node->node_list.next,
+                                       struct plist_node, node_list);
 
-               list_move_tail(&next->plist.prio_list, &node->plist.prio_list);
-               list_del_init(&node->plist.prio_list);
+                       /* add the next plist_node into prio_list */
+                       if (list_empty(&next->prio_list))
+                               list_add(&next->prio_list, &node->prio_list);
+               }
+               list_del_init(&node->prio_list);
        }
 
-       list_del_init(&node->plist.node_list);
+       list_del_init(&node->node_list);
 
        plist_check_head(head);
 }
+
+#ifdef CONFIG_DEBUG_PI_LIST
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static struct plist_node __initdata test_node[241];
+
+static void __init plist_test_check(int nr_expect)
+{
+       struct plist_node *first, *prio_pos, *node_pos;
+
+       if (plist_head_empty(&test_head)) {
+               BUG_ON(nr_expect != 0);
+               return;
+       }
+
+       prio_pos = first = plist_first(&test_head);
+       plist_for_each(node_pos, &test_head) {
+               if (nr_expect-- < 0)
+                       break;
+               if (node_pos == first)
+                       continue;
+               if (node_pos->prio == prio_pos->prio) {
+                       BUG_ON(!list_empty(&node_pos->prio_list));
+                       continue;
+               }
+
+               BUG_ON(prio_pos->prio > node_pos->prio);
+               BUG_ON(prio_pos->prio_list.next != &node_pos->prio_list);
+               prio_pos = node_pos;
+       }
+
+       BUG_ON(nr_expect != 0);
+       BUG_ON(prio_pos->prio_list.next != &first->prio_list);
+}
+
+static int __init plist_test(void)
+{
+       int nr_expect = 0, i, loop;
+       unsigned int r = local_clock();
+
+       printk(KERN_INFO "start plist test\n");
+       plist_head_init(&test_head, NULL);
+       for (i = 0; i < ARRAY_SIZE(test_node); i++)
+               plist_node_init(test_node + i, 0);
+
+       for (loop = 0; loop < 1000; loop++) {
+               r = r * 193939 % 47629;
+               i = r % ARRAY_SIZE(test_node);
+               if (plist_node_empty(test_node + i)) {
+                       r = r * 193939 % 47629;
+                       test_node[i].prio = r % 99;
+                       plist_add(test_node + i, &test_head);
+                       nr_expect++;
+               } else {
+                       plist_del(test_node + i, &test_head);
+                       nr_expect--;
+               }
+               plist_test_check(nr_expect);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(test_node); i++) {
+               if (plist_node_empty(test_node + i))
+                       continue;
+               plist_del(test_node + i, &test_head);
+               nr_expect--;
+               plist_test_check(nr_expect);
+       }
+
+       printk(KERN_INFO "end plist test\n");
+       return 0;
+}
+
+module_init(plist_test);
+
+#endif
index f236d7cd5cf3e34bab6955613a773aeda8914039..aa7c3052261f2c3e6cdb6dd0301b64603fbca67d 100644 (file)
@@ -222,8 +222,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 /*
  * wait for the read lock to be granted
  */
-asmregparm struct rw_semaphore __sched *
-rwsem_down_read_failed(struct rw_semaphore *sem)
+struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 {
        return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
                                        -RWSEM_ACTIVE_READ_BIAS);
@@ -232,8 +231,7 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
 /*
  * wait for the write lock to be granted
  */
-asmregparm struct rw_semaphore __sched *
-rwsem_down_write_failed(struct rw_semaphore *sem)
+struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 {
        return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
                                        -RWSEM_ACTIVE_WRITE_BIAS);
@@ -243,7 +241,7 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
  * handle waking up a waiter on the semaphore
  * - up_read/up_write has decremented the active part of count if we come here
  */
-asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
+struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
        unsigned long flags;
 
@@ -263,7 +261,7 @@ asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
  * - caller incremented waiting part of count and discovered it still negative
  * - just wake up any readers at the front of the queue
  */
-asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
        unsigned long flags;
 
index c47bbe11b804e1aa9e0084eeb73e402cbaea04b9..93ca08b8a451411c3c4ac18f1f9525637e4a7140 100644 (file)
@@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
        /*
         * Ensure that the address returned is DMA'ble
         */
-       if (!dma_capable(dev, dev_addr, size))
-               panic("map_single: bounce buffer is not DMA'ble");
+       if (!dma_capable(dev, dev_addr, size)) {
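+               /*
+                * Rather than panic, fall back to the overflow buffer
+                * so the driver can detect the failed mapping via
+                * dma_mapping_error(), which checks for this same
+                * overflow address.
+                */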
+               swiotlb_tbl_unmap_single(dev, map, size, dir);
+               dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
+       }
 
        return dev_addr;
 }
index 2b1b575ae712c2f0f15976a23ec5e4dc53f0897b..42a8326c3e3da1f9fbedb0e0ee26d4cac47d1d85 100644 (file)
@@ -7,7 +7,7 @@ mmu-$(CONFIG_MMU)       := fremap.o highmem.o madvise.o memory.o mincore.o \
                           mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
                           vmalloc.o pagewalk.o pgtable-generic.o
 
-obj-y                  := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
+obj-y                  := filemap.o mempool.o oom_kill.o fadvise.o \
                           maccess.o page_alloc.o page-writeback.o \
                           readahead.o swap.o truncate.o vmscan.o shmem.o \
                           prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
@@ -15,6 +15,12 @@ obj-y                        := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
                           $(mmu-y)
 obj-y += init-mm.o
 
+ifdef CONFIG_NO_BOOTMEM
+       obj-y           += nobootmem.o
+else
+       obj-y           += bootmem.o
+endif
+
 obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
 
 obj-$(CONFIG_BOUNCE)   += bounce.o
index 13b0caa9793c008b7fafa09cbe5c586faa6c7447..07aeb89e396ea140dbd7a28640aacb8fa2707b9a 100644 (file)
 
 #include "internal.h"
 
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+struct pglist_data __refdata contig_page_data = {
+       .bdata = &bootmem_node_data[0]
+};
+EXPORT_SYMBOL(contig_page_data);
+#endif
+
 unsigned long max_low_pfn;
 unsigned long min_low_pfn;
 unsigned long max_pfn;
@@ -35,7 +42,6 @@ unsigned long max_pfn;
 unsigned long saved_max_pfn;
 #endif
 
-#ifndef CONFIG_NO_BOOTMEM
 bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
 
 static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
@@ -146,7 +152,7 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
        min_low_pfn = start;
        return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
 }
-#endif
+
 /*
  * free_bootmem_late - free bootmem pages directly to page allocator
  * @addr: starting address of the range
@@ -171,53 +177,6 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
        }
 }
 
-#ifdef CONFIG_NO_BOOTMEM
-static void __init __free_pages_memory(unsigned long start, unsigned long end)
-{
-       int i;
-       unsigned long start_aligned, end_aligned;
-       int order = ilog2(BITS_PER_LONG);
-
-       start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
-       end_aligned = end & ~(BITS_PER_LONG - 1);
-
-       if (end_aligned <= start_aligned) {
-               for (i = start; i < end; i++)
-                       __free_pages_bootmem(pfn_to_page(i), 0);
-
-               return;
-       }
-
-       for (i = start; i < start_aligned; i++)
-               __free_pages_bootmem(pfn_to_page(i), 0);
-
-       for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
-               __free_pages_bootmem(pfn_to_page(i), order);
-
-       for (i = end_aligned; i < end; i++)
-               __free_pages_bootmem(pfn_to_page(i), 0);
-}
-
-unsigned long __init free_all_memory_core_early(int nodeid)
-{
-       int i;
-       u64 start, end;
-       unsigned long count = 0;
-       struct range *range = NULL;
-       int nr_range;
-
-       nr_range = get_free_all_memory_range(&range, nodeid);
-
-       for (i = 0; i < nr_range; i++) {
-               start = range[i].start;
-               end = range[i].end;
-               count += end - start;
-               __free_pages_memory(start, end);
-       }
-
-       return count;
-}
-#else
 static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 {
        int aligned;
@@ -278,7 +237,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 
        return count;
 }
-#endif
 
 /**
  * free_all_bootmem_node - release a node's free pages to the buddy allocator
@@ -289,12 +247,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
 {
        register_page_bootmem_info_node(pgdat);
-#ifdef CONFIG_NO_BOOTMEM
-       /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
-       return 0;
-#else
        return free_all_bootmem_core(pgdat->bdata);
-#endif
 }
 
 /**
@@ -304,16 +257,6 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
  */
 unsigned long __init free_all_bootmem(void)
 {
-#ifdef CONFIG_NO_BOOTMEM
-       /*
-        * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
-        *  because in some case like Node0 doesnt have RAM installed
-        *  low ram will be on Node1
-        * Use MAX_NUMNODES will make sure all ranges in early_node_map[]
-        *  will be used instead of only Node0 related
-        */
-       return free_all_memory_core_early(MAX_NUMNODES);
-#else
        unsigned long total_pages = 0;
        bootmem_data_t *bdata;
 
@@ -321,10 +264,8 @@ unsigned long __init free_all_bootmem(void)
                total_pages += free_all_bootmem_core(bdata);
 
        return total_pages;
-#endif
 }
 
-#ifndef CONFIG_NO_BOOTMEM
 static void __init __free(bootmem_data_t *bdata,
                        unsigned long sidx, unsigned long eidx)
 {
@@ -419,7 +360,6 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
        }
        BUG();
 }
-#endif
 
 /**
  * free_bootmem_node - mark a page range as usable
@@ -434,10 +374,6 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
 void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
 {
-#ifdef CONFIG_NO_BOOTMEM
-       kmemleak_free_part(__va(physaddr), size);
-       memblock_x86_free_range(physaddr, physaddr + size);
-#else
        unsigned long start, end;
 
        kmemleak_free_part(__va(physaddr), size);
@@ -446,7 +382,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
        end = PFN_DOWN(physaddr + size);
 
        mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
-#endif
 }
 
 /**
@@ -460,10 +395,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
  */
 void __init free_bootmem(unsigned long addr, unsigned long size)
 {
-#ifdef CONFIG_NO_BOOTMEM
-       kmemleak_free_part(__va(addr), size);
-       memblock_x86_free_range(addr, addr + size);
-#else
        unsigned long start, end;
 
        kmemleak_free_part(__va(addr), size);
@@ -472,7 +403,6 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
        end = PFN_DOWN(addr + size);
 
        mark_bootmem(start, end, 0, 0);
-#endif
 }
 
 /**
@@ -489,17 +419,12 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
 int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                                 unsigned long size, int flags)
 {
-#ifdef CONFIG_NO_BOOTMEM
-       panic("no bootmem");
-       return 0;
-#else
        unsigned long start, end;
 
        start = PFN_DOWN(physaddr);
        end = PFN_UP(physaddr + size);
 
        return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
-#endif
 }
 
 /**
@@ -515,20 +440,14 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 int __init reserve_bootmem(unsigned long addr, unsigned long size,
                            int flags)
 {
-#ifdef CONFIG_NO_BOOTMEM
-       panic("no bootmem");
-       return 0;
-#else
        unsigned long start, end;
 
        start = PFN_DOWN(addr);
        end = PFN_UP(addr + size);
 
        return mark_bootmem(start, end, 1, flags);
-#endif
 }
 
-#ifndef CONFIG_NO_BOOTMEM
 int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
 {
@@ -685,33 +604,12 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
 #endif
        return NULL;
 }
-#endif
 
 static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
 {
-#ifdef CONFIG_NO_BOOTMEM
-       void *ptr;
-
-       if (WARN_ON_ONCE(slab_is_available()))
-               return kzalloc(size, GFP_NOWAIT);
-
-restart:
-
-       ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
-
-       if (ptr)
-               return ptr;
-
-       if (goal != 0) {
-               goal = 0;
-               goto restart;
-       }
-
-       return NULL;
-#else
        bootmem_data_t *bdata;
        void *region;
 
@@ -737,7 +635,6 @@ restart:
        }
 
        return NULL;
-#endif
 }
 
 /**
@@ -758,10 +655,6 @@ void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
 {
        unsigned long limit = 0;
 
-#ifdef CONFIG_NO_BOOTMEM
-       limit = -1UL;
-#endif
-
        return ___alloc_bootmem_nopanic(size, align, goal, limit);
 }
 
@@ -798,14 +691,9 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
 {
        unsigned long limit = 0;
 
-#ifdef CONFIG_NO_BOOTMEM
-       limit = -1UL;
-#endif
-
        return ___alloc_bootmem(size, align, goal, limit);
 }
 
-#ifndef CONFIG_NO_BOOTMEM
 static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
@@ -822,7 +710,6 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
 
        return ___alloc_bootmem(size, align, goal, limit);
 }
-#endif
 
 /**
  * __alloc_bootmem_node - allocate boot memory from a specific node
@@ -842,24 +729,10 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
 void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
 {
-       void *ptr;
-
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-#ifdef CONFIG_NO_BOOTMEM
-       ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
-                                        goal, -1ULL);
-       if (ptr)
-               return ptr;
-
-       ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
-                                        goal, -1ULL);
-#else
-       ptr = ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
-#endif
-
-       return ptr;
+       return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
 }
 
 void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -880,13 +753,8 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                unsigned long new_goal;
 
                new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
-#ifdef CONFIG_NO_BOOTMEM
-               ptr =  __alloc_memory_core_early(pgdat->node_id, size, align,
-                                                new_goal, -1ULL);
-#else
                ptr = alloc_bootmem_core(pgdat->bdata, size, align,
                                                 new_goal, 0);
-#endif
                if (ptr)
                        return ptr;
        }
@@ -907,16 +775,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
 void * __init alloc_bootmem_section(unsigned long size,
                                    unsigned long section_nr)
 {
-#ifdef CONFIG_NO_BOOTMEM
-       unsigned long pfn, goal, limit;
-
-       pfn = section_nr_to_pfn(section_nr);
-       goal = pfn << PAGE_SHIFT;
-       limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
-
-       return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
-                                        SMP_CACHE_BYTES, goal, limit);
-#else
        bootmem_data_t *bdata;
        unsigned long pfn, goal, limit;
 
@@ -926,7 +784,6 @@ void * __init alloc_bootmem_section(unsigned long size,
        bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
 
        return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
-#endif
 }
 #endif
 
@@ -938,16 +795,11 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-#ifdef CONFIG_NO_BOOTMEM
-       ptr =  __alloc_memory_core_early(pgdat->node_id, size, align,
-                                                goal, -1ULL);
-#else
        ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
        if (ptr)
                return ptr;
 
        ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
-#endif
        if (ptr)
                return ptr;
 
@@ -995,21 +847,9 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
 void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
 {
-       void *ptr;
-
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-#ifdef CONFIG_NO_BOOTMEM
-       ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+       return ___alloc_bootmem_node(pgdat->bdata, size, align,
                                goal, ARCH_LOW_ADDRESS_LIMIT);
-       if (ptr)
-               return ptr;
-       ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
-                               goal, ARCH_LOW_ADDRESS_LIMIT);
-#else
-       ptr = ___alloc_bootmem_node(pgdat->bdata, size, align,
-                               goal, ARCH_LOW_ADDRESS_LIMIT);
-#endif
-       return ptr;
 }
index 3e29781ee7628b2c3efdd0fde5d639f2d765b18f..113e35c4750209b7cf6f61f54a041f37219ebc4d 100644 (file)
@@ -650,10 +650,10 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag)
 
 static inline struct page *alloc_hugepage_vma(int defrag,
                                              struct vm_area_struct *vma,
-                                             unsigned long haddr)
+                                             unsigned long haddr, int nd)
 {
        return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
-                              HPAGE_PMD_ORDER, vma, haddr);
+                              HPAGE_PMD_ORDER, vma, haddr, nd);
 }
 
 #ifndef CONFIG_NUMA
@@ -678,7 +678,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                if (unlikely(khugepaged_enter(vma)))
                        return VM_FAULT_OOM;
                page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-                                         vma, haddr);
+                                         vma, haddr, numa_node_id());
                if (unlikely(!page))
                        goto out;
                if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
@@ -799,8 +799,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        }
 
        for (i = 0; i < HPAGE_PMD_NR; i++) {
-               pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
-                                         vma, address);
+               pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE,
+                                              vma, address, page_to_nid(page));
                if (unlikely(!pages[i] ||
                             mem_cgroup_newpage_charge(pages[i], mm,
                                                       GFP_KERNEL))) {
@@ -902,7 +902,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow())
                new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-                                             vma, haddr);
+                                             vma, haddr, numa_node_id());
        else
                new_page = NULL;
 
@@ -1745,7 +1745,8 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 static void collapse_huge_page(struct mm_struct *mm,
                               unsigned long address,
                               struct page **hpage,
-                              struct vm_area_struct *vma)
+                              struct vm_area_struct *vma,
+                              int node)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -1761,6 +1762,10 @@ static void collapse_huge_page(struct mm_struct *mm,
 #ifndef CONFIG_NUMA
        VM_BUG_ON(!*hpage);
        new_page = *hpage;
+       if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+               up_read(&mm->mmap_sem);
+               return;
+       }
 #else
        VM_BUG_ON(*hpage);
        /*
@@ -1773,18 +1778,19 @@ static void collapse_huge_page(struct mm_struct *mm,
         * mmap_sem in read mode is also a good idea to allow greater
         * scalability.
         */
-       new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
+       new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+                                     node);
        if (unlikely(!new_page)) {
                up_read(&mm->mmap_sem);
                *hpage = ERR_PTR(-ENOMEM);
                return;
        }
-#endif
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                up_read(&mm->mmap_sem);
                put_page(new_page);
                return;
        }
+#endif
 
        /* after allocating the hugepage upgrade to mmap_sem write mode */
        up_read(&mm->mmap_sem);
@@ -1919,6 +1925,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
        struct page *page;
        unsigned long _address;
        spinlock_t *ptl;
+       int node = -1;
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
@@ -1949,6 +1956,13 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                page = vm_normal_page(vma, _address, pteval);
                if (unlikely(!page))
                        goto out_unmap;
+               /*
+                * Choose the node of the first page. This could
+                * be more sophisticated and look at more pages,
+                * but isn't for now.
+                */
+               if (node == -1)
+                       node = page_to_nid(page);
                VM_BUG_ON(PageCompound(page));
                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
                        goto out_unmap;
@@ -1965,7 +1979,7 @@ out_unmap:
        pte_unmap_unlock(pte, ptl);
        if (ret)
                /* collapse_huge_page will return with the mmap_sem released */
-               collapse_huge_page(mm, address, hpage, vma);
+               collapse_huge_page(mm, address, hpage, vma, node);
 out:
        return ret;
 }
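
Editor's note on the hunks above: they thread an explicit NUMA node from the scan/fault site down into the allocator, so a collapsed or freshly faulted huge page lands on the node where the existing small pages live rather than on whichever node khugepaged happens to be running. A minimal userspace sketch of the "first page picks the node" heuristic added to khugepaged_scan_pmd() (the nids array and page_nid() helper are hypothetical stand-ins, not kernel APIs):

#include <stdio.h>

#define HPAGE_PMD_NR 512        /* 4K pages per 2M huge page on x86_64 */

/* Hypothetical stand-in for page_to_nid(): node of the i-th small page. */
static int page_nid(const int *nids, int i) { return nids[i]; }

/* Pick the allocation node the way khugepaged_scan_pmd() now does: the
 * node of the first page found in the PMD range. The real loop keeps
 * scanning because it also validates every PTE and page as it goes. */
static int pick_collapse_node(const int *nids, int n)
{
        int node = -1;
        for (int i = 0; i < n; i++)
                if (node == -1)
                        node = page_nid(nids, i);
        return node;
}

int main(void)
{
        int nids[HPAGE_PMD_NR];
        for (int i = 0; i < HPAGE_PMD_NR; i++)
                nids[i] = (i < 8) ? 1 : 0;   /* first pages live on node 1 */
        printf("collapse target node: %d\n",
               pick_collapse_node(nids, HPAGE_PMD_NR));
        return 0;
}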
index 8e8c183248631cccce48a97943168d1da5e28223..5823698c2b71a9dd79b43986c9929f2cd7485a04 100644 (file)
@@ -2648,6 +2648,7 @@ void unmap_mapping_range(struct address_space *mapping,
                details.last_index = ULONG_MAX;
        details.i_mmap_lock = &mapping->i_mmap_lock;
 
+       mutex_lock(&mapping->unmap_mutex);
        spin_lock(&mapping->i_mmap_lock);
 
        /* Protect against endless unmapping loops */
@@ -2664,6 +2665,7 @@ void unmap_mapping_range(struct address_space *mapping,
        if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
                unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
        spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->unmap_mutex);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
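Editor's note: the unmap_mapping_range() hunk adds a per-mapping mutex around the spinlock-protected truncation loop. The loop can restart itself when it detects concurrent activity, and without an outer lock two racing truncators can keep forcing each other to restart. A rough userspace model of the pattern, assuming pthread primitives as stand-ins for the kernel's mutex and spinlock:

#include <pthread.h>
#include <stdio.h>

struct mapping {
        pthread_mutex_t unmap_mutex;     /* the lock this patch adds */
        pthread_mutex_t i_mmap_lock;     /* a spinlock in the kernel */
};

static void unmap_range(struct mapping *m)
{
        pthread_mutex_lock(&m->unmap_mutex);
        pthread_mutex_lock(&m->i_mmap_lock);
        /* ... walk the vmas; any internal restart happens with the outer
         * mutex still held, so no second unmapper can perturb the walk,
         * turning a potential restart livelock into simple queueing ... */
        pthread_mutex_unlock(&m->i_mmap_lock);
        pthread_mutex_unlock(&m->unmap_mutex);
}

int main(void)
{
        struct mapping m = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
        };
        unmap_range(&m);
        puts("done");
        return 0;
}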
index 368fc9d23610eb57dc2359b1aba57d9458616a31..b53ec99f142897a30d1513af8bcfe375dc392102 100644 (file)
@@ -1524,10 +1524,9 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
 }
 
 /* Return a zonelist indicated by gfp for node representing a mempolicy */
-static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
+static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
+       int nd)
 {
-       int nd = numa_node_id();
-
        switch (policy->mode) {
        case MPOL_PREFERRED:
                if (!(policy->flags & MPOL_F_LOCAL))
@@ -1679,7 +1678,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
                zl = node_zonelist(interleave_nid(*mpol, vma, addr,
                                huge_page_shift(hstate_vma(vma))), gfp_flags);
        } else {
-               zl = policy_zonelist(gfp_flags, *mpol);
+               zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
                if ((*mpol)->mode == MPOL_BIND)
                        *nodemask = &(*mpol)->v.nodes;
        }
@@ -1820,7 +1819,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  */
 struct page *
 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
-               unsigned long addr)
+               unsigned long addr, int node)
 {
        struct mempolicy *pol = get_vma_policy(current, vma, addr);
        struct zonelist *zl;
@@ -1830,13 +1829,13 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
        if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
                unsigned nid;
 
-               nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
+               nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
                mpol_cond_put(pol);
                page = alloc_page_interleave(gfp, order, nid);
                put_mems_allowed();
                return page;
        }
-       zl = policy_zonelist(gfp, pol);
+       zl = policy_zonelist(gfp, pol, node);
        if (unlikely(mpol_needs_cond_ref(pol))) {
                /*
                 * slow path: ref counted shared policy
@@ -1892,7 +1891,8 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
                page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
        else
                page = __alloc_pages_nodemask(gfp, order,
-                       policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
+                               policy_zonelist(gfp, pol, numa_node_id()),
+                               policy_nodemask(gfp, pol));
        put_mems_allowed();
        return page;
 }
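
Editor's note: two things change in mempolicy.c. policy_zonelist() now takes the preferred node from the caller instead of always using numa_node_id(), and MPOL_INTERLEAVE hashes the faulting address at PAGE_SHIFT + order, so a 2MB THP advances the interleave index once per huge page instead of once per base page. A toy model of that offset-based interleave (deliberately simplified: the real interleave_nid() also folds in vma->vm_pgoff and the policy's node mask):

#include <stdio.h>

#define PAGE_SHIFT  12
#define HPAGE_ORDER 9                /* 2 MB huge pages on x86_64 */

/* Toy interleave: hash the VMA-relative offset, measured in
 * allocation-sized units, across nnodes. */
static unsigned interleave(unsigned long addr, unsigned long vma_start,
                           int shift, unsigned nnodes)
{
        return ((addr - vma_start) >> shift) % nnodes;
}

int main(void)
{
        unsigned long start = 0x700000000000UL;
        for (int i = 0; i < 4; i++) {
                unsigned long addr = start + (unsigned long)i * (1UL << 21);
                printf("huge page %d -> node %u (old shift: node %u)\n", i,
                       interleave(addr, start, PAGE_SHIFT + HPAGE_ORDER, 4),
                       interleave(addr, start, PAGE_SHIFT, 4));
        }
        /* With the old shift every 2MB page hashed to node 0; with the
         * new shift consecutive huge pages cycle through the nodes. */
        return 0;
}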
index 7661152538074ee8018be8b87e1600021be5dc27..352de555626c4434471a53e29bee7a02516adfe3 100644 (file)
@@ -1287,14 +1287,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
                return -EPERM;
 
        /* Find the mm_struct */
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        task = pid ? find_task_by_vpid(pid) : current;
        if (!task) {
-               read_unlock(&tasklist_lock);
+               rcu_read_unlock();
                return -ESRCH;
        }
        mm = get_task_mm(task);
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 
        if (!mm)
                return -EINVAL;
index 9925b6391b8035a547355a8ad9919e9a8f06f920..1de98d492ddcd7adde4508bc3af3b5fcb6a465fe 100644 (file)
@@ -94,9 +94,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                 */
                mapping = vma->vm_file->f_mapping;
                spin_lock(&mapping->i_mmap_lock);
-               if (new_vma->vm_truncate_count &&
-                   new_vma->vm_truncate_count != vma->vm_truncate_count)
-                       new_vma->vm_truncate_count = 0;
+               new_vma->vm_truncate_count = 0;
        }
 
        /*
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
new file mode 100644 (file)
index 0000000..e2bdb07
--- /dev/null
@@ -0,0 +1,435 @@
+/*
+ *  bootmem - A boot-time physical memory allocator and configurator
+ *
+ *  Copyright (C) 1999 Ingo Molnar
+ *                1999 Kanoj Sarcar, SGI
+ *                2008 Johannes Weiner
+ *
+ * Access to this subsystem has to be serialized externally (which is true
+ * for the boot process anyway).
+ */
+#include <linux/init.h>
+#include <linux/pfn.h>
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/kmemleak.h>
+#include <linux/range.h>
+#include <linux/memblock.h>
+
+#include <asm/bug.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+
+#include "internal.h"
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+struct pglist_data __refdata contig_page_data;
+EXPORT_SYMBOL(contig_page_data);
+#endif
+
+unsigned long max_low_pfn;
+unsigned long min_low_pfn;
+unsigned long max_pfn;
+
+#ifdef CONFIG_CRASH_DUMP
+/*
+ * If we have booted due to a crash, max_pfn will be a very low value. We need
+ * to know the amount of memory that the previous kernel used.
+ */
+unsigned long saved_max_pfn;
+#endif
+
+static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
+                                       u64 goal, u64 limit)
+{
+       void *ptr;
+       u64 addr;
+
+       if (limit > memblock.current_limit)
+               limit = memblock.current_limit;
+
+       addr = find_memory_core_early(nid, size, align, goal, limit);
+
+       if (addr == MEMBLOCK_ERROR)
+               return NULL;
+
+       ptr = phys_to_virt(addr);
+       memset(ptr, 0, size);
+       memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
+       /*
+        * The min_count is set to 0 so that bootmem allocated blocks
+        * are never reported as leaks.
+        */
+       kmemleak_alloc(ptr, size, 0, 0);
+       return ptr;
+}
+
+/*
+ * free_bootmem_late - free bootmem pages directly to page allocator
+ * @addr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * This is only useful when the bootmem allocator has already been torn
+ * down, but we are still initializing the system.  Pages are given directly
+ * to the page allocator; no bootmem metadata is updated because it is gone.
+ */
+void __init free_bootmem_late(unsigned long addr, unsigned long size)
+{
+       unsigned long cursor, end;
+
+       kmemleak_free_part(__va(addr), size);
+
+       cursor = PFN_UP(addr);
+       end = PFN_DOWN(addr + size);
+
+       for (; cursor < end; cursor++) {
+               __free_pages_bootmem(pfn_to_page(cursor), 0);
+               totalram_pages++;
+       }
+}
+
+static void __init __free_pages_memory(unsigned long start, unsigned long end)
+{
+       int i;
+       unsigned long start_aligned, end_aligned;
+       int order = ilog2(BITS_PER_LONG);
+
+       start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
+       end_aligned = end & ~(BITS_PER_LONG - 1);
+
+       if (end_aligned <= start_aligned) {
+               for (i = start; i < end; i++)
+                       __free_pages_bootmem(pfn_to_page(i), 0);
+
+               return;
+       }
+
+       for (i = start; i < start_aligned; i++)
+               __free_pages_bootmem(pfn_to_page(i), 0);
+
+       for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
+               __free_pages_bootmem(pfn_to_page(i), order);
+
+       for (i = end_aligned; i < end; i++)
+               __free_pages_bootmem(pfn_to_page(i), 0);
+}
+
+unsigned long __init free_all_memory_core_early(int nodeid)
+{
+       int i;
+       u64 start, end;
+       unsigned long count = 0;
+       struct range *range = NULL;
+       int nr_range;
+
+       nr_range = get_free_all_memory_range(&range, nodeid);
+
+       for (i = 0; i < nr_range; i++) {
+               start = range[i].start;
+               end = range[i].end;
+               count += end - start;
+               __free_pages_memory(start, end);
+       }
+
+       return count;
+}
+
+/**
+ * free_all_bootmem_node - release a node's free pages to the buddy allocator
+ * @pgdat: node to be released
+ *
+ * Returns the number of pages actually released.
+ */
+unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
+{
+       register_page_bootmem_info_node(pgdat);
+
+       /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
+       return 0;
+}
+
+/**
+ * free_all_bootmem - release free pages to the buddy allocator
+ *
+ * Returns the number of pages actually released.
+ */
+unsigned long __init free_all_bootmem(void)
+{
+       /*
+        * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
+        * because in some cases, e.g. when Node0 has no RAM installed,
+        * low RAM will be on Node1. Using MAX_NUMNODES makes sure all
+        * ranges in early_node_map[] are used, instead of only the
+        * Node0-related ones.
+        */
+       return free_all_memory_core_early(MAX_NUMNODES);
+}
+
+/**
+ * free_bootmem_node - mark a page range as usable
+ * @pgdat: node the range resides on
+ * @physaddr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * Partial pages will be considered reserved and left as they are.
+ *
+ * The range must reside completely on the specified node.
+ */
+void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
+                             unsigned long size)
+{
+       kmemleak_free_part(__va(physaddr), size);
+       memblock_x86_free_range(physaddr, physaddr + size);
+}
+
+/**
+ * free_bootmem - mark a page range as usable
+ * @addr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * Partial pages will be considered reserved and left as they are.
+ *
+ * The range must be contiguous but may span node boundaries.
+ */
+void __init free_bootmem(unsigned long addr, unsigned long size)
+{
+       kmemleak_free_part(__va(addr), size);
+       memblock_x86_free_range(addr, addr + size);
+}
+
+static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+                                       unsigned long align,
+                                       unsigned long goal,
+                                       unsigned long limit)
+{
+       void *ptr;
+
+       if (WARN_ON_ONCE(slab_is_available()))
+               return kzalloc(size, GFP_NOWAIT);
+
+restart:
+
+       ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
+
+       if (ptr)
+               return ptr;
+
+       if (goal != 0) {
+               goal = 0;
+               goto restart;
+       }
+
+       return NULL;
+}
+
+/**
+ * __alloc_bootmem_nopanic - allocate boot memory without panicking
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it cannot be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may happen on any node in the system.
+ *
+ * Returns NULL on failure.
+ */
+void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
+                                       unsigned long goal)
+{
+       unsigned long limit = -1UL;
+
+       return ___alloc_bootmem_nopanic(size, align, goal, limit);
+}
+
+static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
+                                       unsigned long goal, unsigned long limit)
+{
+       void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);
+
+       if (mem)
+               return mem;
+       /*
+        * Whoops, we cannot satisfy the allocation request.
+        */
+       printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+       panic("Out of memory");
+       return NULL;
+}
+
+/**
+ * __alloc_bootmem - allocate boot memory
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it cannot be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may happen on any node in the system.
+ *
+ * The function panics if the request cannot be satisfied.
+ */
+void * __init __alloc_bootmem(unsigned long size, unsigned long align,
+                             unsigned long goal)
+{
+       unsigned long limit = -1UL;
+
+       return ___alloc_bootmem(size, align, goal, limit);
+}
+
+/**
+ * __alloc_bootmem_node - allocate boot memory from a specific node
+ * @pgdat: node to allocate from
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it cannot be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may fall back to any node in the system if the specified node
+ * cannot hold the requested memory.
+ *
+ * The function panics if the request cannot be satisfied.
+ */
+void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+                                  unsigned long align, unsigned long goal)
+{
+       void *ptr;
+
+       if (WARN_ON_ONCE(slab_is_available()))
+               return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+       ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+                                        goal, -1ULL);
+       if (ptr)
+               return ptr;
+
+       return __alloc_memory_core_early(MAX_NUMNODES, size, align,
+                                        goal, -1ULL);
+}
+
+void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
+                                  unsigned long align, unsigned long goal)
+{
+#ifdef MAX_DMA32_PFN
+       unsigned long end_pfn;
+
+       if (WARN_ON_ONCE(slab_is_available()))
+               return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+       /* update the goal according to MAX_DMA32_PFN */
+       end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+
+       if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
+           (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
+               void *ptr;
+               unsigned long new_goal;
+
+               new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
+               ptr =  __alloc_memory_core_early(pgdat->node_id, size, align,
+                                                new_goal, -1ULL);
+               if (ptr)
+                       return ptr;
+       }
+#endif
+
+       return __alloc_bootmem_node(pgdat, size, align, goal);
+}
+
+#ifdef CONFIG_SPARSEMEM
+/**
+ * alloc_bootmem_section - allocate boot memory from a specific section
+ * @size: size of the request in bytes
+ * @section_nr: sparse map section to allocate from
+ *
+ * Return NULL on failure.
+ */
+void * __init alloc_bootmem_section(unsigned long size,
+                                   unsigned long section_nr)
+{
+       unsigned long pfn, goal, limit;
+
+       pfn = section_nr_to_pfn(section_nr);
+       goal = pfn << PAGE_SHIFT;
+       limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
+
+       return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
+                                        SMP_CACHE_BYTES, goal, limit);
+}
+#endif
+
+void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
+                                  unsigned long align, unsigned long goal)
+{
+       void *ptr;
+
+       if (WARN_ON_ONCE(slab_is_available()))
+               return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+       ptr =  __alloc_memory_core_early(pgdat->node_id, size, align,
+                                                goal, -1ULL);
+       if (ptr)
+               return ptr;
+
+       return __alloc_bootmem_nopanic(size, align, goal);
+}
+
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
+#endif
+
+/**
+ * __alloc_bootmem_low - allocate low boot memory
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it cannot be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may happen on any node in the system.
+ *
+ * The function panics if the request cannot be satisfied.
+ */
+void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
+                                 unsigned long goal)
+{
+       return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
+}
+
+/**
+ * __alloc_bootmem_low_node - allocate low boot memory from a specific node
+ * @pgdat: node to allocate from
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it cannot be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may fall back to any node in the system if the specified node
+ * cannot hold the requested memory.
+ *
+ * The function panics if the request cannot be satisfied.
+ */
+void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
+                                      unsigned long align, unsigned long goal)
+{
+       void *ptr;
+
+       if (WARN_ON_ONCE(slab_is_available()))
+               return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+       ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+                               goal, ARCH_LOW_ADDRESS_LIMIT);
+       if (ptr)
+               return ptr;
+
+       return  __alloc_memory_core_early(MAX_NUMNODES, size, align,
+                               goal, ARCH_LOW_ADDRESS_LIMIT);
+}
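
Editor's note: mm/nobootmem.c gathers the CONFIG_NO_BOOTMEM implementation that previously hid behind #ifdefs in bootmem.c and page_alloc.c; it keeps the bootmem API but backs it with memblock. The most algorithmic piece is __free_pages_memory(), which hands a PFN range to the buddy allocator in the largest convenient chunks: single pages for the unaligned head and tail, order-ilog2(BITS_PER_LONG) blocks (order 6, i.e. 64 pages, on 64-bit) for the BITS_PER_LONG-aligned middle. A standalone model of that chunking, which only counts the pieces rather than freeing anything:

#include <stdio.h>

#define BITS_PER_LONG 64

/* Model of __free_pages_memory(): how the PFN range [start, end) is
 * split into order-0 edges and order-log2(BITS_PER_LONG) middle blocks. */
static void split_range(unsigned long start, unsigned long end)
{
        unsigned long a, b;

        a = (start + BITS_PER_LONG - 1) & ~(unsigned long)(BITS_PER_LONG - 1);
        b = end & ~(unsigned long)(BITS_PER_LONG - 1);

        if (b <= a) {                    /* range too small: singles only */
                printf("%lu single pages\n", end - start);
                return;
        }
        printf("%lu head singles, %lu blocks of %d, %lu tail singles\n",
               a - start, (b - a) / BITS_PER_LONG, BITS_PER_LONG, end - b);
}

int main(void)
{
        split_range(5, 1000);   /* 59 head, 14 blocks of 64, 40 tail */
        split_range(10, 50);    /* 40 single pages */
        return 0;
}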
index a873e61e312e6dd7795b4b734a0370bc844d9f29..bd7625676a645c2bfe6d7aacd4197a0cfc5b23fd 100644 (file)
@@ -3699,13 +3699,45 @@ void __init free_bootmem_with_active_regions(int nid,
 }
 
 #ifdef CONFIG_HAVE_MEMBLOCK
+/*
+ * Basic iterator support. Return the last range of PFNs for a node.
+ * Note: nid == MAX_NUMNODES returns the last region regardless of node.
+ */
+static int __meminit last_active_region_index_in_nid(int nid)
+{
+       int i;
+
+       for (i = nr_nodemap_entries - 1; i >= 0; i--)
+               if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
+                       return i;
+
+       return -1;
+}
+
+/*
+ * Basic iterator support. Return the previous active range of PFNs for a node.
+ * Note: nid == MAX_NUMNODES returns the previous region regardless of node.
+ */
+static int __meminit previous_active_region_index_in_nid(int index, int nid)
+{
+       for (index = index - 1; index >= 0; index--)
+               if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
+                       return index;
+
+       return -1;
+}
+
+#define for_each_active_range_index_in_nid_reverse(i, nid) \
+       for (i = last_active_region_index_in_nid(nid); i != -1; \
+                               i = previous_active_region_index_in_nid(i, nid))
+
 u64 __init find_memory_core_early(int nid, u64 size, u64 align,
                                        u64 goal, u64 limit)
 {
        int i;
 
        /* Need to go over early_node_map to find out good range for node */
-       for_each_active_range_index_in_nid(i, nid) {
+       for_each_active_range_index_in_nid_reverse(i, nid) {
                u64 addr;
                u64 ei_start, ei_last;
                u64 final_start, final_end;
@@ -3748,34 +3780,6 @@ int __init add_from_early_node_map(struct range *range, int az,
        return nr_range;
 }
 
-#ifdef CONFIG_NO_BOOTMEM
-void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
-                                       u64 goal, u64 limit)
-{
-       void *ptr;
-       u64 addr;
-
-       if (limit > memblock.current_limit)
-               limit = memblock.current_limit;
-
-       addr = find_memory_core_early(nid, size, align, goal, limit);
-
-       if (addr == MEMBLOCK_ERROR)
-               return NULL;
-
-       ptr = phys_to_virt(addr);
-       memset(ptr, 0, size);
-       memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
-       /*
-        * The min_count is set to 0 so that bootmem allocated blocks
-        * are never reported as leaks.
-        */
-       kmemleak_alloc(ptr, size, 0, 0);
-       return ptr;
-}
-#endif
-
-
 void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
 {
        int i;
@@ -4809,15 +4813,6 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
        dma_reserve = new_dma_reserve;
 }
 
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data __refdata contig_page_data = {
-#ifndef CONFIG_NO_BOOTMEM
- .bdata = &bootmem_node_data[0]
-#endif
- };
-EXPORT_SYMBOL(contig_page_data);
-#endif
-
 void __init free_area_init(unsigned long *zones_size)
 {
        free_area_init_node(0, zones_size,
@@ -5376,10 +5371,9 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
        for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
                unsigned long check = pfn + iter;
 
-               if (!pfn_valid_within(check)) {
-                       iter++;
+               if (!pfn_valid_within(check))
                        continue;
-               }
+
                page = pfn_to_page(check);
                if (!page_count(page)) {
                        if (PageBuddy(page))
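
Editor's note: the find_memory_core_early() change above walks early_node_map[] from the last entry backwards, so early allocations are satisfied top-down and stop squatting on low addresses that later, pickier callers may require. A toy top-down first-fit over an ascending range list (hypothetical types; the real code also honours the goal and alignment via memblock helpers):

#include <stdio.h>

struct range { unsigned long start, end; };   /* [start, end), ascending */

/* Walk the ranges from the top and place `size` bytes at the highest
 * address that fits below `limit`, mirroring the reverse iterator. */
static unsigned long alloc_top_down(const struct range *r, int n,
                                    unsigned long size, unsigned long limit)
{
        for (int i = n - 1; i >= 0; i--) {
                unsigned long end = r[i].end < limit ? r[i].end : limit;

                if (end > r[i].start && end - r[i].start >= size)
                        return end - size;
        }
        return 0;       /* failure sentinel, like MEMBLOCK_ERROR */
}

int main(void)
{
        struct range map[] = { { 0x1000, 0x8000 }, { 0x100000, 0x200000 } };

        /* Lands at 0x1fc000: the top of the highest range that fits. */
        printf("placed at %#lx\n", alloc_top_down(map, 2, 0x4000, ~0UL));
        return 0;
}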
index f21f4a1d6a1ce144d2ce45c30123eb2010f93bb8..941bf82e896128b618284ae17a25d963c13838fc 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -497,41 +497,51 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
        int referenced = 0;
 
-       /*
-        * Don't want to elevate referenced for mlocked page that gets this far,
-        * in order that it progresses to try_to_unmap and is moved to the
-        * unevictable list.
-        */
-       if (vma->vm_flags & VM_LOCKED) {
-               *mapcount = 0;  /* break early from loop */
-               *vm_flags |= VM_LOCKED;
-               goto out;
-       }
-
-       /* Pretend the page is referenced if the task has the
-          swap token and is in the middle of a page fault. */
-       if (mm != current->mm && has_swap_token(mm) &&
-                       rwsem_is_locked(&mm->mmap_sem))
-               referenced++;
-
        if (unlikely(PageTransHuge(page))) {
                pmd_t *pmd;
 
                spin_lock(&mm->page_table_lock);
+               /*
+                * rmap might return false positives; we must filter
+                * these out using page_check_address_pmd().
+                */
                pmd = page_check_address_pmd(page, mm, address,
                                             PAGE_CHECK_ADDRESS_PMD_FLAG);
-               if (pmd && !pmd_trans_splitting(*pmd) &&
-                   pmdp_clear_flush_young_notify(vma, address, pmd))
+               if (!pmd) {
+                       spin_unlock(&mm->page_table_lock);
+                       goto out;
+               }
+
+               if (vma->vm_flags & VM_LOCKED) {
+                       spin_unlock(&mm->page_table_lock);
+                       *mapcount = 0;  /* break early from loop */
+                       *vm_flags |= VM_LOCKED;
+                       goto out;
+               }
+
+               /* go ahead even if the pmd is pmd_trans_splitting() */
+               if (pmdp_clear_flush_young_notify(vma, address, pmd))
                        referenced++;
                spin_unlock(&mm->page_table_lock);
        } else {
                pte_t *pte;
                spinlock_t *ptl;
 
+               /*
+                * rmap might return false positives; we must filter
+                * these out using page_check_address().
+                */
                pte = page_check_address(page, mm, address, &ptl, 0);
                if (!pte)
                        goto out;
 
+               if (vma->vm_flags & VM_LOCKED) {
+                       pte_unmap_unlock(pte, ptl);
+                       *mapcount = 0;  /* break early from loop */
+                       *vm_flags |= VM_LOCKED;
+                       goto out;
+               }
+
                if (ptep_clear_flush_young_notify(vma, address, pte)) {
                        /*
                         * Don't treat a reference through a sequentially read
@@ -546,6 +556,12 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                pte_unmap_unlock(pte, ptl);
        }
 
+       /* Pretend the page is referenced if the task has the
+          swap token and is in the middle of a page fault. */
+       if (mm != current->mm && has_swap_token(mm) &&
+                       rwsem_is_locked(&mm->mmap_sem))
+               referenced++;
+
        (*mapcount)--;
 
        if (referenced)
index 5ee67c9906022a15b566711da17b8c78e65fa16d..3437b65d6d6e76e71aedbe5ed02f851f400133cf 100644 (file)
@@ -2144,8 +2144,10 @@ static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
 {
        struct inode *inode = dentry->d_inode;
 
-       if (*len < 3)
+       if (*len < 3) {
+               *len = 3;
                return 255;
+       }
 
        if (inode_unhashed(inode)) {
                /* Unfortunately insert_inode_hash is not idempotent,
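
Editor's note: the shmem_encode_fh() fix follows the exportfs contract for undersized buffers: report the number of 32-bit words the handle actually needs through *len before returning 255 ("buffer too small"), so the caller can retry with enough space. The same probe-then-retry convention in miniature (encode_fh() here is a hypothetical stand-in, not the kernel function):

#include <stdio.h>

#define FH_WORDS 3      /* this handle type needs 3 x u32 */

/* Returns a handle type on success, 255 if the buffer is too small;
 * either way *len is left holding the space actually required. */
static int encode_fh(unsigned *fh, int *len)
{
        if (*len < FH_WORDS) {
                *len = FH_WORDS;        /* tell the caller what to allocate */
                return 255;
        }
        fh[0] = 0xdead; fh[1] = 0xbeef; fh[2] = 0;
        *len = FH_WORDS;
        return 1;
}

int main(void)
{
        unsigned buf[8];
        int len = 1;

        if (encode_fh(buf, &len) == 255) {
                printf("need %d words, retrying\n", len);
                encode_fh(buf, &len);
        }
        printf("fh: %x %x\n", buf[0], buf[1]);
        return 0;
}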
index 07a458d72fa880f5adc366b8acf03e610841880e..0341c5700e346fa62401e9815720ed85af3a20c9 100644 (file)
@@ -1940,7 +1940,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 
        error = -EINVAL;
        if (S_ISBLK(inode->i_mode)) {
-               bdev = I_BDEV(inode);
+               bdev = bdgrab(I_BDEV(inode));
                error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
                                   sys_swapon);
                if (error < 0) {
index 49feb46e77b8802803d20009f758ec65d2db0835..d64296be00d39e5c66199e94269f3b8f5ba0bf64 100644 (file)
@@ -225,6 +225,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
        next = start;
        while (next <= end &&
               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+               mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index = page->index;
@@ -247,6 +248,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        unlock_page(page);
                }
                pagevec_release(&pvec);
+               mem_cgroup_uncharge_end();
                cond_resched();
        }
 
index 17497d0cd8b9e4c95bf5ce39abf1f59d2eb15f2f..6771ea70bfe7e399d96237a58d3860357aad4c46 100644 (file)
@@ -1841,16 +1841,28 @@ static inline bool should_continue_reclaim(struct zone *zone,
        if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
                return false;
 
-       /*
-        * If we failed to reclaim and have scanned the full list, stop.
-        * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far
-        *       faster but obviously would be less likely to succeed
-        *       allocation. If this is desirable, use GFP_REPEAT to decide
-        *       if both reclaimed and scanned should be checked or just
-        *       reclaimed
-        */
-       if (!nr_reclaimed && !nr_scanned)
-               return false;
+       /* Consider stopping depending on scan and reclaim activity */
+       if (sc->gfp_mask & __GFP_REPEAT) {
+               /*
+                * For __GFP_REPEAT allocations, stop reclaiming if the
+                * full LRU list has been scanned and we are still failing
+                * to reclaim pages. This full LRU scan is potentially
+                * expensive but a __GFP_REPEAT caller really wants to succeed.
+                */
+               if (!nr_reclaimed && !nr_scanned)
+                       return false;
+       } else {
+               /*
+                * For non-__GFP_REPEAT allocations which can presumably
+                * fail without consequence, stop if we failed to reclaim
+                * any pages from the last SWAP_CLUSTER_MAX number of
+                * pages that were scanned. This will return to the
+                * caller faster at the risk that reclaim/compaction
+                * and the resulting allocation attempt fail.
+                */
+               if (!nr_reclaimed)
+                       return false;
+       }
 
        /*
         * If we have not reclaimed enough pages for compaction and the
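
Editor's note: the should_continue_reclaim() rewrite splits the stop condition by caller intent: a __GFP_REPEAT allocation keeps reclaiming until a full LRU scan produces nothing, while an ordinary allocation bails as soon as one scan batch reclaims zero pages. Distilled to a predicate (simplified; the real function also checks the reclaim mode and whether compaction is ready):

#include <stdbool.h>
#include <stdio.h>

/* Keep reclaiming? Mirrors the patched logic for one scan batch. */
static bool continue_reclaim(bool gfp_repeat,
                             unsigned long nr_reclaimed,
                             unsigned long nr_scanned)
{
        if (gfp_repeat)
                return nr_reclaimed || nr_scanned; /* stop only on a full,
                                                    * fruitless scan */
        return nr_reclaimed != 0;                  /* cheap callers stop early */
}

int main(void)
{
        printf("%d %d\n",
               continue_reclaim(true, 0, 128),   /* 1: scanned, keep trying */
               continue_reclaim(false, 0, 128)); /* 0: reclaimed nothing, stop */
        return 0;
}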
index a3330ebe2c5345a32140e9b8b84445ac756dcecb..a51d9465e6284e47753394b45330eba20abfcaac 100644 (file)
@@ -19,9 +19,7 @@ obj-$(CONFIG_NETFILTER)               += netfilter/
 obj-$(CONFIG_INET)             += ipv4/
 obj-$(CONFIG_XFRM)             += xfrm/
 obj-$(CONFIG_UNIX)             += unix/
-ifneq ($(CONFIG_IPV6),)
-obj-y                          += ipv6/
-endif
+obj-$(CONFIG_NET)              += ipv6/
 obj-$(CONFIG_PACKET)           += packet/
 obj-$(CONFIG_NET_KEY)          += key/
 obj-$(CONFIG_BRIDGE)           += bridge/
index 2575c2db64047021080131d47d37a9f5ad49584a..d7b9af4703d02718c512cb9bca4a99913532f630 100644 (file)
@@ -727,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
                        break;
                }
 
+               tty_unlock();
                schedule();
+               tty_lock();
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&dev->wait, &wait);
index 9190ae462cb4215db167eaeb08164b11a052e63f..6dee7bf648a9af5a8da4bfd2b46082b71c197aa4 100644 (file)
@@ -6,6 +6,7 @@ config BRIDGE
        tristate "802.1d Ethernet Bridging"
        select LLC
        select STP
+       depends on IPV6 || IPV6=n
        ---help---
          If you say Y here, then your Linux box will be able to act as an
          Ethernet bridge, which means that the different Ethernet segments it
index 09d5c098792562655e19c8e66a13f89684534a5a..030a002ff8eee31230c02bad9c1584f5f94931aa 100644 (file)
        rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
+static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
 {
-       if (ipv6_addr_is_multicast(addr) &&
-           IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
+       if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
                return 1;
        return 0;
 }
@@ -435,7 +434,6 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        eth = eth_hdr(skb);
 
        memcpy(eth->h_source, br->dev->dev_addr, 6);
-       ipv6_eth_mc_map(group, eth->h_dest);
        eth->h_proto = htons(ETH_P_IPV6);
        skb_put(skb, sizeof(*eth));
 
@@ -447,8 +445,10 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        ip6h->payload_len = htons(8 + sizeof(*mldq));
        ip6h->nexthdr = IPPROTO_HOPOPTS;
        ip6h->hop_limit = 1;
-       ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
+       ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+                          &ip6h->saddr);
        ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
+       ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
 
        hopopt = (u8 *)(ip6h + 1);
        hopopt[0] = IPPROTO_ICMPV6;             /* next hdr */
@@ -780,11 +780,11 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
 {
        struct br_ip br_group;
 
-       if (ipv6_is_local_multicast(group))
+       if (!ipv6_is_transient_multicast(group))
                return 0;
 
        ipv6_addr_copy(&br_group.u.ip6, group);
-       br_group.proto = htons(ETH_P_IP);
+       br_group.proto = htons(ETH_P_IPV6);
 
        return br_multicast_add_group(br, port, &br_group);
 }
@@ -1013,18 +1013,19 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 
                nsrcs = skb_header_pointer(skb,
                                           len + offsetof(struct mld2_grec,
-                                                         grec_mca),
+                                                         grec_nsrcs),
                                           sizeof(_nsrcs), &_nsrcs);
                if (!nsrcs)
                        return -EINVAL;
 
                if (!pskb_may_pull(skb,
                                   len + sizeof(*grec) +
-                                  sizeof(struct in6_addr) * (*nsrcs)))
+                                  sizeof(struct in6_addr) * ntohs(*nsrcs)))
                        return -EINVAL;
 
                grec = (struct mld2_grec *)(skb->data + len);
-               len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs);
+               len += sizeof(*grec) +
+                      sizeof(struct in6_addr) * ntohs(*nsrcs);
 
                /* We treat these as MLDv1 reports for now. */
                switch (grec->grec_type) {
@@ -1340,7 +1341,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 {
        struct br_ip br_group;
 
-       if (ipv6_is_local_multicast(group))
+       if (!ipv6_is_transient_multicast(group))
                return;
 
        ipv6_addr_copy(&br_group.u.ip6, group);
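
Editor's note: two of the br_multicast.c fixes are byte-order and offset bugs in the MLDv2 parser: the source count was read from grec_mca instead of grec_nsrcs, and the on-wire big-endian value was then used as a count without ntohs(). On a little-endian host a report carrying one source would claim 256, so the pskb_may_pull() length check was computed from garbage. A self-contained illustration of the ntohs() half (the printed values assume a little-endian host):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* grec_nsrcs as it appears on the wire: 0x0001, network order */
        unsigned char wire[2] = { 0x00, 0x01 };
        uint16_t raw;

        memcpy(&raw, wire, sizeof(raw));
        printf("raw count: %u, ntohs count: %u\n", raw, ntohs(raw));
        /* Little-endian output: "raw count: 256, ntohs count: 1" --
         * the unconverted value inflates the claimed record size 256x. */
        return 0;
}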
index 35b36b86d762892a118b7e34e88b654979db30a1..05f357828a2fb8de1bb2be9f4103c90058484259 100644 (file)
@@ -336,7 +336,6 @@ static void reset_connection(struct ceph_connection *con)
                ceph_msg_put(con->out_msg);
                con->out_msg = NULL;
        }
-       con->out_keepalive_pending = false;
        con->in_seq = 0;
        con->in_seq_acked = 0;
 }
@@ -1248,8 +1247,6 @@ static int process_connect(struct ceph_connection *con)
                     con->auth_retry);
                if (con->auth_retry == 2) {
                        con->error_msg = "connect authorization failure";
-                       reset_connection(con);
-                       set_bit(CLOSED, &con->state);
                        return -1;
                }
                con->auth_retry = 1;
@@ -1715,14 +1712,6 @@ more:
 
        /* open the socket first? */
        if (con->sock == NULL) {
-               /*
-                * if we were STANDBY and are reconnecting _this_
-                * connection, bump connect_seq now.  Always bump
-                * global_seq.
-                */
-               if (test_and_clear_bit(STANDBY, &con->state))
-                       con->connect_seq++;
-
                prepare_write_banner(msgr, con);
                prepare_write_connect(msgr, con, 1);
                prepare_read_banner(con);
@@ -1951,7 +1940,24 @@ static void con_work(struct work_struct *work)
                                                   work.work);
 
        mutex_lock(&con->mutex);
+       if (test_and_clear_bit(BACKOFF, &con->state)) {
+               dout("con_work %p backing off\n", con);
+               if (queue_delayed_work(ceph_msgr_wq, &con->work,
+                                      round_jiffies_relative(con->delay))) {
+                       dout("con_work %p backoff %lu\n", con, con->delay);
+                       mutex_unlock(&con->mutex);
+                       return;
+               } else {
+                       con->ops->put(con);
+                       dout("con_work %p FAILED to back off %lu\n", con,
+                            con->delay);
+               }
+       }
 
+       if (test_bit(STANDBY, &con->state)) {
+               dout("con_work %p STANDBY\n", con);
+               goto done;
+       }
        if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
                dout("con_work CLOSED\n");
                con_close_socket(con);
@@ -2008,10 +2014,12 @@ static void ceph_fault(struct ceph_connection *con)
        /* Requeue anything that hasn't been acked */
        list_splice_init(&con->out_sent, &con->out_queue);
 
-       /* If there are no messages in the queue, place the connection
-        * in a STANDBY state (i.e., don't try to reconnect just yet). */
-       if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
-               dout("fault setting STANDBY\n");
+       /* If there are no messages queued or keepalive pending, place
+        * the connection in a STANDBY state */
+       if (list_empty(&con->out_queue) &&
+           !test_bit(KEEPALIVE_PENDING, &con->state)) {
+               dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
+               clear_bit(WRITE_PENDING, &con->state);
                set_bit(STANDBY, &con->state);
        } else {
                /* retry after a delay. */
@@ -2019,11 +2027,24 @@ static void ceph_fault(struct ceph_connection *con)
                        con->delay = BASE_DELAY_INTERVAL;
                else if (con->delay < MAX_DELAY_INTERVAL)
                        con->delay *= 2;
-               dout("fault queueing %p delay %lu\n", con, con->delay);
                con->ops->get(con);
                if (queue_delayed_work(ceph_msgr_wq, &con->work,
-                                      round_jiffies_relative(con->delay)) == 0)
+                                      round_jiffies_relative(con->delay))) {
+                       dout("fault queued %p delay %lu\n", con, con->delay);
+               } else {
                        con->ops->put(con);
+                       dout("fault failed to queue %p delay %lu, backoff\n",
+                            con, con->delay);
+                       /*
+                        * In many cases we see a socket state change
+                        * while con_work is running and end up
+                        * queuing (non-delayed) work, such that we
+                        * can't backoff with a delay.  Set a flag so
+                        * that when con_work restarts we schedule the
+                        * delay then.
+                        */
+                       set_bit(BACKOFF, &con->state);
+               }
        }
 
 out_unlock:
@@ -2094,6 +2115,19 @@ void ceph_messenger_destroy(struct ceph_messenger *msgr)
 }
 EXPORT_SYMBOL(ceph_messenger_destroy);
 
+static void clear_standby(struct ceph_connection *con)
+{
+       /* come back from STANDBY? */
+       if (test_and_clear_bit(STANDBY, &con->state)) {
+               mutex_lock(&con->mutex);
+               dout("clear_standby %p and ++connect_seq\n", con);
+               con->connect_seq++;
+               WARN_ON(test_bit(WRITE_PENDING, &con->state));
+               WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
+               mutex_unlock(&con->mutex);
+       }
+}
+
 /*
  * Queue up an outgoing message on the given connection.
  */
@@ -2126,6 +2160,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
 
        /* if there wasn't anything waiting to send before, queue
         * new work */
+       clear_standby(con);
        if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
                queue_con(con);
 }
@@ -2191,6 +2226,8 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
  */
 void ceph_con_keepalive(struct ceph_connection *con)
 {
+       dout("con_keepalive %p\n", con);
+       clear_standby(con);
        if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
            test_and_set_bit(WRITE_PENDING, &con->state) == 0)
                queue_con(con);
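
Editor's note: the messenger changes repair a faulted connection that could not actually back off: queue_delayed_work() returns 0 when the work item is already queued, typically because a socket event queued immediate work, so the computed delay was silently dropped. The patch records a BACKOFF bit instead and lets con_work() re-queue itself with the delay. The control flow, reduced to a single-threaded model with hypothetical stand-ins for the workqueue calls:

#include <stdbool.h>
#include <stdio.h>

struct con { bool queued; bool backoff; unsigned long delay; };

/* Stand-in for queue_delayed_work(): fails if work is already queued. */
static bool queue_con_work(struct con *c)
{
        if (c->queued)
                return false;
        c->queued = true;
        return true;
}

/* Fault path: double the delay; if the requeue fails because immediate
 * work is already pending, record a BACKOFF bit instead of losing it. */
static void fault(struct con *c)
{
        c->delay = c->delay ? c->delay * 2 : 1;
        if (!queue_con_work(c))
                c->backoff = true;
}

/* Worker: honour a deferred backoff before doing anything else. */
static void con_work(struct con *c)
{
        c->queued = false;
        if (c->backoff) {
                c->backoff = false;
                queue_con_work(c);
                printf("re-queued with delay %lu\n", c->delay);
                return;
        }
        /* ... normal connection processing ... */
}

int main(void)
{
        struct con c = { .queued = true };  /* a socket event queued work */
        fault(&c);                          /* cannot delay; sets BACKOFF  */
        con_work(&c);                       /* applies the deferred delay  */
        return 0;
}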
index 1a040e64c69f23545f98e282bae934b3ed33baa9..cd9c21df87d172fa0c7bfe1af7ee289ab5dd0494 100644 (file)
@@ -16,22 +16,30 @@ struct page **ceph_get_direct_page_vector(const char __user *data,
                                          int num_pages, bool write_page)
 {
        struct page **pages;
-       int rc;
+       int got = 0;
+       int rc = 0;
 
        pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
        if (!pages)
                return ERR_PTR(-ENOMEM);
 
        down_read(&current->mm->mmap_sem);
-       rc = get_user_pages(current, current->mm, (unsigned long)data,
-                           num_pages, write_page, 0, pages, NULL);
+       while (got < num_pages) {
+               rc = get_user_pages(current, current->mm,
+                   (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
+                   num_pages - got, write_page, 0, pages + got, NULL);
+               if (rc < 0)
+                       break;
+               BUG_ON(rc == 0);
+               got += rc;
+       }
        up_read(&current->mm->mmap_sem);
-       if (rc < num_pages)
+       if (rc < 0)
                goto fail;
        return pages;
 
 fail:
-       ceph_put_page_vector(pages, rc > 0 ? rc : 0, false);
+       ceph_put_page_vector(pages, got, false);
        return ERR_PTR(rc);
 }
 EXPORT_SYMBOL(ceph_get_direct_page_vector);
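
Editor's note: ceph_get_direct_page_vector() used to treat a short return from get_user_pages() as a failure; the fix loops, advancing the user address by the pages already pinned, since get_user_pages() may legitimately make partial progress. The same drive-to-completion idiom, with a hypothetical partial-progress primitive standing in for get_user_pages():

#include <stdio.h>

/* Hypothetical primitive that makes partial progress: handles at most
 * 3 items per call, returns how many it handled, negative on error. */
static int pin_some(int want)
{
        return want > 3 ? 3 : want;
}

/* Drive the partial-progress API to completion, as the patched loop does. */
static int pin_all(int total)
{
        int got = 0;

        while (got < total) {
                int rc = pin_some(total - got);

                if (rc < 0)
                        return rc;      /* caller releases the `got` items */
                got += rc;
        }
        return got;
}

int main(void)
{
        printf("pinned %d\n", pin_all(10));   /* four calls: 3+3+3+1 */
        return 0;
}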
index 8ae6631abcc2093fe2ce9929f255be7e9c1eaff1..6561021d22d1fef9b58ec54ec2b394400fb7ba25 100644 (file)
@@ -1114,13 +1114,21 @@ EXPORT_SYMBOL(netdev_bonding_change);
 void dev_load(struct net *net, const char *name)
 {
        struct net_device *dev;
+       int no_module;
 
        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        rcu_read_unlock();
 
-       if (!dev && capable(CAP_NET_ADMIN))
-               request_module("%s", name);
+       no_module = !dev;
+       if (no_module && capable(CAP_NET_ADMIN))
+               no_module = request_module("netdev-%s", name);
+       if (no_module && capable(CAP_SYS_MODULE)) {
+               if (!request_module("%s", name))
+                       pr_err("Loading kernel module for a network device "
+"with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s "
+"instead\n", name);
+       }
 }
 EXPORT_SYMBOL(dev_load);
 
index 508f9c18992f0a0717ca31926fc2b2afcb07d9a1..133fd22ea287d3b44c4ac2e6ec82d35ce339f756 100644 (file)
@@ -144,7 +144,7 @@ void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
 
        list_for_each_entry(ha, &from_list->list, list) {
                type = addr_type ? addr_type : ha->type;
-               __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
+               __hw_addr_del(to_list, ha->addr, addr_len, type);
        }
 }
 EXPORT_SYMBOL(__hw_addr_del_multiple);
index a9e7fc4c461fa6679c39a51b214d242e1c22b873..b5bada92f63704cc87651cdc384c085bc1cbfe66 100644 (file)
@@ -3321,7 +3321,7 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
                                    pkt_dev->started_at);
        ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);
 
-       p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n",
+       p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
                     (unsigned long long)ktime_to_us(elapsed),
                     (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
                     (unsigned long long)ktime_to_us(idle),
index bbe4544508016925726522f275e95aefadeb1664..4c1ef026d6955b8cadaf77ceee6538dd4b4e1800 100644 (file)
@@ -95,7 +95,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
                int fd = fdp[i];
                struct file *file;
 
-               if (fd < 0 || !(file = fget(fd)))
+               if (fd < 0 || !(file = fget_raw(fd)))
                        return -EBADF;
                *fpp++ = file;
                fpl->count++;
index d5074a5672899a9d85adf0f20b363533fa8db9b5..c44348adba3bd22047d6a9cc95afa35ac802e291 100644 (file)
@@ -1193,7 +1193,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
                        goto err;
        }
 
-       if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) {
+       if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
                struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
                err = ops->ieee_setpfc(netdev, pfc);
                if (err)
index 8cde009e8b8501ead8ff3e94ef6bfbaabc33088c..4222e7a654b0ca16a7850f8f67efcaddeb0a8f03 100644 (file)
@@ -614,6 +614,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                /* Caller (dccp_v4_do_rcv) will send Reset */
                dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
                return 1;
+       } else if (sk->sk_state == DCCP_CLOSED) {
+               dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
+               return 1;
        }
 
        if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
@@ -668,10 +671,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        }
 
        switch (sk->sk_state) {
-       case DCCP_CLOSED:
-               dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
-               return 1;
-
        case DCCP_REQUESTING:
                queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
                if (queued >= 0)
index 739435a6af3983fa2776a4b402a51d9d7aefeec8..cfa7a5e1c5c98ca4592e09588a3a80bc1ba24d10 100644 (file)
@@ -67,8 +67,9 @@ dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
        size_t result_len = 0;
        const char *data = _data, *end, *opt;
 
-       kenter("%%%d,%s,'%s',%zu",
-              key->serial, key->description, data, datalen);
+       kenter("%%%d,%s,'%*.*s',%zu",
+              key->serial, key->description,
+              (int)datalen, (int)datalen, data, datalen);
 
        if (datalen <= 1 || !data || data[datalen - 1] != '\0')
                return -EINVAL;
@@ -217,6 +218,19 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
                seq_printf(m, ": %u", key->datalen);
 }
 
+/*
+ * read the DNS data
+ * - the key's semaphore is read-locked
+ */
+static long dns_resolver_read(const struct key *key,
+                             char __user *buffer, size_t buflen)
+{
+       if (key->type_data.x[0])
+               return key->type_data.x[0];
+
+       return user_read(key, buffer, buflen);
+}
+
 struct key_type key_type_dns_resolver = {
        .name           = "dns_resolver",
        .instantiate    = dns_resolver_instantiate,
@@ -224,7 +238,7 @@ struct key_type key_type_dns_resolver = {
        .revoke         = user_revoke,
        .destroy        = user_destroy,
        .describe       = dns_resolver_describe,
-       .read           = user_read,
+       .read           = dns_resolver_read,
 };
 
 static int __init init_dns_resolver(void)
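
Editor's note: the kenter() fix in dns_resolver_instantiate() matters because the payload is not guaranteed to be NUL-terminated when the trace fires; the '%*.*s' specifier bounds the read to datalen bytes. The same printf precision trick in plain C:

#include <stdio.h>

int main(void)
{
        /* A buffer with no terminating NUL anywhere in its 5 bytes. */
        char data[5] = { 'h', 'e', 'l', 'l', 'o' };
        int  datalen = sizeof(data);

        /* "%*.*s" consumes a width and a precision argument; the
         * precision caps how many bytes printf may read, so no NUL
         * terminator is required. */
        printf("payload: '%*.*s' (%d bytes)\n",
               datalen, datalen, data, datalen);
        return 0;
}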
index df4616fce9294910e3f6d9118d6f764d02090280..036652c8166d7cd541d5c3bb3b2af46e84779c48 100644 (file)
@@ -670,7 +670,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
                             ifap = &ifa->ifa_next) {
                                if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
                                    sin_orig.sin_addr.s_addr ==
-                                                       ifa->ifa_address) {
+                                                       ifa->ifa_local) {
                                        break; /* found */
                                }
                        }
@@ -1040,8 +1040,8 @@ static void inetdev_send_gratuitous_arp(struct net_device *dev,
                return;
 
        arp_send(ARPOP_REQUEST, ETH_P_ARP,
-                ifa->ifa_address, dev,
-                ifa->ifa_address, NULL,
+                ifa->ifa_local, dev,
+                ifa->ifa_local, NULL,
                 dev->dev_addr, NULL);
 }
 
index c5af909cf701c37dfb2afecf078de854349c2950..3c8dfa16614d4213823e3a03d7cf528324f42f88 100644 (file)
@@ -505,7 +505,9 @@ restart:
                        }
 
                        rcu_read_unlock();
+                       local_bh_disable();
                        inet_twsk_deschedule(tw, twdr);
+                       local_bh_enable();
                        inet_twsk_put(tw);
                        goto restart_rcu;
                }
index 6613edfac28c1b10ebd9a71ef0c2f2cde479db64..d1d0e2c256fc4080033a01a621f73b1c3b080b7e 100644 (file)
@@ -1765,4 +1765,4 @@ module_exit(ipgre_fini);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_RTNL_LINK("gre");
 MODULE_ALIAS_RTNL_LINK("gretap");
-MODULE_ALIAS("gre0");
+MODULE_ALIAS_NETDEV("gre0");
index 988f52fba54a172bc1c49a692393f121314ba444..a5f58e7cbb26eec188786ff1324cfc540aba1ce1 100644 (file)
@@ -913,4 +913,4 @@ static void __exit ipip_fini(void)
 module_init(ipip_init);
 module_exit(ipip_fini);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("tunl0");
+MODULE_ALIAS_NETDEV("tunl0");
index eb7f82ebf4a325a1199bbe5610cd4191bc6e2df5..65f6c04062453aefdffa2317781921990616e796 100644 (file)
@@ -1222,7 +1222,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
        }
 
        /* D-SACK for already forgotten data... Do dumb counting. */
-       if (dup_sack &&
+       if (dup_sack && tp->undo_marker && tp->undo_retrans &&
            !after(end_seq_0, prior_snd_una) &&
            after(end_seq_0, tp->undo_marker))
                tp->undo_retrans--;
@@ -1299,7 +1299,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
 
        /* Account D-SACK for retransmitted packet. */
        if (dup_sack && (sacked & TCPCB_RETRANS)) {
-               if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+               if (tp->undo_marker && tp->undo_retrans &&
+                   after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
                        tp->undo_retrans--;
                if (sacked & TCPCB_SACKED_ACKED)
                        state->reord = min(fack_count, state->reord);
index 406f320336e6591db59197fd4b75e47534b41ef7..dfa5beb0c1c8c4819d8bd59a17b8a9328ed31a96 100644 (file)
@@ -2162,7 +2162,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                if (!tp->retrans_stamp)
                        tp->retrans_stamp = TCP_SKB_CB(skb)->when;
 
-               tp->undo_retrans++;
+               tp->undo_retrans += tcp_skb_pcount(skb);
 
                /* snd_nxt is stored to detect loss of retransmitted segment,
                 * see tcp_input.c tcp_sacktag_write_queue().
index 4f4483e697bd09e541624dca9c58a3afa5cdbbe3..e528a42a52be2114e2e2df72519802faa4ce0aab 100644 (file)
@@ -57,6 +57,7 @@
 MODULE_AUTHOR("Ville Nuorvala");
 MODULE_DESCRIPTION("IPv6 tunneling device");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETDEV("ip6tnl0");
 
 #ifdef IP6_TNL_DEBUG
 #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
index 09c88891a753e725d8594edc68941b293d171100..de338037a7362cf55255b35f8653d21a219b6915 100644 (file)
@@ -410,7 +410,7 @@ fallback:
                if (p != NULL) {
                        sb_add(m, "%02x", *p++);
                        for (i = 1; i < len; i++)
-                               sb_add(m, ":%02x", p[i]);
+                               sb_add(m, ":%02x", *p++);
                }
                sb_add(m, " ");
 
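Editor's note: the LOG target fix above is a classic mixed-idiom pointer bug: the first byte was printed with *p++ (which advances p), but the remaining bytes were then indexed as p[i] with i starting at 1, so every subsequent byte was read one element too far and the final iteration read past the buffer. A small demonstration (a sentinel byte keeps the "buggy" read in bounds here):

#include <stdio.h>

int main(void)
{
        /* 4 bytes we intend to print, plus one sentinel past the end. */
        unsigned char hw[5] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee };
        unsigned char *p;
        int i, len = 4;

        p = hw;                         /* buggy: mixes *p++ with p[i]  */
        printf("%02x", *p++);           /* aa; p now points at hw[1]    */
        for (i = 1; i < len; i++)
                printf(":%02x", p[i]);  /* hw[2], hw[3], hw[4]: skewed  */
        printf("   <- skips bb, reads past the intended range\n");

        p = hw;                         /* fixed: one idiom throughout  */
        printf("%02x", *p++);
        for (i = 1; i < len; i++)
                printf(":%02x", *p++);
        printf("   <- aa:bb:cc:dd\n");
        return 0;
}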
index a998db6e78951ccf6dad683da74f8120e3479fda..e7db7014e89f949dc5683b104d3ccf6afb797d5f 100644 (file)
@@ -739,8 +739,10 @@ restart:
 
        if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
                nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
-       else
+       else if (!(rt->dst.flags & DST_HOST))
                nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
+       else
+               goto out2;
 
        dst_release(&rt->dst);
        rt = nrt ? : net->ipv6.ip6_null_entry;
@@ -2557,14 +2559,16 @@ static
 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
                              void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct net *net = current->nsproxy->net_ns;
-       int delay = net->ipv6.sysctl.flush_delay;
-       if (write) {
-               proc_dointvec(ctl, write, buffer, lenp, ppos);
-               fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
-               return 0;
-       } else
+       struct net *net;
+       int delay;
+       if (!write)
                return -EINVAL;
+
+       net = (struct net *)ctl->extra1;
+       delay = net->ipv6.sysctl.flush_delay;
+       proc_dointvec(ctl, write, buffer, lenp, ppos);
+       fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
+       return 0;
 }
 
 ctl_table ipv6_route_table_template[] = {
@@ -2651,6 +2655,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
 
        if (table) {
                table[0].data = &net->ipv6.sysctl.flush_delay;
+               table[0].extra1 = net;
                table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
                table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
                table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
index 8ce38f10a547e68fee50a9151cfb8727770bceda..d2c16e10f650807ec32cc0502efa32cea8bc93b6 100644 (file)
@@ -1290,4 +1290,4 @@ static int __init sit_init(void)
 module_init(sit_init);
 module_exit(sit_cleanup);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("sit0");
+MODULE_ALIAS_NETDEV("sit0");
index 8acba456744ea06cb91fe887cf4a4060a8c15e7d..7a10a8d1b2d0db48f1c8f2eca47e6372a1089f4f 100644 (file)
@@ -1229,6 +1229,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
        }
        mutex_unlock(&local->iflist_mtx);
        unregister_netdevice_many(&unreg_list);
+       list_del(&unreg_list);
 }
 
 static u32 ieee80211_idle_off(struct ieee80211_local *local,
index 45fbb9e33746e5817252a554a7fdff6ec8265eee..c9ceb4d57ab0e62ab3c250ad1e0d959ce1de06ef 100644 (file)
@@ -1033,6 +1033,12 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
        if (is_multicast_ether_addr(hdr->addr1))
                return;
 
+       /*
+        * We may receive frames after disassociation; ignore them.
+        */
+       if (!sdata->u.mgd.associated)
+               return;
+
        ieee80211_sta_reset_conn_monitor(sdata);
 }
 
index 22f7ad5101abb32d24af2d254dcd3528501d6553..ba98e1308f3ced1a19414259285d2bdba2f37fd3 100644 (file)
@@ -808,9 +808,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
        dest->u_threshold = udest->u_threshold;
        dest->l_threshold = udest->l_threshold;
 
-       spin_lock(&dest->dst_lock);
+       spin_lock_bh(&dest->dst_lock);
        ip_vs_dst_reset(dest);
-       spin_unlock(&dest->dst_lock);
+       spin_unlock_bh(&dest->dst_lock);
 
        if (add)
                ip_vs_new_estimator(&dest->stats);
index b07393eab88e2fb86a21d7556f7ce532c807a172..91816998ed86c065041acd185eb1a197672cadca 100644 (file)
@@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister);
 
 int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
 {
+       if (pf >= ARRAY_SIZE(nf_loggers))
+               return -EINVAL;
        mutex_lock(&nf_log_mutex);
        if (__find_logger(pf, logger->name) == NULL) {
                mutex_unlock(&nf_log_mutex);
@@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf);
 
 void nf_log_unbind_pf(u_int8_t pf)
 {
+       if (pf >= ARRAY_SIZE(nf_loggers))
+               return;
        mutex_lock(&nf_log_mutex);
        rcu_assign_pointer(nf_loggers[pf], NULL);
        mutex_unlock(&nf_log_mutex);
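Both nf_log entry points now validate the caller-supplied protocol family against ARRAY_SIZE(nf_loggers) before indexing, closing an out-of-bounds access. The idiom in isolation, as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *loggers[13];             /* one slot per protocol family */

static int bind_logger(uint8_t pf, const char *name)
{
    if (pf >= ARRAY_SIZE(loggers))          /* validate before indexing */
        return -22;                         /* -EINVAL */
    loggers[pf] = name;
    return 0;
}

int main(void)
{
    printf("%d\n", bind_logger(2, "ipt_LOG"));   /* 0   */
    printf("%d\n", bind_logger(200, "bogus"));   /* -22 */
    return 0;
}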
index 4d87befb04c04c793a54360de809e5eb64ee44c2..474d621cbc2ea8f992eed715a033b2e77e5835d3 100644 (file)
@@ -28,26 +28,23 @@ nf_tproxy_destructor(struct sk_buff *skb)
        skb->destructor = NULL;
 
        if (sk)
-               nf_tproxy_put_sock(sk);
+               sock_put(sk);
 }
 
 /* consumes sk */
-int
+void
 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
 {
-       bool transparent = (sk->sk_state == TCP_TIME_WAIT) ?
-                               inet_twsk(sk)->tw_transparent :
-                               inet_sk(sk)->transparent;
-
-       if (transparent) {
-               skb_orphan(skb);
-               skb->sk = sk;
-               skb->destructor = nf_tproxy_destructor;
-               return 1;
-       } else
-               nf_tproxy_put_sock(sk);
-
-       return 0;
+       /* assigning tw sockets complicates things; most
+        * skb->sk->X checks would have to test sk->sk_state first */
+       if (sk->sk_state == TCP_TIME_WAIT) {
+               inet_twsk_put(inet_twsk(sk));
+               return;
+       }
+
+       skb_orphan(skb);
+       skb->sk = sk;
+       skb->destructor = nf_tproxy_destructor;
 }
 EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
 
index 640678f47a2ad5420a869e4fbcd63bd677297c2c..dcfd57eb9d0249cea62289e33e64e76696b2dc00 100644 (file)
 #include <net/netfilter/nf_tproxy_core.h>
 #include <linux/netfilter/xt_TPROXY.h>
 
+static bool tproxy_sk_is_transparent(struct sock *sk)
+{
+       if (sk->sk_state != TCP_TIME_WAIT) {
+               if (inet_sk(sk)->transparent)
+                       return true;
+               sock_put(sk);
+       } else {
+               if (inet_twsk(sk)->tw_transparent)
+                       return true;
+               inet_twsk_put(inet_twsk(sk));
+       }
+       return false;
+}
+
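The socket lookup can return either a full socket or a TIME_WAIT minisocket, and the two take different put routines (sock_put() vs inet_twsk_put()); the new helper branches on sk_state before dropping the reference, as does xt_socket_put_sk() below. A toy model of the shape (types and refcounting here are stand-ins, not the kernel's):

#include <stdio.h>

enum state { ESTABLISHED, TIME_WAIT };

struct sock { enum state sk_state; int refcnt; };

static void sock_put(struct sock *sk)      { sk->refcnt--; }
static void inet_twsk_put(struct sock *sk) { sk->refcnt--; }  /* distinct path */

/* drop the reference via the put routine matching the socket flavour */
static void put_sk(struct sock *sk)
{
    if (sk->sk_state == TIME_WAIT)
        inet_twsk_put(sk);
    else
        sock_put(sk);
}

int main(void)
{
    struct sock tw = { TIME_WAIT, 1 }, full = { ESTABLISHED, 1 };
    put_sk(&tw);
    put_sk(&full);
    printf("%d %d\n", tw.refcnt, full.refcnt);  /* 0 0 */
    return 0;
}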
 static inline __be32
 tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
 {
@@ -141,7 +155,7 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
                                           skb->dev, NFT_LOOKUP_LISTENER);
 
        /* NOTE: assign_sock consumes our sk reference */
-       if (sk && nf_tproxy_assign_sock(skb, sk)) {
+       if (sk && tproxy_sk_is_transparent(sk)) {
                /* This should be in a separate target, but we don't do multiple
                   targets on the same rule yet */
                skb->mark = (skb->mark & ~mark_mask) ^ mark_value;
@@ -149,6 +163,8 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
                pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n",
                         iph->protocol, &iph->daddr, ntohs(hp->dest),
                         &laddr, ntohs(lport), skb->mark);
+
+               nf_tproxy_assign_sock(skb, sk);
                return NF_ACCEPT;
        }
 
@@ -306,7 +322,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
                                           par->in, NFT_LOOKUP_LISTENER);
 
        /* NOTE: assign_sock consumes our sk reference */
-       if (sk && nf_tproxy_assign_sock(skb, sk)) {
+       if (sk && tproxy_sk_is_transparent(sk)) {
                /* This should be in a separate target, but we don't do multiple
                   targets on the same rule yet */
                skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value;
@@ -314,6 +330,8 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
                pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n",
                         tproto, &iph->saddr, ntohs(hp->source),
                         laddr, ntohs(lport), skb->mark);
+
+               nf_tproxy_assign_sock(skb, sk);
                return NF_ACCEPT;
        }
 
index 00d6ae838303f1dc0904b5a0aba543fbc32d200b..9cc46356b5773058c0554931bc84866a14113f75 100644 (file)
 #include <net/netfilter/nf_conntrack.h>
 #endif
 
+static void
+xt_socket_put_sk(struct sock *sk)
+{
+       if (sk->sk_state == TCP_TIME_WAIT)
+               inet_twsk_put(inet_twsk(sk));
+       else
+               sock_put(sk);
+}
+
 static int
 extract_icmp4_fields(const struct sk_buff *skb,
                    u8 *protocol,
@@ -164,7 +173,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
                                       (sk->sk_state == TCP_TIME_WAIT &&
                                        inet_twsk(sk)->tw_transparent));
 
-               nf_tproxy_put_sock(sk);
+               xt_socket_put_sk(sk);
 
                if (wildcard || !transparent)
                        sk = NULL;
@@ -298,7 +307,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
                                       (sk->sk_state == TCP_TIME_WAIT &&
                                        inet_twsk(sk)->tw_transparent));
 
-               nf_tproxy_put_sock(sk);
+               xt_socket_put_sk(sk);
 
                if (wildcard || !transparent)
                        sk = NULL;
index 478181d53c555dc0d7cf9a18714aa0a08b03f3e1..1f924595bdefd8e6f632563b29aebf670c566869 100644 (file)
@@ -1407,7 +1407,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        int noblock = flags&MSG_DONTWAIT;
        size_t copied;
        struct sk_buff *skb, *data_skb;
-       int err;
+       int err, ret;
 
        if (flags&MSG_OOB)
                return -EOPNOTSUPP;
@@ -1470,8 +1470,13 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 
        skb_free_datagram(sk, skb);
 
-       if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
-               netlink_dump(sk);
+       if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+               ret = netlink_dump(sk);
+               if (ret) {
+                       sk->sk_err = ret;
+                       sk->sk_error_report(sk);
+               }
+       }
 
        scm_recv(sock, msg, siocb->scm, flags);
 out:
@@ -1736,6 +1741,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        struct netlink_callback *cb;
        struct sock *sk;
        struct netlink_sock *nlk;
+       int ret;
 
        cb = kzalloc(sizeof(*cb), GFP_KERNEL);
        if (cb == NULL)
@@ -1764,9 +1770,13 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        nlk->cb = cb;
        mutex_unlock(nlk->cb_mutex);
 
-       netlink_dump(sk);
+       ret = netlink_dump(sk);
+
        sock_put(sk);
 
+       if (ret)
+               return ret;
+
        /* We successfully started a dump, by returning -EINTR we
         * signal not to send ACK even if it was requested.
         */
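Both call sites previously discarded netlink_dump()'s return value; the fix pushes a failure into sk_err for the recvmsg path and returns it from netlink_dump_start(). A minimal sketch of surfacing the previously swallowed error (names hypothetical):

#include <stdio.h>

struct sk { int sk_err; };

static void error_report(struct sk *s)
{
    fprintf(stderr, "socket error %d reported to waiter\n", s->sk_err);
}

static int dump(void) { return -12; }       /* say, -ENOMEM */

static void recv_path(struct sk *s)
{
    int ret = dump();
    if (ret) {                              /* was: dump() with ret ignored */
        s->sk_err = ret;
        error_report(s);
    }
}

int main(void)
{
    struct sk s = { 0 };
    recv_path(&s);
    return 0;
}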
index 71f373c421bc4d8b6a97f2be7201a2dc8c2290e8..c47a511f203d463d70eea98bc594ae7468bf9919 100644 (file)
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
        if (conn->c_loopback
            && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
                rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
-               return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+               scat = &rm->data.op_sg[sg];
+               ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+               ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
+               return ret;
        }
 
        /* FIXME we may overallocate here */
index aeec1d483b17e6f65c858e1510c7752965e35260..bca6761a3ca2f578b1355e670dbb446ad24cd20a 100644 (file)
@@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
                         unsigned int hdr_off, unsigned int sg,
                         unsigned int off)
 {
+       struct scatterlist *sgp = &rm->data.op_sg[sg];
+       int ret = sizeof(struct rds_header) +
+                       be32_to_cpu(rm->m_inc.i_hdr.h_len);
+
        /* Do not send cong updates to loopback */
        if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
                rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
-               return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+               ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off);
+               goto out;
        }
 
        BUG_ON(hdr_off || sg || off);
@@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
                            NULL);
 
        rds_inc_put(&rm->m_inc);
-
-       return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len);
+out:
+       return ret;
 }
 
 /*
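Both the IB and loopback transmit paths reported the full header-plus-congestion-map length even when the current scatterlist element had less data remaining past c_xmit_data_off, which broke the caller's offset accounting; the fix clamps the return with min_t(). The clamp in isolation:

#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
    int hdr_plus_map = 16 + 8192;           /* what the old code returned */
    int sg_len = 4096, xmit_off = 1024;     /* what is actually left to send */

    int ret = min_t(int, hdr_plus_map, sg_len - xmit_off);
    printf("%d\n", ret);                    /* 3072, not 8208 */
    return 0;
}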
index 89315009bab11f2f6c32db1bd50d0505d8210c46..1a2b0633feced0ef52ca476fb80d65e412d345ac 100644 (file)
@@ -423,6 +423,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
                        goto protocol_error;
                }
 
+       case RXRPC_PACKET_TYPE_ACKALL:
        case RXRPC_PACKET_TYPE_ACK:
                /* ACK processing is done in process context */
                read_lock_bh(&call->state_lock);
index 5ee16f0353febe4d195daaa6ca29ace62a79c06a..d763793d39de4476dec0ff7b0e7fee557176f56c 100644 (file)
@@ -89,11 +89,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
                return ret;
 
        plen -= sizeof(*token);
-       token = kmalloc(sizeof(*token), GFP_KERNEL);
+       token = kzalloc(sizeof(*token), GFP_KERNEL);
        if (!token)
                return -ENOMEM;
 
-       token->kad = kmalloc(plen, GFP_KERNEL);
+       token->kad = kzalloc(plen, GFP_KERNEL);
        if (!token->kad) {
                kfree(token);
                return -ENOMEM;
@@ -731,10 +731,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
                goto error;
 
        ret = -ENOMEM;
-       token = kmalloc(sizeof(*token), GFP_KERNEL);
+       token = kzalloc(sizeof(*token), GFP_KERNEL);
        if (!token)
                goto error;
-       token->kad = kmalloc(plen, GFP_KERNEL);
+       token->kad = kzalloc(plen, GFP_KERNEL);
        if (!token->kad)
                goto error_free;
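Switching the token allocations from kmalloc() to kzalloc() guarantees that fields the XDR parser never writes (and structure padding) start out zeroed rather than holding stale heap contents. A user-space analogue using calloc(), with a hypothetical layout:

#include <stdlib.h>

struct token { char *kad; unsigned int flags; };   /* hypothetical layout */

static struct token *alloc_token(size_t plen)
{
    struct token *t = calloc(1, sizeof(*t));       /* kzalloc analogue */
    if (!t)
        return NULL;
    t->kad = calloc(1, plen);                      /* zeroed payload too */
    if (!t->kad) {
        free(t);
        return NULL;
    }
    return t;                                      /* t->flags is 0, not junk */
}

int main(void)
{
    struct token *t = alloc_token(64);
    if (!t)
        return 1;
    free(t->kad);
    free(t);
    return 0;
}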
 
index 34dc598440a240c76b729d53cce8b90c18bfc78c..1bc698039ae2e647d670d53b83b976858cdf2a09 100644 (file)
@@ -839,6 +839,7 @@ void dev_deactivate(struct net_device *dev)
 
        list_add(&dev->unreg_list, &single);
        dev_deactivate_many(&single);
+       list_del(&single);
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,
index 2cc46f0962ca37b63e398c633f794c5986977095..b23428f3c0dde3657187645e47c191a33a5dd7de 100644 (file)
@@ -2029,11 +2029,11 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
                        *errp = sctp_make_op_error_fixed(asoc, chunk);
 
                if (*errp) {
-                       sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-                                       WORD_ROUND(ntohs(param.p->length)));
-                       sctp_addto_chunk_fixed(*errp,
-                                       WORD_ROUND(ntohs(param.p->length)),
-                                       param.v);
+                       if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+                                       WORD_ROUND(ntohs(param.p->length))))
+                               sctp_addto_chunk_fixed(*errp,
+                                               WORD_ROUND(ntohs(param.p->length)),
+                                               param.v);
                } else {
                        /* If there is no memory for generating the ERROR
                         * report as specified, an ABORT will be triggered
index 243fc09b164e81865a902015f18bd87313413167..59e599498e37ffcd25a0c298de1030b78a13d516 100644 (file)
@@ -252,23 +252,37 @@ static void rpc_set_active(struct rpc_task *task)
 
 /*
  * Mark an RPC call as having completed by clearing the 'active' bit
+ * and then waking up all tasks that were sleeping.
  */
-static void rpc_mark_complete_task(struct rpc_task *task)
+static int rpc_complete_task(struct rpc_task *task)
 {
-       smp_mb__before_clear_bit();
+       void *m = &task->tk_runstate;
+       wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
+       struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&wq->lock, flags);
        clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
-       smp_mb__after_clear_bit();
-       wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
+       ret = atomic_dec_and_test(&task->tk_count);
+       if (waitqueue_active(wq))
+               __wake_up_locked_key(wq, TASK_NORMAL, &k);
+       spin_unlock_irqrestore(&wq->lock, flags);
+       return ret;
 }
 
 /*
  * Allow callers to wait for completion of an RPC call
+ *
+ * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
+ * to force the wq->lock to be taken and hence avoid races with
+ * rpc_complete_task().
  */
 int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
 {
        if (action == NULL)
                action = rpc_wait_bit_killable;
-       return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
+       return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
                        action, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
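The old rpc_mark_complete_task() cleared the bit and called wake_up_bit() with no lock held, so it could race with the final reference drop; the rewrite performs the clear_bit(), the atomic_dec_and_test() and the wakeup all under wq->lock, and waiters take the same lock via out_of_line_wait_on_bit(). The classic condition-variable form of that discipline, as a compilable sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int active = 1;                      /* the RPC_TASK_ACTIVE analogue */

static void *completer(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);              /* state change and wakeup ...    */
    active = 0;
    pthread_cond_broadcast(&done);          /* ... happen under the same lock */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, completer, NULL);

    pthread_mutex_lock(&lock);              /* waiter checks under the lock */
    while (active)
        pthread_cond_wait(&done, &lock);
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);
    puts("task complete");
    return 0;
}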
@@ -857,34 +871,67 @@ static void rpc_async_release(struct work_struct *work)
        rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
 }
 
-void rpc_put_task(struct rpc_task *task)
+static void rpc_release_resources_task(struct rpc_task *task)
 {
-       if (!atomic_dec_and_test(&task->tk_count))
-               return;
-       /* Release resources */
        if (task->tk_rqstp)
                xprt_release(task);
        if (task->tk_msg.rpc_cred)
                put_rpccred(task->tk_msg.rpc_cred);
        rpc_task_release_client(task);
-       if (task->tk_workqueue != NULL) {
+}
+
+static void rpc_final_put_task(struct rpc_task *task,
+               struct workqueue_struct *q)
+{
+       if (q != NULL) {
                INIT_WORK(&task->u.tk_work, rpc_async_release);
-               queue_work(task->tk_workqueue, &task->u.tk_work);
+               queue_work(q, &task->u.tk_work);
        } else
                rpc_free_task(task);
 }
+
+static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
+{
+       if (atomic_dec_and_test(&task->tk_count)) {
+               rpc_release_resources_task(task);
+               rpc_final_put_task(task, q);
+       }
+}
+
+void rpc_put_task(struct rpc_task *task)
+{
+       rpc_do_put_task(task, NULL);
+}
 EXPORT_SYMBOL_GPL(rpc_put_task);
 
+void rpc_put_task_async(struct rpc_task *task)
+{
+       rpc_do_put_task(task, task->tk_workqueue);
+}
+EXPORT_SYMBOL_GPL(rpc_put_task_async);
+
 static void rpc_release_task(struct rpc_task *task)
 {
        dprintk("RPC: %5u release task\n", task->tk_pid);
 
        BUG_ON (RPC_IS_QUEUED(task));
 
-       /* Wake up anyone who is waiting for task completion */
-       rpc_mark_complete_task(task);
+       rpc_release_resources_task(task);
 
-       rpc_put_task(task);
+       /*
+        * Note: at this point we have been removed from rpc_clnt->cl_tasks,
+        * so it should be safe to use task->tk_count as a test for whether
+        * or not any other processes still hold references to our rpc_task.
+        */
+       if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
+               /* Wake up anyone who may be waiting for task completion */
+               if (!rpc_complete_task(task))
+                       return;
+       } else {
+               if (!atomic_dec_and_test(&task->tk_count))
+                       return;
+       }
+       rpc_final_put_task(task, task->tk_workqueue);
 }
 
 int rpciod_up(void)
index 9df1eadc912a837c8863bd4a69733a23dbbab134..1a10dcd999ea9008b5069582a64867a44766cd15 100644 (file)
@@ -1335,6 +1335,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
                                            p, 0, length, DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
                put_page(p);
+               svc_rdma_put_context(ctxt, 1);
                return;
        }
        atomic_inc(&xprt->sc_dma_used);
index c431f5a579605bfa5ea33161ff45188ab4bd570d..be96d429b475f72c31f5d16a0b9e28d677d969b9 100644 (file)
@@ -1631,7 +1631,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
        }
        xs_reclassify_socket(family, sock);
 
-       if (xs_bind(transport, sock)) {
+       err = xs_bind(transport, sock);
+       if (err) {
                sock_release(sock);
                goto out;
        }
index dd419d2862043c95001df58853d2799cc135d5dc..ba5b8c208498c26f2c2024c58f3d65d976af548a 100644 (file)
@@ -850,7 +850,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                 * Get the parent directory, calculate the hash for last
                 * component.
                 */
-               err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
+               err = kern_path_parent(sunaddr->sun_path, &nd);
                if (err)
                        goto out_mknod_parent;
 
@@ -1724,7 +1724,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
 
        msg->msg_namelen = 0;
 
-       mutex_lock(&u->readlock);
+       err = mutex_lock_interruptible(&u->readlock);
+       if (err) {
+               err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
+               goto out;
+       }
 
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
@@ -1864,7 +1868,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                memset(&tmp_scm, 0, sizeof(tmp_scm));
        }
 
-       mutex_lock(&u->readlock);
+       err = mutex_lock_interruptible(&u->readlock);
+       if (err) {
+               err = sock_intr_errno(timeo);
+               goto out;
+       }
 
        do {
                int chunk;
@@ -1895,11 +1903,12 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 
                        timeo = unix_stream_data_wait(sk, timeo);
 
-                       if (signal_pending(current)) {
+                       if (signal_pending(current) ||
+                           mutex_lock_interruptible(&u->readlock)) {
                                err = sock_intr_errno(timeo);
                                goto out;
                        }
-                       mutex_lock(&u->readlock);
+
                        continue;
  unlock:
                        unix_state_unlock(sk);
index f89f83bf828ee0e713ded75c84da22a1c1b2426e..b6f4b994eb356a0cd9c05851d49fff16bece3eb0 100644 (file)
@@ -104,7 +104,7 @@ struct sock *unix_get_socket(struct file *filp)
        /*
         *      Socket ?
         */
-       if (S_ISSOCK(inode->i_mode)) {
+       if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
                struct socket *sock = SOCKET_I(inode);
                struct sock *s = sock->sk;
 
index 3e5dbd4e4cd5e470f016aaff5c8cff997f60b678..d112f038edf05d08433bfad22e36095577055cb0 100644 (file)
@@ -802,11 +802,11 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
                        return freq;
                if (freq == 0)
                        return -EINVAL;
-               wdev_lock(wdev);
                mutex_lock(&rdev->devlist_mtx);
+               wdev_lock(wdev);
                err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
-               mutex_unlock(&rdev->devlist_mtx);
                wdev_unlock(wdev);
+               mutex_unlock(&rdev->devlist_mtx);
                return err;
        default:
                return -EOPNOTSUPP;
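The SIOCSIWFREQ path took wdev_lock() before devlist_mtx while other paths take them the other way around, the classic ABBA deadlock; the fix makes every path acquire devlist_mtx first and release in reverse order. A two-mutex sketch of the rule (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t devlist_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wdev_mtx    = PTHREAD_MUTEX_INITIALIZER;

/* every path takes devlist_mtx first, wdev_mtx second, and releases in
 * reverse order; mixed orders across threads can deadlock (ABBA) */
static void set_freq(int freq)
{
    pthread_mutex_lock(&devlist_mtx);
    pthread_mutex_lock(&wdev_mtx);
    printf("setting freq %d\n", freq);
    pthread_mutex_unlock(&wdev_mtx);
    pthread_mutex_unlock(&devlist_mtx);
}

int main(void)
{
    set_freq(2412);
    return 0;
}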
index 6c94c6ce2925f7a35c4c91631ef500f57945fe14..291228e259845aa62c15f45e43389fd7a2b4d675 100644 (file)
@@ -309,6 +309,11 @@ static void do_config_file(const char *filename)
        close(fd);
 }
 
+/*
+ * Important: the source_foo.o and deps_foo.o variable assignments
+ * generated below are parsed not only by make, but also by the rather
+ * simple parser in scripts/mod/sumversion.c.
+ */
 static void parse_dep_file(void *map, size_t len)
 {
        char *m = map;
@@ -323,7 +328,6 @@ static void parse_dep_file(void *map, size_t len)
                exit(1);
        }
        memcpy(s, m, p-m); s[p-m] = 0;
-       printf("deps_%s := \\\n", target);
        m = p+1;
 
        clear_config();
@@ -343,12 +347,15 @@ static void parse_dep_file(void *map, size_t len)
                    strrcmp(s, "arch/um/include/uml-config.h") &&
                    strrcmp(s, ".ver")) {
                        /*
-                        * Do not output the first dependency (the
-                        * source file), so that kbuild is not confused
-                        * if a .c file is rewritten into .S or vice
-                        * versa.
+                        * Do not list the source file as a dependency, so that
+                        * kbuild is not confused if a .c file is rewritten
+                        * into .S or vice versa. Storing it in source_* is
+                        * needed for modpost to compute srcversions.
                         */
-                       if (!first)
+                       if (first) {
+                               printf("source_%s := %s\n\n", target, s);
+                               printf("deps_%s := \\\n", target);
+                       } else
                                printf("  %s \\\n", s);
                        do_config_file(s);
                }
index 4c0383da1c9a22087853e423875b7829f03f87ac..58848e3e392c7c45004340d8704229da18e990eb 100755 (executable)
@@ -2654,11 +2654,6 @@ sub process {
                        WARN("Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt\n" . $herecurr);
                }
 
-# SPIN_LOCK_UNLOCKED & RW_LOCK_UNLOCKED are deprecated
-               if ($line =~ /\b(SPIN_LOCK_UNLOCKED|RW_LOCK_UNLOCKED)/) {
-                       ERROR("Use of $1 is deprecated: see Documentation/spinlocks.txt\n" . $herecurr);
-               }
-
 # warn about #if 0
                if ($line =~ /^.\s*\#\s*if\s+0\b/) {
                        CHK("if this code is redundant consider removing it\n" .
index fd81fc33d6338858fc8352c82034937f26010ce8..a4fe923c01315205e3038b3da5b3514c6d134291 100644 (file)
@@ -1,6 +1,6 @@
 #!/usr/bin/perl -w
 #
-# Copywrite 2005-2009 - Steven Rostedt
+# Copyright 2005-2009 - Steven Rostedt
 # Licensed under the terms of the GNU GPL License version 2
 #
 #  It's simple enough to figure out how this works.
index ecf9c7dc18259358d10ba9ff1c1cf91d9664a63f..9dfcd6d988dacaacebef7b91cd51a024618bfb00 100644 (file)
@@ -300,8 +300,8 @@ static int is_static_library(const char *objfile)
                return 0;
 }
 
-/* We have dir/file.o.  Open dir/.file.o.cmd, look for deps_ line to
- * figure out source file. */
+/* We have dir/file.o.  Open dir/.file.o.cmd, look for source_ and deps_ lines
+ * to figure out source files. */
 static int parse_source_files(const char *objfile, struct md4_ctx *md)
 {
        char *cmd, *file, *line, *dir;
@@ -340,6 +340,21 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md)
        */
        while ((line = get_next_line(&pos, file, flen)) != NULL) {
                char* p = line;
+
+               if (strncmp(line, "source_", sizeof("source_")-1) == 0) {
+                       p = strrchr(line, ' ');
+                       if (!p) {
+                               warn("malformed line: %s\n", line);
+                               goto out_file;
+                       }
+                       p++;
+                       if (!parse_file(p, md)) {
+                               warn("could not open %s: %s\n",
+                                    p, strerror(errno));
+                               goto out_file;
+                       }
+                       continue;
+               }
                if (strncmp(line, "deps_", sizeof("deps_")-1) == 0) {
                        check_files = 1;
                        continue;
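sumversion now recognizes the source_ lines that fixdep emits alongside deps_. The strncmp(line, "prefix", sizeof("prefix")-1) idiom yields the prefix length at compile time (sizeof counts the terminating NUL, hence the -1). A standalone sketch of the parse:

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *lines[] = {
        "source_foo.o := foo.c",
        "deps_foo.o := \\",
        "  foo.h \\",
    };

    for (size_t i = 0; i < sizeof(lines) / sizeof(lines[0]); i++) {
        const char *line = lines[i];
        /* sizeof("source_") counts the trailing NUL, hence the -1 */
        if (strncmp(line, "source_", sizeof("source_") - 1) == 0) {
            const char *p = strrchr(line, ' ');   /* last word = file name */
            printf("source file: %s\n", p ? p + 1 : "?");
        } else if (strncmp(line, "deps_", sizeof("deps_") - 1) == 0) {
            puts("dependency list follows");
        }
    }
    return 0;
}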
index 038b3d1e29814f789ec6728708c4f71858ef7234..f9f6f52db772830c4712d080063773a6187020c5 100644 (file)
@@ -206,7 +206,8 @@ static uint32_t (*w2)(uint16_t);
 static int
 is_mcounted_section_name(char const *const txtname)
 {
-       return 0 == strcmp(".text",          txtname) ||
+       return 0 == strcmp(".text",           txtname) ||
+               0 == strcmp(".ref.text",      txtname) ||
                0 == strcmp(".sched.text",    txtname) ||
                0 == strcmp(".spinlock.text", txtname) ||
                0 == strcmp(".irqentry.text", txtname) ||
index 1d7963f4ee79b853055337f3eed7ca80731dc667..4be0deea71ca6f947b6c1d47c28c431cae88c3c7 100755 (executable)
@@ -130,6 +130,7 @@ if ($inputfile =~ m,kernel/trace/ftrace\.o$,) {
 # Acceptable sections to record.
 my %text_sections = (
      ".text" => 1,
+     ".ref.text" => 1,
      ".sched.text" => 1,
      ".spinlock.text" => 1,
      ".irqentry.text" => 1,
index 44423b4dcb820959b82f6bf126e42a6c6d08f44c..8c81d76959eee335062d090257aa20a383cac423 100644 (file)
@@ -33,8 +33,6 @@ cmd_opcodes = {
     "lockintnowait" : "6",
     "lockcont"      : "7",
     "unlock"        : "8",
-    "lockbkl"       : "9",
-    "unlockbkl"     : "10",
     "signal"        : "11",
     "resetevent"    : "98",
     "reset"         : "99",
index 8821f27cc8be6aea6db61b07a7255b651da16bf8..3710c8b2090d5f8b4b762387f99529b3151bd651 100644 (file)
@@ -19,8 +19,6 @@
 # lockintnowait        lock nr (0-7)
 # lockcont     lock nr (0-7)
 # unlock       lock nr (0-7)
-# lockbkl      lock nr (0-7)
-# unlockbkl    lock nr (0-7)
 # signal       0
 # reset                0
 # resetevent   0
@@ -39,9 +37,6 @@
 # blocked      lock nr (0-7)
 # blockedwake  lock nr (0-7)
 # unlocked     lock nr (0-7)
-# lockedbkl    dont care
-# blockedbkl   dont care
-# unlockedbkl  dont care
 # opcodeeq     command opcode or number
 # opcodelt     number
 # opcodegt     number
index cde1f189a02bf29075cdf102fd9244af42bd26c7..b4cc95975adbb4f6921340fd4d77895cb880045e 100644 (file)
@@ -19,8 +19,6 @@
 # lockintnowait        lock nr (0-7)
 # lockcont     lock nr (0-7)
 # unlock       lock nr (0-7)
-# lockbkl      lock nr (0-7)
-# unlockbkl    lock nr (0-7)
 # signal       0
 # reset                0
 # resetevent   0
@@ -39,9 +37,6 @@
 # blocked      lock nr (0-7)
 # blockedwake  lock nr (0-7)
 # unlocked     lock nr (0-7)
-# lockedbkl    dont care
-# blockedbkl   dont care
-# unlockedbkl  dont care
 # opcodeeq     command opcode or number
 # opcodelt     number
 # opcodegt     number
index 3ab0bfc49950ee076032bf7b1bd828a86690e76d..1b57376cc1f7cac05e7fb8e76c96050e0860b278 100644 (file)
@@ -19,8 +19,6 @@
 # lockintnowait        lock nr (0-7)
 # lockcont     lock nr (0-7)
 # unlock       lock nr (0-7)
-# lockbkl      lock nr (0-7)
-# unlockbkl    lock nr (0-7)
 # signal       0
 # reset                0
 # resetevent   0
@@ -39,9 +37,6 @@
 # blocked      lock nr (0-7)
 # blockedwake  lock nr (0-7)
 # unlocked     lock nr (0-7)
-# lockedbkl    dont care
-# blockedbkl   dont care
-# unlockedbkl  dont care
 # opcodeeq     command opcode or number
 # opcodelt     number
 # opcodegt     number
index f4b5d5d6215fd9808dc8eefb0bccf36b7ece2d80..68b10629b6f4d1138f6f5343ff23884a4ac6033e 100644 (file)
@@ -19,8 +19,6 @@
 # lockintnowait        lock nr (0-7)
 # lockcont     lock nr (0-7)
 # unlock       lock nr (0-7)
-# lockbkl      lock nr (0-7)
-# unlockbkl    lock nr (0-7)
 # signal       0
 # reset                0
 # resetevent   0
@@ -39,9 +37,6 @@
 # blocked      lock nr (0-7)
 # blockedwake  lock nr (0-7)
 # unlocked     lock nr (0-7)
-# lockedbkl    dont care
-# blockedbkl   dont care
-# unlockedbkl  dont care
 # opcodeeq     command opcode or number
 # opcodelt     number
 # opcodegt     number
index 63440ca2cce9e83efe8be4c4771e6483c923019e..8e6c8b11ae563a94e33f0b79c82e1187a0aa0113 100644 (file)
@@ -19,8 +19,6 @@
 # lockintnowait        lock nr (0-7)
 # lockcont     lock nr (0-7)
 # unlock       lock nr (0-7)
-# lockbkl      lock nr (0-7)
-# unlockbkl    lock nr (0-7)
 # signal       thread to signal (0-7)
 # reset                0
 # resetevent   0
@@ -39,9 +37,6 @@
 # blocked      lock nr (0-7)
 # blockedwake  lock nr (0-7)
 # unlocked     lock nr (0-7)
-# lockedbkl    dont care
-# blockedbkl   dont care
-# unlockedbkl  dont care
 # opcodeeq     command opcode or number
 # opcodelt     number
 # opcodegt     number
index e5816fe67df324caf96908b45dbf7372b81aa826..69c2212fc52089e667c4bd563b955c049c087c33 100644 (file)
@@ -19,8 +19,6 @@
 # lockintnowait        lock nr (0-7)
 # lockcont     lock nr (0-7)
 # unlock       lock nr (0-7)
-# lockbkl      lock nr (0-7)
-# unlockbkl    lock nr (0-7)
 # signal       thread to signal (0-7)
 # reset                0
 # resetevent   0
@@ -39,9 +37,6 @@
 # blocked      lock nr (0-7)
 # blockedwake  lock nr (0-7)
 # unlocked     lock nr (0-7)
-# lockedbkl    dont care
-# blockedbkl   dont care
-# unlockedbkl  dont care
 # opcodeeq     command opcode or number
 # opcodelt     number
 # opcodegt     number
index 718b82b5d3bbe09bccc6329699e5f30db8449e0c..9b0f1eb26a88a95c8dd0fc5020f0e85b34576a3b 100644 (file)
@@ -19,8 +19,6 @@
 # lockintnowait        lock nr (0-7)
 # lockcont     lock nr (0-7)
 # unlock       lock nr (0-7)
-# lockbkl      lock nr (0-7)
-# unlockbkl    lock nr (0-7)
 # signal       thread to signal (0-7)
 # reset                0
 # resetevent   0
@@ -39,9 +37,6 @@
 # blocked      lock nr (0-7)
 # blockedwake  lock nr (0-7)
 # unlocked     lock nr (0-7)
-# lockedbkl    dont care
-# blockedbkl   dont care
-# unlockedbkl  dont care
 # opcodeeq     command opcode or number
 # opcodelt     number
 # opcodegt     number
index c6e2135634985f4af3c75cad81d2306f82803157..39ec74ab06ee76b9f0cbdf39f3d333c2ad0c4301 100644 (file)
@@ -19,8 +19,6 @@
 # lockintnowait        lock nr (0-7)
 # lockcont     lock nr (0-7)
 # unlock       lock nr (0-7)
-# lockbkl      lock nr (0-7)
-# unlockbkl    lock nr (0-7)
 # signal       thread to signal (0-7)
 # reset                0
 # resetevent   0
@@ -39,9 +37,6 @@
 # blocked      lock nr (0-7)
 # blockedwake  lock nr (0-7)
 # unlocked     lock nr (0-7)
-# lockedbkl    dont care
-# blockedbkl   dont care
-# unlockedbkl  dont care
 # opcodeeq     command opcode or number
 # opcodelt     number
 # opcodegt     number
index f53749d59d79d2beca742a0674ccb10bedb9cc50..e03db7e010fa242aa28b1165f1e1d61d89c47cf3 100644 (file)
@@ -19,8 +19,6 @@
 # lockintnowait        lock nr (0-7)
 # lockcont     lock nr (0-7)
 # unlock       lock nr (0-7)
-# lockbkl      lock nr (0-7)
-# unlockbkl    lock nr (0-7)
 # signal       thread to signal (0-7)
 # reset                0
 # resetevent   0
@@ -39,9 +37,6 @@
 # blocked      lock nr (0-7)
 # blockedwake  lock nr (0-7)
 # unlocked     lock nr (0-7)
-# lockedbkl    dont care
-# blockedbkl   dont care
-# unlockedbkl  dont care
 # opcodeeq     command opcode or number
 # opcodelt     number
 # opcodegt     number
index cdc3e4fd7bac6375062ab1cda98c97d6bf176f65..7b59100d3e48f5a429c7399a6bb39c5547977a73 100644 (file)
@@ -19,8 +19,6 @@
 # lockintnowait        lock nr (0-7)
 # lockcont     lock nr (0-7)
 # unlock       lock nr (0-7)
-# lockbkl      lock nr (0-7)
-# unlockbkl    lock nr (0-7)
 # signal       thread to signal (0-7)
 # reset                0
 # resetevent   0
@@ -39,9 +37,6 @@
 # blocked      lock nr (0-7)
 # blockedwake  lock nr (0-7)
 # unlocked     lock nr (0-7)
-# lockedbkl    dont care
-# blockedbkl   dont care
-# unlockedbkl  dont care
 # opcodeeq     command opcode or number
 # opcodelt     number
 # opcodegt     number
index baa14137f47354e7d60ebd798b3887156a118340..2f0e049d64438bea77bd5c3b7dc02db5ec36f473 100644 (file)
@@ -19,8 +19,6 @@
 # lockintnowait        lock nr (0-7)
 # lockcont     lock nr (0-7)
 # unlock       lock nr (0-7)
-# lockbkl      lock nr (0-7)
-# unlockbkl    lock nr (0-7)
 # signal       thread to signal (0-7)
 # reset                0
 # resetevent   0
@@ -39,9 +37,6 @@
 # blocked      lock nr (0-7)
 # blockedwake  lock nr (0-7)
 # unlocked     lock nr (0-7)
-# lockedbkl    dont care
-# blockedbkl   dont care
-# unlockedbkl  dont care
 # opcodeeq     command opcode or number
 # opcodelt     number
 # opcodegt     number
index e6ec0c81b54dc91b638e0b756919f08c0cb3da48..04f4034ff895a11b3c544bcca4553fe792e708f7 100644 (file)
@@ -19,8 +19,6 @@
 # lockintnowait        lock nr (0-7)
 # lockcont     lock nr (0-7)
 # unlock       lock nr (0-7)
-# lockbkl      lock nr (0-7)
-# unlockbkl    lock nr (0-7)
 # signal       thread to signal (0-7)
 # reset                0
 # resetevent   0
@@ -39,9 +37,6 @@
 # blocked      lock nr (0-7)
 # blockedwake  lock nr (0-7)
 # unlocked     lock nr (0-7)
-# lockedbkl    dont care
-# blockedbkl   dont care
-# unlockedbkl  dont care
 # opcodeeq     command opcode or number
 # opcodelt     number
 # opcodegt     number
index ca64f8bbf4bc7a4f25f6db6560f37bff527ca30c..a48a6ee29ddcf136956528c6df81bad9cdec4b8b 100644 (file)
@@ -19,8 +19,6 @@
 # lockintnowait        lock nr (0-7)
 # lockcont     lock nr (0-7)
 # unlock       lock nr (0-7)
-# lockbkl      lock nr (0-7)
-# unlockbkl    lock nr (0-7)
 # signal       thread to signal (0-7)
 # reset                0
 # resetevent   0
@@ -39,9 +37,6 @@
 # blocked      lock nr (0-7)
 # blockedwake  lock nr (0-7)
 # unlocked     lock nr (0-7)
-# lockedbkl    dont care
-# blockedbkl   dont care
-# unlockedbkl  dont care
 # opcodeeq     command opcode or number
 # opcodelt     number
 # opcodegt     number
index 64c2ed9c90158d1b7df59eff08595566d3dc1927..dbfdaed4cc663c6ed40836f69da8f35a9f27245a 100644 (file)
@@ -93,7 +93,7 @@ int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap,
  * Determine whether the current process may set the system clock and timezone
  * information, returning 0 if permission granted, -ve if denied.
  */
-int cap_settime(struct timespec *ts, struct timezone *tz)
+int cap_settime(const struct timespec *ts, const struct timezone *tz)
 {
        if (!capable(CAP_SYS_TIME))
                return -EPERM;
index 7b7308ace8c5b1b26bd79369d8c142ea6e14734c..bb33ecadcf958dd91ecc828adfd4c06e75f12c2c 100644 (file)
@@ -201,7 +201,7 @@ int security_syslog(int type)
        return security_ops->syslog(type);
 }
 
-int security_settime(struct timespec *ts, struct timezone *tz)
+int security_settime(const struct timespec *ts, const struct timezone *tz)
 {
        return security_ops->settime(ts, tz);
 }
index 4902ae568730c01fbb553c7c1c96fb71524a0411..53b53e97c8960e4415b7c45801d5368cb611690c 100644 (file)
@@ -141,6 +141,7 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
 
 fail_input:
        input_free_device(jack->input_dev);
+       kfree(jack->id);
        kfree(jack);
        return err;
 }
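snd_jack_new() duplicates the id string earlier in the function, but the input-device failure path only freed the device and the jack, leaking the copy; every allocation made before the failure point has to be unwound. A goto-unwind sketch of the idiom (names hypothetical):

#include <stdlib.h>
#include <string.h>

struct jack { char *id; void *input_dev; };

static int jack_new(const char *id, struct jack **out)
{
    struct jack *j = calloc(1, sizeof(*j));
    if (!j)
        goto fail;

    j->id = strdup(id);
    if (!j->id)
        goto fail_jack;

    j->input_dev = malloc(32);
    if (!j->input_dev)
        goto fail_id;                       /* the leak fixed by the hunk */

    *out = j;
    return 0;

fail_id:
    free(j->id);
fail_jack:
    free(j);
fail:
    return -12;                             /* -ENOMEM */
}

int main(void)
{
    struct jack *j;
    if (jack_new("Headphone", &j))
        return 1;
    free(j->input_dev);
    free(j->id);
    free(j);
    return 0;
}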
index a07b031090d8fc30353d2c6ef0864ffe97d0750e..067982f4f1829e74dfb0ea295ed2eb28bb8becfc 100644 (file)
@@ -1039,9 +1039,11 @@ static struct hda_verb cs_errata_init_verbs[] = {
        {0x11, AC_VERB_SET_PROC_COEF, 0x0008},
        {0x11, AC_VERB_SET_PROC_STATE, 0x00},
 
+#if 0 /* Don't set to D3 as we are in the power-up sequence */
        {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
        {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
        /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */
+#endif
 
        {} /* terminator */
 };
index dd7c5c12225d29123d4bf13cb5247e962a864c66..4d5004e693f03aa60ad00245a9abfc315f311033 100644 (file)
@@ -3114,6 +3114,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
        SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
        SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+       SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
+       SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
        SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
        SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
@@ -3937,6 +3939,8 @@ static struct hda_codec_preset snd_hda_preset_conexant[] = {
          .patch = patch_cxt5066 },
        { .id = 0x14f15069, .name = "CX20585",
          .patch = patch_cxt5066 },
+       { .id = 0x14f1506e, .name = "CX20590",
+         .patch = patch_cxt5066 },
        { .id = 0x14f15097, .name = "CX20631",
          .patch = patch_conexant_auto },
        { .id = 0x14f15098, .name = "CX20632",
@@ -3963,6 +3967,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15066");
 MODULE_ALIAS("snd-hda-codec-id:14f15067");
 MODULE_ALIAS("snd-hda-codec-id:14f15068");
 MODULE_ALIAS("snd-hda-codec-id:14f15069");
+MODULE_ALIAS("snd-hda-codec-id:14f1506e");
 MODULE_ALIAS("snd-hda-codec-id:14f15097");
 MODULE_ALIAS("snd-hda-codec-id:14f15098");
 MODULE_ALIAS("snd-hda-codec-id:14f150a1");
index a5876773672749d05ac9672b9ba59b8acb0fc903..ec0fa2dd0a2792a3fb7e1a18334fd3eb28225b29 100644 (file)
@@ -1634,6 +1634,9 @@ static struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0012, .name = "GPU 12 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de0013, .name = "GPU 13 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de0014, .name = "GPU 14 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
+{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
+{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
+/* 17 is known to be absent */
 { .id = 0x10de0018, .name = "GPU 18 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de0019, .name = "GPU 19 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de001a, .name = "GPU 1a HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
@@ -1676,6 +1679,8 @@ MODULE_ALIAS("snd-hda-codec-id:10de0011");
 MODULE_ALIAS("snd-hda-codec-id:10de0012");
 MODULE_ALIAS("snd-hda-codec-id:10de0013");
 MODULE_ALIAS("snd-hda-codec-id:10de0014");
+MODULE_ALIAS("snd-hda-codec-id:10de0015");
+MODULE_ALIAS("snd-hda-codec-id:10de0016");
 MODULE_ALIAS("snd-hda-codec-id:10de0018");
 MODULE_ALIAS("snd-hda-codec-id:10de0019");
 MODULE_ALIAS("snd-hda-codec-id:10de001a");
index 3328a259a2421bd6f68692ca6766079b8e7d0a8f..4261bb8eec1d524b283346ef6effa181134601e8 100644 (file)
@@ -1133,11 +1133,8 @@ static void alc_automute_speaker(struct hda_codec *codec, int pinctl)
                nid = spec->autocfg.hp_pins[i];
                if (!nid)
                        break;
-               if (snd_hda_jack_detect(codec, nid)) {
-                       spec->jack_present = 1;
-                       break;
-               }
-               alc_report_jack(codec, spec->autocfg.hp_pins[i]);
+               alc_report_jack(codec, nid);
+               spec->jack_present |= snd_hda_jack_detect(codec, nid);
        }
 
        mute = spec->jack_present ? HDA_AMP_MUTE : 0;
@@ -15015,7 +15012,7 @@ static struct snd_pci_quirk alc269_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC),
        SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC),
        SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC),
-       SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82Jv", ALC269_AMIC),
+       SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82JV", ALC269VB_AMIC),
        SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC),
        SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC),
        SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC),
index 9ea48b425d0b8b24b0d90a7f6bed760d7f512422..bd7b123f64407e5398f0ddfe8a48ec10acfb1308 100644 (file)
@@ -586,7 +586,12 @@ static hda_nid_t stac92hd83xxx_pin_nids[10] = {
        0x0f, 0x10, 0x11, 0x1f, 0x20,
 };
 
-static hda_nid_t stac92hd88xxx_pin_nids[10] = {
+static hda_nid_t stac92hd87xxx_pin_nids[6] = {
+       0x0a, 0x0b, 0x0c, 0x0d,
+       0x0f, 0x11,
+};
+
+static hda_nid_t stac92hd88xxx_pin_nids[8] = {
        0x0a, 0x0b, 0x0c, 0x0d,
        0x0f, 0x11, 0x1f, 0x20,
 };
@@ -5430,12 +5435,13 @@ again:
        switch (codec->vendor_id) {
        case 0x111d76d1:
        case 0x111d76d9:
+       case 0x111d76e5:
                spec->dmic_nids = stac92hd87b_dmic_nids;
                spec->num_dmics = stac92xx_connected_ports(codec,
                                stac92hd87b_dmic_nids,
                                STAC92HD87B_NUM_DMICS);
-               spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids);
-               spec->pin_nids = stac92hd88xxx_pin_nids;
+               spec->num_pins = ARRAY_SIZE(stac92hd87xxx_pin_nids);
+               spec->pin_nids = stac92hd87xxx_pin_nids;
                spec->mono_nid = 0;
                spec->num_pwrs = 0;
                break;
@@ -5443,6 +5449,7 @@ again:
        case 0x111d7667:
        case 0x111d7668:
        case 0x111d7669:
+       case 0x111d76e3:
                spec->num_dmics = stac92xx_connected_ports(codec,
                                stac92hd88xxx_dmic_nids,
                                STAC92HD88XXX_NUM_DMICS);
@@ -6387,6 +6394,8 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = {
        { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
        { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
        { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
        { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx},
        {} /* terminator */
 };
index a76c3260d941688398f43170e0584c77d7d14320..63b0054200a878e3d879f259f3ad5134ba7e7682 100644 (file)
@@ -567,7 +567,7 @@ static void via_auto_init_analog_input(struct hda_codec *codec)
                hda_nid_t nid = cfg->inputs[i].pin;
                if (spec->smart51_enabled && is_smart51_pins(spec, nid))
                        ctl = PIN_OUT;
-               else if (i == AUTO_PIN_MIC)
+               else if (cfg->inputs[i].type == AUTO_PIN_MIC)
                        ctl = PIN_VREF50;
                else
                        ctl = PIN_IN;
index bb4bf65b9e7e4c213e0887fd9dd00fe355a65259..0bb424af956fb0377632d53a9bd6e2bcaadcccd9 100644 (file)
@@ -367,7 +367,7 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec)
        return 0;
 }
 
-static const u8 cx20442_reg = CX20442_TELOUT | CX20442_MIC;
+static const u8 cx20442_reg;
 
 static struct snd_soc_codec_driver cx20442_codec_dev = {
        .probe =        cx20442_codec_probe,
index 987476a5895f100a96ee0e7f488260e09049490a..017d99ceb42eb28b84844a85ee19b0a840fd824a 100644 (file)
@@ -1482,7 +1482,7 @@ int wm8903_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
                            WM8903_MICDET_EINT | WM8903_MICSHRT_EINT,
                            irq_mask);
 
-       if (det && shrt) {
+       if (det || shrt) {
                /* Enable mic detection, this may not have been set through
                 * platform data (eg, if the defaults are OK). */
                snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0,
index e8490f3edd031c036c87f032d8a996fec547af3d..e3ec2433b2159eb327b4668c3a0c026d8c717e13 100644 (file)
@@ -165,7 +165,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec,
 
 #define WM8903_VMID_RES_50K                          2
 #define WM8903_VMID_RES_250K                         3
-#define WM8903_VMID_RES_5K                           4
+#define WM8903_VMID_RES_5K                           6
 
 /*
  * R8 (0x08) - Analogue DAC 0
index 4bbc3442703f7ceaf7b20954e45365cf88db04de..8dfb0a0da67393a9909897375c20dc735ac727a3 100644 (file)
@@ -145,18 +145,18 @@ static const struct snd_kcontrol_new wm8978_snd_controls[] = {
        SOC_SINGLE("DAC Playback Limiter Threshold",
                WM8978_DAC_LIMITER_2, 4, 7, 0),
        SOC_SINGLE("DAC Playback Limiter Boost",
-               WM8978_DAC_LIMITER_2, 0, 15, 0),
+               WM8978_DAC_LIMITER_2, 0, 12, 0),
 
        SOC_ENUM("ALC Enable Switch", alc1),
        SOC_SINGLE("ALC Capture Min Gain", WM8978_ALC_CONTROL_1, 0, 7, 0),
        SOC_SINGLE("ALC Capture Max Gain", WM8978_ALC_CONTROL_1, 3, 7, 0),
 
-       SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 7, 0),
+       SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 10, 0),
        SOC_SINGLE("ALC Capture Target", WM8978_ALC_CONTROL_2, 0, 15, 0),
 
        SOC_ENUM("ALC Capture Mode", alc3),
-       SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 15, 0),
-       SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 15, 0),
+       SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 10, 0),
+       SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 10, 0),
 
        SOC_SINGLE("ALC Capture Noise Gate Switch", WM8978_NOISE_GATE, 3, 1, 0),
        SOC_SINGLE("ALC Capture Noise Gate Threshold",
@@ -211,8 +211,10 @@ static const struct snd_kcontrol_new wm8978_snd_controls[] = {
                WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL, 6, 1, 1),
 
        /* DAC / ADC oversampling */
-       SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL, 8, 1, 0),
-       SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL, 8, 1, 0),
+       SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL,
+                  5, 1, 0),
+       SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL,
+                  5, 1, 0),
 };
 
 /* Mixer #1: Output (OUT1, OUT2) Mixer: mix AUX, Input mixer output and DAC */
index 37b8aa8a680f39645b07965e9e72d49c67a6f8b3..c6c958ee5d5920f8f7755a2498c8af73585b52de 100644 (file)
@@ -107,6 +107,12 @@ struct wm8994_priv {
 
        int revision;
        struct wm8994_pdata *pdata;
+
+       unsigned int aif1clk_enable:1;
+       unsigned int aif2clk_enable:1;
+
+       unsigned int aif1clk_disable:1;
+       unsigned int aif2clk_disable:1;
 };
 
 static int wm8994_readable(unsigned int reg)
@@ -1004,6 +1010,110 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec)
        }
 }
 
+static int late_enable_ev(struct snd_soc_dapm_widget *w,
+                         struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+       switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               if (wm8994->aif1clk_enable) {
+                       snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
+                                           WM8994_AIF1CLK_ENA_MASK,
+                                           WM8994_AIF1CLK_ENA);
+                       wm8994->aif1clk_enable = 0;
+               }
+               if (wm8994->aif2clk_enable) {
+                       snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
+                                           WM8994_AIF2CLK_ENA_MASK,
+                                           WM8994_AIF2CLK_ENA);
+                       wm8994->aif2clk_enable = 0;
+               }
+               break;
+       }
+
+       return 0;
+}
+
+static int late_disable_ev(struct snd_soc_dapm_widget *w,
+                          struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+       switch (event) {
+       case SND_SOC_DAPM_POST_PMD:
+               if (wm8994->aif1clk_disable) {
+                       snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
+                                           WM8994_AIF1CLK_ENA_MASK, 0);
+                       wm8994->aif1clk_disable = 0;
+               }
+               if (wm8994->aif2clk_disable) {
+                       snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
+                                           WM8994_AIF2CLK_ENA_MASK, 0);
+                       wm8994->aif2clk_disable = 0;
+               }
+               break;
+       }
+
+       return 0;
+}
+
+static int aif1clk_ev(struct snd_soc_dapm_widget *w,
+                     struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+       switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               wm8994->aif1clk_enable = 1;
+               break;
+       case SND_SOC_DAPM_POST_PMD:
+               wm8994->aif1clk_disable = 1;
+               break;
+       }
+
+       return 0;
+}
+
+static int aif2clk_ev(struct snd_soc_dapm_widget *w,
+                     struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+       switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               wm8994->aif2clk_enable = 1;
+               break;
+       case SND_SOC_DAPM_POST_PMD:
+               wm8994->aif2clk_disable = 1;
+               break;
+       }
+
+       return 0;
+}
+
+static int adc_mux_ev(struct snd_soc_dapm_widget *w,
+                     struct snd_kcontrol *kcontrol, int event)
+{
+       late_enable_ev(w, kcontrol, event);
+       return 0;
+}
+
+static int dac_ev(struct snd_soc_dapm_widget *w,
+                 struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       unsigned int mask = 1 << w->shift;
+
+       snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+                           mask, mask);
+       return 0;
+}
+
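On pre-rev-D WM8994 parts the AIF clocks must be switched after the blocks they feed, so aif1clk_ev()/aif2clk_ev() only record a request in the aif*clk_enable/disable flags and the late_enable_ev()/late_disable_ev() widgets apply it at the right point in the DAPM power sequence. The deferred-flag shape in isolation, as a sketch:

#include <stdio.h>

struct priv { unsigned aifclk_enable : 1; };

/* the supply widget only records the request ...                     */
static void clk_ev_pre_pmu(struct priv *p)
{
    p->aifclk_enable = 1;
}

/* ... and a later widget in the power-up sequence acts on it        */
static void late_enable_ev(struct priv *p)
{
    if (p->aifclk_enable) {
        puts("enabling AIF clock (late)");
        p->aifclk_enable = 0;
    }
}

int main(void)
{
    struct priv p = { 0 };
    clk_ev_pre_pmu(&p);     /* request noted, nothing switched yet */
    late_enable_ev(&p);     /* clock actually turned on here       */
    return 0;
}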
 static const char *hp_mux_text[] = {
        "Mixer",
        "DAC",
@@ -1272,6 +1382,59 @@ static const struct soc_enum aif2dacr_src_enum =
 static const struct snd_kcontrol_new aif2dacr_src_mux =
        SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
 
+static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = {
+SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev,
+       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev,
+       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+       late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_PGA_E("Late DAC1R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+       late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_PGA_E("Late DAC2L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+       late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_PGA_E("Late DAC2R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+       late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+
+SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
+};
+
+static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
+SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0)
+};
+
+static const struct snd_soc_dapm_widget wm8994_dac_revd_widgets[] = {
+SND_SOC_DAPM_DAC_E("DAC2L", NULL, SND_SOC_NOPM, 3, 0,
+       dac_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_DAC_E("DAC2R", NULL, SND_SOC_NOPM, 2, 0,
+       dac_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_DAC_E("DAC1L", NULL, SND_SOC_NOPM, 1, 0,
+       dac_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_DAC_E("DAC1R", NULL, SND_SOC_NOPM, 0, 0,
+       dac_ev, SND_SOC_DAPM_PRE_PMU),
+};
+
+static const struct snd_soc_dapm_widget wm8994_dac_widgets[] = {
+SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0),
+SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0),
+SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0),
+SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
+};
+
+static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
+SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+                  adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+                  adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+};
+
+static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
+SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
+SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+};
+
 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
 SND_SOC_DAPM_INPUT("DMIC1DAT"),
 SND_SOC_DAPM_INPUT("DMIC2DAT"),
@@ -1284,9 +1447,6 @@ SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0),
 
-SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
-SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0),
-
 SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL,
                     0, WM8994_POWER_MANAGEMENT_4, 9, 0),
 SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL,
@@ -1369,14 +1529,6 @@ SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8994_POWER_MANAGEMENT_4, 2, 0),
 SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0),
 SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0),
 
-SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
-
-SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0),
-SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0),
-SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0),
-SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
-
 SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux),
 SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux),
 
@@ -1516,14 +1668,12 @@ static const struct snd_soc_dapm_route intercon[] = {
        { "AIF2ADC Mux", "AIF3DACDAT", "AIF3ADCDAT" },
 
        /* DAC1 inputs */
-       { "DAC1L", NULL, "DAC1L Mixer" },
        { "DAC1L Mixer", "AIF2 Switch", "AIF2DACL" },
        { "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
        { "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
        { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" },
        { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" },
 
-       { "DAC1R", NULL, "DAC1R Mixer" },
        { "DAC1R Mixer", "AIF2 Switch", "AIF2DACR" },
        { "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
        { "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
@@ -1532,7 +1682,6 @@ static const struct snd_soc_dapm_route intercon[] = {
 
        /* DAC2/AIF2 outputs  */
        { "AIF2ADCL", NULL, "AIF2DAC2L Mixer" },
-       { "DAC2L", NULL, "AIF2DAC2L Mixer" },
        { "AIF2DAC2L Mixer", "AIF2 Switch", "AIF2DACL" },
        { "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
        { "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
@@ -1540,7 +1689,6 @@ static const struct snd_soc_dapm_route intercon[] = {
        { "AIF2DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" },
 
        { "AIF2ADCR", NULL, "AIF2DAC2R Mixer" },
-       { "DAC2R", NULL, "AIF2DAC2R Mixer" },
        { "AIF2DAC2R Mixer", "AIF2 Switch", "AIF2DACR" },
        { "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
        { "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
@@ -1584,6 +1732,24 @@ static const struct snd_soc_dapm_route intercon[] = {
        { "Right Headphone Mux", "DAC", "DAC1R" },
 };
 
+static const struct snd_soc_dapm_route wm8994_lateclk_revd_intercon[] = {
+       { "DAC1L", NULL, "Late DAC1L Enable PGA" },
+       { "Late DAC1L Enable PGA", NULL, "DAC1L Mixer" },
+       { "DAC1R", NULL, "Late DAC1R Enable PGA" },
+       { "Late DAC1R Enable PGA", NULL, "DAC1R Mixer" },
+       { "DAC2L", NULL, "Late DAC2L Enable PGA" },
+       { "Late DAC2L Enable PGA", NULL, "AIF2DAC2L Mixer" },
+       { "DAC2R", NULL, "Late DAC2R Enable PGA" },
+       { "Late DAC2R Enable PGA", NULL, "AIF2DAC2R Mixer" }
+};
+
+static const struct snd_soc_dapm_route wm8994_lateclk_intercon[] = {
+       { "DAC1L", NULL, "DAC1L Mixer" },
+       { "DAC1R", NULL, "DAC1R Mixer" },
+       { "DAC2L", NULL, "AIF2DAC2L Mixer" },
+       { "DAC2R", NULL, "AIF2DAC2R Mixer" },
+};
+
 static const struct snd_soc_dapm_route wm8994_revd_intercon[] = {
        { "AIF1DACDAT", NULL, "AIF2DACDAT" },
        { "AIF2DACDAT", NULL, "AIF1DACDAT" },
@@ -2514,6 +2680,22 @@ static int wm8994_resume(struct snd_soc_codec *codec)
 {
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
        int i, ret;
+       unsigned int val, mask;
+
+       if (wm8994->revision < 4) {
+               /* force a HW read */
+               val = wm8994_reg_read(codec->control_data,
+                                     WM8994_POWER_MANAGEMENT_5);
+
+               /* modify the cache only */
+               codec->cache_only = 1;
+               mask =  WM8994_DAC1R_ENA | WM8994_DAC1L_ENA |
+                       WM8994_DAC2R_ENA | WM8994_DAC2L_ENA;
+               val &= mask;
+               snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+                                   mask, val);
+               codec->cache_only = 0;
+       }
 
        /* Restore the registers */
        ret = snd_soc_cache_sync(codec);
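A minimal sketch of the cache-only fixup idiom used in the hunk above (names as in the driver; assuming the ASoC cache API of this era): the live register is read first, then only the register cache is patched, so the following snd_soc_cache_sync() does not re-enable DACs the hardware powered down across suspend.

	/* sketch, not the driver's literal code */
	val = wm8994_reg_read(codec->control_data, WM8994_POWER_MANAGEMENT_5);
	mask = WM8994_DAC1R_ENA | WM8994_DAC1L_ENA |
	       WM8994_DAC2R_ENA | WM8994_DAC2L_ENA;
	codec->cache_only = 1;	/* route the write to the cache only */
	snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, mask, val & mask);
	codec->cache_only = 0;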
@@ -2847,11 +3029,10 @@ static void wm8958_default_micdet(u16 status, void *data)
                report |= SND_JACK_BTN_5;
 
 done:
-       snd_soc_jack_report(wm8994->micdet[0].jack,
+       snd_soc_jack_report(wm8994->micdet[0].jack, report,
                            SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 |
                            SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 |
-                           SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT,
-                           report);
+                           SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT);
 }
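The hunk above fixes an argument-order bug: 'report' was being passed where the jack/button mask belongs. For reference, the call takes the status before the mask (sketch of the declaration in the ASoC headers of this era):

	void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask);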
 
 /**
@@ -3125,10 +3306,31 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
        case WM8994:
                snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets,
                                          ARRAY_SIZE(wm8994_specific_dapm_widgets));
+               if (wm8994->revision < 4) {
+                       snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets,
+                                                 ARRAY_SIZE(wm8994_lateclk_revd_widgets));
+                       snd_soc_dapm_new_controls(dapm, wm8994_adc_revd_widgets,
+                                                 ARRAY_SIZE(wm8994_adc_revd_widgets));
+                       snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets,
+                                                 ARRAY_SIZE(wm8994_dac_revd_widgets));
+               } else {
+                       snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets,
+                                                 ARRAY_SIZE(wm8994_lateclk_widgets));
+                       snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets,
+                                                 ARRAY_SIZE(wm8994_adc_widgets));
+                       snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets,
+                                                 ARRAY_SIZE(wm8994_dac_widgets));
+               }
                break;
        case WM8958:
                snd_soc_add_controls(codec, wm8958_snd_controls,
                                     ARRAY_SIZE(wm8958_snd_controls));
+               snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets,
+                                         ARRAY_SIZE(wm8994_lateclk_widgets));
+               snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets,
+                                         ARRAY_SIZE(wm8994_adc_widgets));
+               snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets,
+                                         ARRAY_SIZE(wm8994_dac_widgets));
                snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets,
                                          ARRAY_SIZE(wm8958_dapm_widgets));
                break;
@@ -3143,12 +3345,19 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
                snd_soc_dapm_add_routes(dapm, wm8994_intercon,
                                        ARRAY_SIZE(wm8994_intercon));
 
-               if (wm8994->revision < 4)
+               if (wm8994->revision < 4) {
                        snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon,
                                                ARRAY_SIZE(wm8994_revd_intercon));
-                       
+                       snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon,
+                                               ARRAY_SIZE(wm8994_lateclk_revd_intercon));
+               } else {
+                       snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon,
+                                               ARRAY_SIZE(wm8994_lateclk_intercon));
+               }
                break;
        case WM8958:
+               snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon,
+                                       ARRAY_SIZE(wm8994_lateclk_intercon));
                snd_soc_dapm_add_routes(dapm, wm8958_intercon,
                                        ARRAY_SIZE(wm8958_intercon));
                break;
index 43825b2102a59e3b885bec479b137a774d77abf7..cce704c275c61df460d7056c6beca250e530525f 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <linux/pm.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
@@ -1341,6 +1342,10 @@ static __devinit int wm9081_i2c_probe(struct i2c_client *i2c,
        wm9081->control_type = SND_SOC_I2C;
        wm9081->control_data = i2c;
 
+       if (dev_get_platdata(&i2c->dev))
+               memcpy(&wm9081->retune, dev_get_platdata(&i2c->dev),
+                      sizeof(wm9081->retune));
+
        ret = snd_soc_register_codec(&i2c->dev,
                        &soc_codec_dev_wm9081, &wm9081_dai, 1);
        if (ret < 0)
index 613df5db0b329d338f7f7eec6c16458ffbfe001b..516892706063d9fc62d89b86e5ce3089ab7f1d84 100644 (file)
@@ -674,6 +674,9 @@ SND_SOC_DAPM_OUTPUT("LINEOUT2N"),
 };
 
 static const struct snd_soc_dapm_route analogue_routes[] = {
+       { "MICBIAS1", NULL, "CLK_SYS" },
+       { "MICBIAS2", NULL, "CLK_SYS" },
+
        { "IN1L PGA", "IN1LP Switch", "IN1LP" },
        { "IN1L PGA", "IN1LN Switch", "IN1LN" },
 
index e20c9e1457c0ee35b7f55a225126887c78076297..1e9bccae4e80ff69d228e1df6dc494dc275b5331 100644 (file)
@@ -79,7 +79,7 @@ static struct snd_soc_dai_link eukrea_tlv320_dai = {
        .name           = "tlv320aic23",
        .stream_name    = "TLV320AIC23",
        .codec_dai_name = "tlv320aic23-hifi",
-       .platform_name  = "imx-pcm-audio.0",
+       .platform_name  = "imx-fiq-pcm-audio.0",
        .codec_name     = "tlv320aic23-codec.0-001a",
        .cpu_dai_name   = "imx-ssi.0",
        .ops            = &eukrea_tlv320_snd_ops,
index 161750443ebcd87ad57c428ed521bb88f42341f2..73dde4a1adc34b9c0af6ea8a8dc617408f2ca207 100644 (file)
@@ -139,7 +139,7 @@ static struct snd_soc_dai_link am3517evm_dai = {
        .cpu_dai_name ="omap-mcbsp-dai.0",
        .codec_dai_name = "tlv320aic23-hifi",
        .platform_name = "omap-pcm-audio",
-       .codec_name = "tlv320aic23-codec",
+       .codec_name = "tlv320aic23-codec.2-001a",
        .init = am3517evm_aic23_init,
        .ops = &am3517evm_ops,
 };
index 28333e7d9c508e3c85d5eb671fd7d03dd2039c90..dc65650a6fa15af240a43de9dab1dbe5e929f228 100644 (file)
@@ -117,7 +117,7 @@ static struct snd_soc_dai_link e740_dai[] = {
        {
                .name = "AC97",
                .stream_name = "AC97 HiFi",
-               .cpu_dai_name = "pxa-ac97.0",
+               .cpu_dai_name = "pxa2xx-ac97",
                .codec_dai_name = "wm9705-hifi",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9705-codec",
@@ -126,7 +126,7 @@ static struct snd_soc_dai_link e740_dai[] = {
        {
                .name = "AC97 Aux",
                .stream_name = "AC97 Aux",
-               .cpu_dai_name = "pxa-ac97.1",
+               .cpu_dai_name = "pxa2xx-ac97-aux",
                .codec_dai_name = "wm9705-aux",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9705-codec",
index 01bf31675c559c6a51169d94d7177a7b94de0561..51897fcd911b9c922f8151af161a73f1588c1131 100644 (file)
@@ -99,7 +99,7 @@ static struct snd_soc_dai_link e750_dai[] = {
        {
                .name = "AC97",
                .stream_name = "AC97 HiFi",
-               .cpu_dai_name = "pxa-ac97.0",
+               .cpu_dai_name = "pxa2xx-ac97",
                .codec_dai_name = "wm9705-hifi",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9705-codec",
@@ -109,7 +109,7 @@ static struct snd_soc_dai_link e750_dai[] = {
        {
                .name = "AC97 Aux",
                .stream_name = "AC97 Aux",
-               .cpu_dai_name = "pxa-ac97.1",
+               .cpu_dai_name = "pxa2xx-ac97-aux",
                .codec_dai_name ="wm9705-aux",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9705-codec",
index c6a37c6ef23b3caa3cdc8c2b9672defc0ad6621e..053ed208e59f00b713aac31268b0a4163c34f71b 100644 (file)
@@ -89,7 +89,7 @@ static struct snd_soc_dai_link e800_dai[] = {
        {
                .name = "AC97",
                .stream_name = "AC97 HiFi",
-               .cpu_dai_name = "pxa-ac97.0",
+               .cpu_dai_name = "pxa2xx-ac97",
                .codec_dai_name = "wm9712-hifi",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9712-codec",
@@ -98,7 +98,7 @@ static struct snd_soc_dai_link e800_dai[] = {
        {
                .name = "AC97 Aux",
                .stream_name = "AC97 Aux",
-               .cpu_dai_name = "pxa-ac97.1",
+               .cpu_dai_name = "pxa2xx-ac97-aux",
                .codec_dai_name ="wm9712-aux",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9712-codec",
index fc22e6eefc98baf6ad7a825f0da31c5cc43b7b8e..b13a4252812da77235f05ebe24359b9ea0950832 100644 (file)
@@ -37,7 +37,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
        {
                .name = "AC97",
                .stream_name = "AC97 HiFi",
-               .cpu_dai_name = "pxa-ac97.0",
+               .cpu_dai_name = "pxa2xx-ac97",
                .codec_dai_name = "wm9712-hifi",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9712-codec",
@@ -45,7 +45,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
        {
                .name = "AC97 Aux",
                .stream_name = "AC97 Aux",
-               .cpu_dai_name = "pxa-ac97.1",
+               .cpu_dai_name = "pxa2xx-ac97-aux",
                .codec_dai_name ="wm9712-aux",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9712-codec",
index 0d70fc8c12bd2d0aff6110a9cef261737a760e2a..38ca6759907e9d0d333c7c6f0407b9e5d3f8d260 100644 (file)
@@ -162,7 +162,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
        {
                .name = "AC97",
                .stream_name = "AC97 HiFi",
-               .cpu_dai_name = "pxa-ac97.0",
+               .cpu_dai_name = "pxa2xx-ac97",
                .codec_dai_name = "wm9713-hifi",
                .codec_name = "wm9713-codec",
                .init = mioa701_wm9713_init,
@@ -172,7 +172,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
        {
                .name = "AC97 Aux",
                .stream_name = "AC97 Aux",
-               .cpu_dai_name = "pxa-ac97.1",
+               .cpu_dai_name = "pxa2xx-ac97-aux",
                .codec_dai_name ="wm9713-aux",
                .codec_name = "wm9713-codec",
                .platform_name = "pxa-pcm-audio",
index 857db96d4a4fddb1557d692b72a3f8a397727ca5..504e4004f004ea6a7dbc0f682d43d25d0071259b 100644 (file)
@@ -132,7 +132,7 @@ static struct snd_soc_dai_link palm27x_dai[] = {
 {
        .name = "AC97 HiFi",
        .stream_name = "AC97 HiFi",
-       .cpu_dai_name = "pxa-ac97.0",
+       .cpu_dai_name = "pxa2xx-ac97",
        .codec_dai_name =  "wm9712-hifi",
        .codec_name = "wm9712-codec",
        .platform_name = "pxa-pcm-audio",
@@ -141,7 +141,7 @@ static struct snd_soc_dai_link palm27x_dai[] = {
 {
        .name = "AC97 Aux",
        .stream_name = "AC97 Aux",
-       .cpu_dai_name = "pxa-ac97.1",
+       .cpu_dai_name = "pxa2xx-ac97-aux",
        .codec_dai_name = "wm9712-aux",
        .codec_name = "wm9712-codec",
        .platform_name = "pxa-pcm-audio",
index f75804ef0897ae0341e29f54ec3941d64ee9b32e..4b6e5d608b42e5cbb97742fe10a4c633c45d1443 100644 (file)
@@ -219,7 +219,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
 {
        .name = "AC97",
        .stream_name = "AC97 HiFi",
-       .cpu_dai_name = "pxa-ac97.0",
+       .cpu_dai_name = "pxa2xx-ac97",
        .codec_dai_name = "wm9712-hifi",
        .platform_name = "pxa-pcm-audio",
        .codec_name = "wm9712-codec",
@@ -229,7 +229,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
 {
        .name = "AC97 Aux",
        .stream_name = "AC97 Aux",
-       .cpu_dai_name = "pxa-ac97.1",
+       .cpu_dai_name = "pxa2xx-ac97-aux",
        .codec_dai_name = "wm9712-aux",
        .platform_name = "pxa-pcm-audio",
        .codec_name = "wm9712-codec",
index b222a7d7202719341ecd295ee2ca595ee7d1323b..25bba108fea3295295236869c636113a03c76cd5 100644 (file)
@@ -166,7 +166,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
        .stream_name = "AC97 HiFi",
        .codec_name = "wm9713-codec",
        .platform_name = "pxa-pcm-audio",
-       .cpu_dai_name = "pxa-ac97.0",
+       .cpu_dai_name = "pxa2xx-ac97",
        .codec_name = "wm9713-hifi",
        .init = zylonite_wm9713_init,
 },
@@ -175,7 +175,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
        .stream_name = "AC97 Aux",
        .codec_name = "wm9713-codec",
        .platform_name = "pxa-pcm-audio",
-       .cpu_dai_name = "pxa-ac97.1",
+       .cpu_dai_name = "pxa2xx-ac97-aux",
        .codec_name = "wm9713-aux",
 },
 {
index 8194f150bab7138412b0e06674d06f90d555699b..1790f83ee6651b8051ff4950d2bf7540b4ea8011 100644 (file)
@@ -712,7 +712,15 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
                    !path->connected(path->source, path->sink))
                        continue;
 
-               if (path->sink && path->sink->power_check &&
+               if (!path->sink)
+                       continue;
+
+               if (path->sink->force) {
+                       power = 1;
+                       break;
+               }
+
+               if (path->sink->power_check &&
                    path->sink->power_check(path->sink)) {
                        power = 1;
                        break;
@@ -933,7 +941,7 @@ static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
        }
 
        if (!list_empty(&pending))
-               dapm_seq_run_coalesced(dapm, &pending);
+               dapm_seq_run_coalesced(cur_dapm, &pending);
 }
 
 static void dapm_widget_update(struct snd_soc_dapm_context *dapm)
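The new 'force' test in dapm_supply_check_power() short-circuits the walk: a forced sink pins its supplies on even though a forced widget's own power_check() is otherwise bypassed. A sketch of the per-path decision, with field names as in struct snd_soc_dapm_path (the force flag is presumably set via snd_soc_dapm_force_enable_pin()):

	if (path->sink->force) {	/* e.g. a pin forced on by the machine driver */
		power = 1;
		break;
	}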
@@ -1627,6 +1635,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_add_routes);
 int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
 {
        struct snd_soc_dapm_widget *w;
+       unsigned int val;
 
        list_for_each_entry(w, &dapm->card->widgets, list)
        {
@@ -1675,6 +1684,18 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
                case snd_soc_dapm_post:
                        break;
                }
+
+               /* Read the initial power state from the device */
+               if (w->reg >= 0) {
+                       val = snd_soc_read(w->codec, w->reg);
+                       val &= 1 << w->shift;
+                       if (w->invert)
+                               val = !val;
+
+                       if (val)
+                               w->power = 1;
+               }
+
                w->new = 1;
        }
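The initial-state read added above seeds each register-backed widget's power field from hardware, so DAPM's bookkeeping matches whatever the bootloader or firmware left enabled. A compact equivalent, assuming the same struct fields (val only carries a truth value here, so normalizing with !! would be equally valid):

	if (w->reg >= 0) {
		val = snd_soc_read(w->codec, w->reg) & (1 << w->shift);
		if (w->invert)
			val = !val;
		w->power = !!val;	/* sketch: normalized to 0/1 */
	}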
 
index 800f7cb4f251cd77ae246c5dee2acaed9de587f4..c0f8270bc199e9e91a82cef451d1aa63ead9eb15 100644 (file)
@@ -323,6 +323,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
                return -ENOMEM;
        }
 
+       mutex_init(&chip->shutdown_mutex);
        chip->index = idx;
        chip->dev = dev;
        chip->card = card;
@@ -531,6 +532,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
        chip = ptr;
        card = chip->card;
        mutex_lock(&register_mutex);
+       mutex_lock(&chip->shutdown_mutex);
        chip->shutdown = 1;
        chip->num_interfaces--;
        if (chip->num_interfaces <= 0) {
@@ -548,9 +550,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
                        snd_usb_mixer_disconnect(p);
                }
                usb_chip[chip->index] = NULL;
+               mutex_unlock(&chip->shutdown_mutex);
                mutex_unlock(&register_mutex);
                snd_card_free_when_closed(card);
        } else {
+               mutex_unlock(&chip->shutdown_mutex);
                mutex_unlock(&register_mutex);
        }
 }
index 4132522ac90f8c88da831b68f9cbdf777131de8e..e3f680526cb5ce840787d7feba00cdf7e54ff2f5 100644 (file)
@@ -361,6 +361,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
        }
 
        if (changed) {
+               mutex_lock(&subs->stream->chip->shutdown_mutex);
                /* format changed */
                snd_usb_release_substream_urbs(subs, 0);
                /* influenced: period_bytes, channels, rate, format, */
@@ -368,6 +369,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
                                                  params_rate(hw_params),
                                                  snd_pcm_format_physical_width(params_format(hw_params)) *
                                                        params_channels(hw_params));
+               mutex_unlock(&subs->stream->chip->shutdown_mutex);
        }
 
        return ret;
@@ -385,8 +387,9 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
        subs->cur_audiofmt = NULL;
        subs->cur_rate = 0;
        subs->period_bytes = 0;
-       if (!subs->stream->chip->shutdown)
-               snd_usb_release_substream_urbs(subs, 0);
+       mutex_lock(&subs->stream->chip->shutdown_mutex);
+       snd_usb_release_substream_urbs(subs, 0);
+       mutex_unlock(&subs->stream->chip->shutdown_mutex);
        return snd_pcm_lib_free_vmalloc_buffer(substream);
 }
 
index db3eb21627eeee5f7c9c062286e5669ed93aff1a..6e66fffe87f57096bbcb9abf1cbe453d075400ab 100644 (file)
@@ -36,6 +36,7 @@ struct snd_usb_audio {
        struct snd_card *card;
        u32 usb_id;
        int shutdown;
+       struct mutex shutdown_mutex;
        unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
        int num_interfaces;
        int num_suspended_intf;
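Taken together, the three hunks above (card.c, pcm.c, usbaudio.h) introduce one locking scheme. A sketch of the intended lock order, outer to inner, with names as in the diff:

	/* disconnect path */
	mutex_lock(&register_mutex);
	mutex_lock(&chip->shutdown_mutex);
	chip->shutdown = 1;
	/* ... tear down interfaces ... */
	mutex_unlock(&chip->shutdown_mutex);
	mutex_unlock(&register_mutex);

hw_params() and hw_free() take only chip->shutdown_mutex, so URB release can no longer race with disconnection, and the old unlocked 'if (!subs->stream->chip->shutdown)' check becomes unnecessary.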
index cb43289e447f9d880b32ebadf173fa0f01aca166..416684be0ad308a6df2a69d22f41409de05f7466 100644 (file)
@@ -1,4 +1,3 @@
-PERF-BUILD-OPTIONS
 PERF-CFLAGS
 PERF-GUI-VARS
 PERF-VERSION-FILE
index bd498d496952a6f78973b0e6b8422552009770d0..4626a398836a244a48c73b9e03d69fba8dc3ecd6 100644 (file)
@@ -178,8 +178,8 @@ install-pdf: pdf
        $(INSTALL) -d -m 755 $(DESTDIR)$(pdfdir)
        $(INSTALL) -m 644 user-manual.pdf $(DESTDIR)$(pdfdir)
 
-install-html: html
-       '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir)
+#install-html: html
+#      '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir)
 
 ../PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
        $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) PERF-VERSION-FILE
@@ -288,15 +288,16 @@ $(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt
        sed -e '1,/^$$/d' $< | $(ASCIIDOC) -b xhtml11 - >$@+ && \
        mv $@+ $@
 
-install-webdoc : html
-       '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(WEBDOC_DEST)
+# UNIMPLEMENTED
+#install-webdoc : html
+#      '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(WEBDOC_DEST)
 
-quick-install: quick-install-man
+quick-install: quick-install-man
 
-quick-install-man:
-       '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(DOC_REF) $(DESTDIR)$(mandir)
+quick-install-man:
+#      '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(DOC_REF) $(DESTDIR)$(mandir)
 
-quick-install-html:
-       '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(HTML_REF) $(DESTDIR)$(htmldir)
+#quick-install-html:
+#      '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(HTML_REF) $(DESTDIR)$(htmldir)
 
 .PHONY: .FORCE-PERF-VERSION-FILE
index 399751befeed923eec1ed9e3d937d755f776a559..7a527f7e9da9933823eefc1b7d4c173c5085ede0 100644 (file)
@@ -8,7 +8,7 @@ perf-list - List all symbolic event types
 SYNOPSIS
 --------
 [verse]
-'perf list'
+'perf list' [hw|sw|cache|tracepoint|event_glob]
 
 DESCRIPTION
 -----------
@@ -63,7 +63,26 @@ details. Some of them are referenced in the SEE ALSO section below.
 
 OPTIONS
 -------
-None
+
+Without options, all known events will be listed.
+
+To limit the list use:
+
+. 'hw' or 'hardware' to list hardware events such as cache-misses, etc.
+
+. 'sw' or 'software' to list software events such as context switches, etc.
+
+. 'cache' or 'hwcache' to list hardware cache events such as L1-dcache-loads, etc.
+
+. 'tracepoint' to list all tracepoint events; alternatively, use
+  'subsys_glob:event_glob' to filter by tracepoint subsystems such as sched,
+  block, etc.
+
+. If none of the above matches, the supplied glob is applied to all
+  events, and the ones that match are printed.
+
+One or more types can be used at the same time, listing the events for the
+types specified.
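Hypothetical invocations following the syntax just described (not taken from this patch):

	perf list hw sw       # hardware plus software events
	perf list 'sched:*'   # tracepoints of the sched subsystem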
 
 SEE ALSO
 --------
index 921de259ea1086f36ce5b7e6a3426cec5618deba..4a26a2f3a6a39a2da7e3954fdc8fed59c6647f20 100644 (file)
@@ -24,8 +24,8 @@ and statistics with this 'perf lock' command.
 
   'perf lock report' reports statistical data.
 
-OPTIONS
--------
+COMMON OPTIONS
+--------------
 
 -i::
 --input=<file>::
@@ -39,6 +39,14 @@ OPTIONS
 --dump-raw-trace::
         Dump raw trace in ASCII.
 
+REPORT OPTIONS
+--------------
+
+-k::
+--key=<value>::
+        Sorting key. Possible values: acquired (default), contended,
+        wait_total, wait_max, wait_min.
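For example, a hypothetical 'perf lock report -k contended' sorts the report by contention count instead of the default acquisition count.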
+
 SEE ALSO
 --------
 linkperf:perf[1]
index 86b797a35aa6acae540b652345937991257daa4d..02bafce4b341cfc63e399d63b5007b70fbe27d6e 100644 (file)
@@ -16,7 +16,7 @@ or
 or
 'perf probe' --list
 or
-'perf probe' [options] --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]'
+'perf probe' [options] --line='LINE'
 or
 'perf probe' [options] --vars='PROBEPOINT'
 
@@ -73,6 +73,17 @@ OPTIONS
        (Only for --vars) Show externally defined variables in addition to local
        variables.
 
+-F::
+--funcs::
+       Show available functions in a given module or in the kernel.
+
+--filter=FILTER::
+       (Only for --vars and --funcs) Set a filter. FILTER is a combination of glob
+       patterns; see FILTER PATTERN for details.
+       Default FILTER is "!__k???tab_* & !__crc_*" for --vars, and "!_*"
+       for --funcs.
+       If several filters are specified, only the last filter is used.
+
 -f::
 --force::
        Forcibly add events with existing name.
@@ -117,13 +128,14 @@ LINE SYNTAX
 -----------
 Line range is described by following syntax.
 
- "FUNC[:RLN[+NUM|-RLN2]]|SRC[:ALN[+NUM|-ALN2]]"
+ "FUNC[@SRC][:RLN[+NUM|-RLN2]]|SRC[:ALN[+NUM|-ALN2]]"
 
 FUNC specifies the function whose lines are shown. 'RLN' is the start line
 number, counted from the function entry line, and 'RLN2' is the end line number. As in the
 probe syntax, 'SRC' means the source file path, 'ALN' is the start line number,
 and 'ALN2' is the end line number in the file. It is also possible to specify how
-many lines to show by using 'NUM'.
+many lines to show by using 'NUM'. Moreover, the 'FUNC@SRC' combination is useful
+for selecting a specific function when several functions share the same name.
 So, "source.c:100-120" shows the lines from the 100th to the 120th in source.c, and "func:10+20" shows 20 lines starting at the 10th line of the func function.
 
 LAZY MATCHING
@@ -135,6 +147,14 @@ e.g.
 
 This provides some flexibility and robustness to probe point definitions against minor code changes. For example, the actual 10th line of schedule() can be moved easily by modifying schedule(), but the same line matching 'rq=cpu_rq*' may still exist in the function.)
 
+FILTER PATTERN
+--------------
+ The filter pattern is one or more glob patterns used to filter variables.
+ In addition, "!" specifies a filter-out rule. Several rules can be combined with "&" or "|", and grouped into a single rule with "(" and ")".
+
+e.g.
+ With --filter "foo* | bar*", perf probe -V shows variables which start with "foo" or "bar".
+ With --filter "!foo* & *bar", perf probe -V shows variables which do not start with "foo" but do end with "bar", such as "fizzbar"; "foobar" is filtered out.
 
 EXAMPLES
 --------
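Hypothetical invocations illustrating the new --funcs/--filter options and the FUNC@SRC form (function and file names are placeholders):

	perf probe --funcs --filter 'vfs_*'      # list kernel functions matching the filter
	perf probe --line 'func@file.c:10+20'    # 20 lines from relative line 10 of func in file.c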
index e032716c839be0604700af69ebab43dd9eebf78e..5a520f8252956cd66dd5c3321be04200e3519846 100644 (file)
 Do not update the buildid cache. This saves some overhead in situations
 where the information in the perf.data file (which includes buildids)
 is sufficient.
 
+-G name,...::
+--cgroup name,...::
+monitor only in the container (cgroup) called "name". This option is available only
+in per-cpu mode. The cgroup filesystem must be mounted. All threads belonging to
+container "name" are monitored when they run on the monitored CPUs. Multiple cgroups
+can be provided. Each cgroup is applied to the corresponding event, i.e., first cgroup
+to first event, second cgroup to second event and so on. It is possible to provide
+an empty cgroup (monitor all the time) using, e.g., -G foo,,bar. Cgroups must have
+corresponding events, i.e., they always refer to events defined earlier on the command
+line.
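A hypothetical run showing the positional mapping described above: with 'perf record -e cycles,instructions -G foo,bar -a -- sleep 1' (cgroup names invented), the first cgroup foo constrains the cycles event and bar constrains instructions.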
+
 SEE ALSO
 --------
 linkperf:perf-stat[1], linkperf:perf-list[1]
index b6da7affbbeeb82533387e9ba3f7c788d3e28dec..918cc38ee6d1e33ca5aa2a32f73946676e493942 100644 (file)
@@ -83,6 +83,17 @@ This option is only valid in system-wide mode.
 print counts using a CSV-style output to make it easy to import directly into
 spreadsheets. Columns are separated by the string specified in SEP.
 
+-G name::
+--cgroup name::
+monitor only in the container (cgroup) called "name". This option is available only
+in per-cpu mode. The cgroup filesystem must be mounted. All threads belonging to
+container "name" are monitored when they run on the monitored CPUs. Multiple cgroups
+can be provided. Each cgroup is applied to the corresponding event, i.e., first cgroup
+to first event, second cgroup to second event and so on. It is possible to provide
+an empty cgroup (monitor all the time) using, e.g., -G foo,,bar. Cgroups must have
+corresponding events, i.e., they always refer to events defined earlier on the command
+line.
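The option behaves identically here; e.g. the hypothetical 'perf stat -e cycles,instructions -G foo,bar -a -- sleep 1' counts cycles only inside cgroup foo and instructions only inside cgroup bar.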
+
 EXAMPLES
 --------
 
index 7141c42e146920b887fe60ca1ba463bfbce90ea8..9b8421805c5ca465a954ef6624ca63101a5af312 100644 (file)
@@ -3,7 +3,7 @@ ifeq ("$(origin O)", "command line")
 endif
 
 # The default target of this Makefile is...
-all::
+all:
 
 ifneq ($(OUTPUT),)
 # check that the output directory actually exists
@@ -11,152 +11,12 @@ OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
 $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
 endif
 
-# Define V=1 to have a more verbose compile.
-# Define V=2 to have an even more verbose compile.
-#
-# Define SNPRINTF_RETURNS_BOGUS if your are on a system which snprintf()
-# or vsnprintf() return -1 instead of number of characters which would
-# have been written to the final string if enough space had been available.
-#
-# Define FREAD_READS_DIRECTORIES if your are on a system which succeeds
-# when attempting to read from an fopen'ed directory.
-#
-# Define NO_OPENSSL environment variable if you do not have OpenSSL.
-# This also implies MOZILLA_SHA1.
-#
-# Define CURLDIR=/foo/bar if your curl header and library files are in
-# /foo/bar/include and /foo/bar/lib directories.
-#
-# Define EXPATDIR=/foo/bar if your expat header and library files are in
-# /foo/bar/include and /foo/bar/lib directories.
-#
-# Define NO_D_INO_IN_DIRENT if you don't have d_ino in your struct dirent.
-#
-# Define NO_D_TYPE_IN_DIRENT if your platform defines DT_UNKNOWN but lacks
-# d_type in struct dirent (latest Cygwin -- will be fixed soonish).
-#
-# Define NO_C99_FORMAT if your formatted IO functions (printf/scanf et.al.)
-# do not support the 'size specifiers' introduced by C99, namely ll, hh,
-# j, z, t. (representing long long int, char, intmax_t, size_t, ptrdiff_t).
-# some C compilers supported these specifiers prior to C99 as an extension.
-#
-# Define NO_STRCASESTR if you don't have strcasestr.
-#
-# Define NO_MEMMEM if you don't have memmem.
-#
-# Define NO_STRTOUMAX if you don't have strtoumax in the C library.
-# If your compiler also does not support long long or does not have
-# strtoull, define NO_STRTOULL.
-#
-# Define NO_SETENV if you don't have setenv in the C library.
-#
-# Define NO_UNSETENV if you don't have unsetenv in the C library.
-#
-# Define NO_MKDTEMP if you don't have mkdtemp in the C library.
-#
-# Define NO_SYS_SELECT_H if you don't have sys/select.h.
-#
-# Define NO_SYMLINK_HEAD if you never want .perf/HEAD to be a symbolic link.
-# Enable it on Windows.  By default, symrefs are still used.
-#
-# Define NO_SVN_TESTS if you want to skip time-consuming SVN interoperability
-# tests.  These tests take up a significant amount of the total test time
-# but are not needed unless you plan to talk to SVN repos.
-#
-# Define NO_FINK if you are building on Darwin/Mac OS X, have Fink
-# installed in /sw, but don't want PERF to link against any libraries
-# installed there.  If defined you may specify your own (or Fink's)
-# include directories and library directories by defining CFLAGS
-# and LDFLAGS appropriately.
-#
-# Define NO_DARWIN_PORTS if you are building on Darwin/Mac OS X,
-# have DarwinPorts installed in /opt/local, but don't want PERF to
-# link against any libraries installed there.  If defined you may
-# specify your own (or DarwinPort's) include directories and
-# library directories by defining CFLAGS and LDFLAGS appropriately.
-#
-# Define PPC_SHA1 environment variable when running make to make use of
-# a bundled SHA1 routine optimized for PowerPC.
-#
-# Define ARM_SHA1 environment variable when running make to make use of
-# a bundled SHA1 routine optimized for ARM.
-#
-# Define MOZILLA_SHA1 environment variable when running make to make use of
-# a bundled SHA1 routine coming from Mozilla. It is GPL'd and should be fast
-# on non-x86 architectures (e.g. PowerPC), while the OpenSSL version (default
-# choice) has very fast version optimized for i586.
-#
-# Define NEEDS_SSL_WITH_CRYPTO if you need -lcrypto with -lssl (Darwin).
-#
-# Define NEEDS_LIBICONV if linking with libc is not enough (Darwin).
-#
-# Define NEEDS_SOCKET if linking with libc is not enough (SunOS,
-# Patrick Mauritz).
-#
-# Define NO_MMAP if you want to avoid mmap.
-#
-# Define NO_PTHREADS if you do not have or do not want to use Pthreads.
-#
-# Define NO_PREAD if you have a problem with pread() system call (e.g.
-# cygwin.dll before v1.5.22).
-#
-# Define NO_FAST_WORKING_DIRECTORY if accessing objects in pack files is
-# generally faster on your platform than accessing the working directory.
-#
-# Define NO_TRUSTABLE_FILEMODE if your filesystem may claim to support
-# the executable mode bit, but doesn't really do so.
-#
-# Define NO_IPV6 if you lack IPv6 support and getaddrinfo().
-#
-# Define NO_SOCKADDR_STORAGE if your platform does not have struct
-# sockaddr_storage.
-#
-# Define NO_ICONV if your libc does not properly support iconv.
-#
-# Define OLD_ICONV if your library has an old iconv(), where the second
-# (input buffer pointer) parameter is declared with type (const char **).
-#
-# Define NO_DEFLATE_BOUND if your zlib does not have deflateBound.
-#
-# Define NO_R_TO_GCC_LINKER if your gcc does not like "-R/path/lib"
-# that tells runtime paths to dynamic libraries;
-# "-Wl,-rpath=/path/lib" is used instead.
-#
-# Define USE_NSEC below if you want perf to care about sub-second file mtimes
-# and ctimes. Note that you need recent glibc (at least 2.2.4) for this, and
-# it will BREAK YOUR LOCAL DIFFS! show-diff and anything using it will likely
-# randomly break unless your underlying filesystem supports those sub-second
-# times (my ext3 doesn't).
-#
-# Define USE_ST_TIMESPEC if your "struct stat" uses "st_ctimespec" instead of
-# "st_ctim"
-#
-# Define NO_NSEC if your "struct stat" does not have "st_ctim.tv_nsec"
-# available.  This automatically turns USE_NSEC off.
-#
-# Define USE_STDEV below if you want perf to care about the underlying device
-# change being considered an inode change from the update-index perspective.
-#
-# Define NO_ST_BLOCKS_IN_STRUCT_STAT if your platform does not have st_blocks
-# field that counts the on-disk footprint in 512-byte blocks.
+# Define V to have a more verbose compile.
 #
 # Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8
 #
 # Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72.
 #
-# Define NO_PERL_MAKEMAKER if you cannot use Makefiles generated by perl's
-# MakeMaker (e.g. using ActiveState under Cygwin).
-#
-# Define NO_PERL if you do not want Perl scripts or libraries at all.
-#
-# Define INTERNAL_QSORT to use Git's implementation of qsort(), which
-# is a simplified version of the merge sort used in glibc. This is
-# recommended if Git triggers O(n^2) behavior in your platform's qsort().
-#
-# Define NO_EXTERNAL_GREP if you don't want "perf grep" to ever call
-# your external grep (e.g., if your system lacks grep, if its grep is
-# broken, or spawning external process is slower than built-in grep perf has).
-#
 # Define LDFLAGS=-static to build a static binary.
 #
 # Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds.
@@ -167,12 +27,7 @@ $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
        @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
 -include $(OUTPUT)PERF-VERSION-FILE
 
-uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not')
-uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo not')
-uname_O := $(shell sh -c 'uname -o 2>/dev/null || echo not')
-uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not')
-uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not')
-uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not')
+uname_M := $(shell uname -m 2>/dev/null || echo not)
 
 ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
                                  -e s/arm.*/arm/ -e s/sa110/arm/ \
@@ -191,8 +46,6 @@ ifeq ($(ARCH),x86_64)
        ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S
 endif
 
-# CFLAGS and LDFLAGS are for the users to override from the command line.
-
 #
 # Include saner warnings here, which can catch bugs:
 #
@@ -270,22 +123,13 @@ CC = $(CROSS_COMPILE)gcc
 AR = $(CROSS_COMPILE)ar
 RM = rm -f
 MKDIR = mkdir
-TAR = tar
 FIND = find
 INSTALL = install
-RPMBUILD = rpmbuild
-PTHREAD_LIBS = -lpthread
 
 # sparse is architecture-neutral, which means that we need to tell it
 # explicitly what architecture to check for. Fix this up for your architecture.
 SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
 
-ifeq ($(V), 2)
-       QUIET_STDERR = ">/dev/null"
-else
-       QUIET_STDERR = ">/dev/null 2>&1"
-endif
-
 -include feature-tests.mak
 
 ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y)
@@ -310,49 +154,37 @@ BASIC_LDFLAGS =
 
 # Guard against environment variables
 BUILTIN_OBJS =
-BUILT_INS =
-COMPAT_CFLAGS =
-COMPAT_OBJS =
 LIB_H =
 LIB_OBJS =
-SCRIPT_PERL =
+PYRF_OBJS =
 SCRIPT_SH =
-TEST_PROGRAMS =
 
 SCRIPT_SH += perf-archive.sh
 
 grep-libs = $(filter -l%,$(1))
 strip-libs = $(filter-out -l%,$(1))
 
+$(OUTPUT)python/perf.so: $(PYRF_OBJS)
+       $(QUIET_GEN)python util/setup.py --quiet  build_ext --build-lib='$(OUTPUT)python' \
+                                               --build-temp='$(OUTPUT)python/temp'
 #
 # No Perl scripts right now:
 #
 
-# SCRIPT_PERL += perf-add--interactive.perl
-
-SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) \
-         $(patsubst %.perl,%,$(SCRIPT_PERL))
-
-# Empty...
-EXTRA_PROGRAMS =
-
-# ... and all the rest that could be moved out of bindir to perfexecdir
-PROGRAMS += $(EXTRA_PROGRAMS)
+SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH))
 
 #
 # Single 'perf' binary right now:
 #
 PROGRAMS += $(OUTPUT)perf
 
-# List built-in command $C whose implementation cmd_$C() is not in
-# builtin-$C.o but is linked in as part of some other command.
-#
+LANG_BINDINGS =
 
 # what 'all' will build and 'install' will install, in perfexecdir
 ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS)
 
 # what 'all' will build but not install in perfexecdir
-OTHER_PROGRAMS = $(OUTPUT)perf$X
+OTHER_PROGRAMS = $(OUTPUT)perf
 
 # Set paths to tools early so that they can be used for version tests.
 ifndef SHELL_PATH
@@ -395,6 +227,7 @@ LIB_H += util/include/dwarf-regs.h
 LIB_H += util/include/asm/dwarf2.h
 LIB_H += util/include/asm/cpufeature.h
 LIB_H += perf.h
+LIB_H += util/annotate.h
 LIB_H += util/cache.h
 LIB_H += util/callchain.h
 LIB_H += util/build-id.h
@@ -402,6 +235,7 @@ LIB_H += util/debug.h
 LIB_H += util/debugfs.h
 LIB_H += util/event.h
 LIB_H += util/evsel.h
+LIB_H += util/evlist.h
 LIB_H += util/exec_cmd.h
 LIB_H += util/types.h
 LIB_H += util/levenshtein.h
@@ -416,6 +250,7 @@ LIB_H += util/help.h
 LIB_H += util/session.h
 LIB_H += util/strbuf.h
 LIB_H += util/strlist.h
+LIB_H += util/strfilter.h
 LIB_H += util/svghelper.h
 LIB_H += util/run-command.h
 LIB_H += util/sigchain.h
@@ -425,21 +260,26 @@ LIB_H += util/values.h
 LIB_H += util/sort.h
 LIB_H += util/hist.h
 LIB_H += util/thread.h
+LIB_H += util/thread_map.h
 LIB_H += util/trace-event.h
 LIB_H += util/probe-finder.h
 LIB_H += util/probe-event.h
 LIB_H += util/pstack.h
 LIB_H += util/cpumap.h
+LIB_H += util/top.h
 LIB_H += $(ARCH_INCLUDE)
+LIB_H += util/cgroup.h
 
 LIB_OBJS += $(OUTPUT)util/abspath.o
 LIB_OBJS += $(OUTPUT)util/alias.o
+LIB_OBJS += $(OUTPUT)util/annotate.o
 LIB_OBJS += $(OUTPUT)util/build-id.o
 LIB_OBJS += $(OUTPUT)util/config.o
 LIB_OBJS += $(OUTPUT)util/ctype.o
 LIB_OBJS += $(OUTPUT)util/debugfs.o
 LIB_OBJS += $(OUTPUT)util/environment.o
 LIB_OBJS += $(OUTPUT)util/event.o
+LIB_OBJS += $(OUTPUT)util/evlist.o
 LIB_OBJS += $(OUTPUT)util/evsel.o
 LIB_OBJS += $(OUTPUT)util/exec_cmd.o
 LIB_OBJS += $(OUTPUT)util/help.o
@@ -455,6 +295,8 @@ LIB_OBJS += $(OUTPUT)util/quote.o
 LIB_OBJS += $(OUTPUT)util/strbuf.o
 LIB_OBJS += $(OUTPUT)util/string.o
 LIB_OBJS += $(OUTPUT)util/strlist.o
+LIB_OBJS += $(OUTPUT)util/strfilter.o
+LIB_OBJS += $(OUTPUT)util/top.o
 LIB_OBJS += $(OUTPUT)util/usage.o
 LIB_OBJS += $(OUTPUT)util/wrapper.o
 LIB_OBJS += $(OUTPUT)util/sigchain.o
@@ -469,6 +311,7 @@ LIB_OBJS += $(OUTPUT)util/map.o
 LIB_OBJS += $(OUTPUT)util/pstack.o
 LIB_OBJS += $(OUTPUT)util/session.o
 LIB_OBJS += $(OUTPUT)util/thread.o
+LIB_OBJS += $(OUTPUT)util/thread_map.o
 LIB_OBJS += $(OUTPUT)util/trace-event-parse.o
 LIB_OBJS += $(OUTPUT)util/trace-event-read.o
 LIB_OBJS += $(OUTPUT)util/trace-event-info.o
@@ -480,6 +323,7 @@ LIB_OBJS += $(OUTPUT)util/probe-event.o
 LIB_OBJS += $(OUTPUT)util/util.o
 LIB_OBJS += $(OUTPUT)util/xyarray.o
 LIB_OBJS += $(OUTPUT)util/cpumap.o
+LIB_OBJS += $(OUTPUT)util/cgroup.o
 
 BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
 
@@ -514,6 +358,20 @@ BUILTIN_OBJS += $(OUTPUT)builtin-inject.o
 
 PERFLIBS = $(LIB_FILE)
 
+# Files needed for the python binding, perf.so
+# pyrf is just an internal name needed for all those wrappers.
+# This has to be in sync with what is in the 'sources' variable in
+# tools/perf/util/setup.py
+
+PYRF_OBJS += $(OUTPUT)util/cpumap.o
+PYRF_OBJS += $(OUTPUT)util/ctype.o
+PYRF_OBJS += $(OUTPUT)util/evlist.o
+PYRF_OBJS += $(OUTPUT)util/evsel.o
+PYRF_OBJS += $(OUTPUT)util/python.o
+PYRF_OBJS += $(OUTPUT)util/thread_map.o
+PYRF_OBJS += $(OUTPUT)util/util.o
+PYRF_OBJS += $(OUTPUT)util/xyarray.o
+
 #
 # Platform specific tweaks
 #
@@ -535,22 +393,6 @@ endif # NO_DWARF
 
 -include arch/$(ARCH)/Makefile
 
-ifeq ($(uname_S),Darwin)
-       ifndef NO_FINK
-               ifeq ($(shell test -d /sw/lib && echo y),y)
-                       BASIC_CFLAGS += -I/sw/include
-                       BASIC_LDFLAGS += -L/sw/lib
-               endif
-       endif
-       ifndef NO_DARWIN_PORTS
-               ifeq ($(shell test -d /opt/local/lib && echo y),y)
-                       BASIC_CFLAGS += -I/opt/local/include
-                       BASIC_LDFLAGS += -L/opt/local/lib
-               endif
-       endif
-       PTHREAD_LIBS =
-endif
-
 ifneq ($(OUTPUT),)
        BASIC_CFLAGS += -I$(OUTPUT)
 endif
@@ -595,6 +437,7 @@ else
                LIB_OBJS += $(OUTPUT)util/ui/browsers/annotate.o
                LIB_OBJS += $(OUTPUT)util/ui/browsers/hists.o
                LIB_OBJS += $(OUTPUT)util/ui/browsers/map.o
+               LIB_OBJS += $(OUTPUT)util/ui/browsers/top.o
                LIB_OBJS += $(OUTPUT)util/ui/helpline.o
                LIB_OBJS += $(OUTPUT)util/ui/progress.o
                LIB_OBJS += $(OUTPUT)util/ui/util.o
@@ -604,6 +447,7 @@ else
                LIB_H += util/ui/libslang.h
                LIB_H += util/ui/progress.h
                LIB_H += util/ui/util.h
+               LIB_H += util/ui/ui.h
        endif
 endif
 
@@ -635,12 +479,14 @@ else
        PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null`
        FLAGS_PYTHON_EMBED=$(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
        ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y)
+               msg := $(warning No Python.h found, install python-dev[el] to have python support in 'perf script' and to build the python bindings)
                BASIC_CFLAGS += -DNO_LIBPYTHON
        else
                ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS)
                EXTLIBS += $(PYTHON_EMBED_LIBADD)
                LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
                LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
+               LANG_BINDINGS += $(OUTPUT)python/perf.so
        endif
 endif
 
@@ -690,201 +536,13 @@ else
        endif
 endif
 
-ifndef CC_LD_DYNPATH
-       ifdef NO_R_TO_GCC_LINKER
-               # Some gcc does not accept and pass -R to the linker to specify
-               # the runtime dynamic library path.
-               CC_LD_DYNPATH = -Wl,-rpath,
-       else
-               CC_LD_DYNPATH = -R
-       endif
-endif
-
-ifdef NEEDS_SOCKET
-       EXTLIBS += -lsocket
-endif
-ifdef NEEDS_NSL
-       EXTLIBS += -lnsl
-endif
-ifdef NO_D_TYPE_IN_DIRENT
-       BASIC_CFLAGS += -DNO_D_TYPE_IN_DIRENT
-endif
-ifdef NO_D_INO_IN_DIRENT
-       BASIC_CFLAGS += -DNO_D_INO_IN_DIRENT
-endif
-ifdef NO_ST_BLOCKS_IN_STRUCT_STAT
-       BASIC_CFLAGS += -DNO_ST_BLOCKS_IN_STRUCT_STAT
-endif
-ifdef USE_NSEC
-       BASIC_CFLAGS += -DUSE_NSEC
-endif
-ifdef USE_ST_TIMESPEC
-       BASIC_CFLAGS += -DUSE_ST_TIMESPEC
-endif
-ifdef NO_NSEC
-       BASIC_CFLAGS += -DNO_NSEC
-endif
-ifdef NO_C99_FORMAT
-       BASIC_CFLAGS += -DNO_C99_FORMAT
-endif
-ifdef SNPRINTF_RETURNS_BOGUS
-       COMPAT_CFLAGS += -DSNPRINTF_RETURNS_BOGUS
-       COMPAT_OBJS += $(OUTPUT)compat/snprintf.o
-endif
-ifdef FREAD_READS_DIRECTORIES
-       COMPAT_CFLAGS += -DFREAD_READS_DIRECTORIES
-       COMPAT_OBJS += $(OUTPUT)compat/fopen.o
-endif
-ifdef NO_SYMLINK_HEAD
-       BASIC_CFLAGS += -DNO_SYMLINK_HEAD
-endif
-ifdef NO_STRCASESTR
-       COMPAT_CFLAGS += -DNO_STRCASESTR
-       COMPAT_OBJS += $(OUTPUT)compat/strcasestr.o
-endif
-ifdef NO_STRTOUMAX
-       COMPAT_CFLAGS += -DNO_STRTOUMAX
-       COMPAT_OBJS += $(OUTPUT)compat/strtoumax.o
-endif
-ifdef NO_STRTOULL
-       COMPAT_CFLAGS += -DNO_STRTOULL
-endif
-ifdef NO_SETENV
-       COMPAT_CFLAGS += -DNO_SETENV
-       COMPAT_OBJS += $(OUTPUT)compat/setenv.o
-endif
-ifdef NO_MKDTEMP
-       COMPAT_CFLAGS += -DNO_MKDTEMP
-       COMPAT_OBJS += $(OUTPUT)compat/mkdtemp.o
-endif
-ifdef NO_UNSETENV
-       COMPAT_CFLAGS += -DNO_UNSETENV
-       COMPAT_OBJS += $(OUTPUT)compat/unsetenv.o
-endif
-ifdef NO_SYS_SELECT_H
-       BASIC_CFLAGS += -DNO_SYS_SELECT_H
-endif
-ifdef NO_MMAP
-       COMPAT_CFLAGS += -DNO_MMAP
-       COMPAT_OBJS += $(OUTPUT)compat/mmap.o
-else
-       ifdef USE_WIN32_MMAP
-               COMPAT_CFLAGS += -DUSE_WIN32_MMAP
-               COMPAT_OBJS += $(OUTPUT)compat/win32mmap.o
-       endif
-endif
-ifdef NO_PREAD
-       COMPAT_CFLAGS += -DNO_PREAD
-       COMPAT_OBJS += $(OUTPUT)compat/pread.o
-endif
-ifdef NO_FAST_WORKING_DIRECTORY
-       BASIC_CFLAGS += -DNO_FAST_WORKING_DIRECTORY
-endif
-ifdef NO_TRUSTABLE_FILEMODE
-       BASIC_CFLAGS += -DNO_TRUSTABLE_FILEMODE
-endif
-ifdef NO_IPV6
-       BASIC_CFLAGS += -DNO_IPV6
-endif
-ifdef NO_UINTMAX_T
-       BASIC_CFLAGS += -Duintmax_t=uint32_t
-endif
-ifdef NO_SOCKADDR_STORAGE
-ifdef NO_IPV6
-       BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in
-else
-       BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in6
-endif
-endif
-ifdef NO_INET_NTOP
-       LIB_OBJS += $(OUTPUT)compat/inet_ntop.o
-endif
-ifdef NO_INET_PTON
-       LIB_OBJS += $(OUTPUT)compat/inet_pton.o
-endif
-
-ifdef NO_ICONV
-       BASIC_CFLAGS += -DNO_ICONV
-endif
-
-ifdef OLD_ICONV
-       BASIC_CFLAGS += -DOLD_ICONV
-endif
-
-ifdef NO_DEFLATE_BOUND
-       BASIC_CFLAGS += -DNO_DEFLATE_BOUND
-endif
-
-ifdef PPC_SHA1
-       SHA1_HEADER = "ppc/sha1.h"
-       LIB_OBJS += $(OUTPUT)ppc/sha1.o ppc/sha1ppc.o
-else
-ifdef ARM_SHA1
-       SHA1_HEADER = "arm/sha1.h"
-       LIB_OBJS += $(OUTPUT)arm/sha1.o $(OUTPUT)arm/sha1_arm.o
-else
-ifdef MOZILLA_SHA1
-       SHA1_HEADER = "mozilla-sha1/sha1.h"
-       LIB_OBJS += $(OUTPUT)mozilla-sha1/sha1.o
-else
-       SHA1_HEADER = <openssl/sha.h>
-       EXTLIBS += $(LIB_4_CRYPTO)
-endif
-endif
-endif
-ifdef NO_PERL_MAKEMAKER
-       export NO_PERL_MAKEMAKER
-endif
-ifdef NO_HSTRERROR
-       COMPAT_CFLAGS += -DNO_HSTRERROR
-       COMPAT_OBJS += $(OUTPUT)compat/hstrerror.o
-endif
-ifdef NO_MEMMEM
-       COMPAT_CFLAGS += -DNO_MEMMEM
-       COMPAT_OBJS += $(OUTPUT)compat/memmem.o
-endif
-ifdef INTERNAL_QSORT
-       COMPAT_CFLAGS += -DINTERNAL_QSORT
-       COMPAT_OBJS += $(OUTPUT)compat/qsort.o
-endif
-ifdef RUNTIME_PREFIX
-       COMPAT_CFLAGS += -DRUNTIME_PREFIX
-endif
-
-ifdef DIR_HAS_BSD_GROUP_SEMANTICS
-       COMPAT_CFLAGS += -DDIR_HAS_BSD_GROUP_SEMANTICS
-endif
-ifdef NO_EXTERNAL_GREP
-       BASIC_CFLAGS += -DNO_EXTERNAL_GREP
-endif
-
-ifeq ($(PERL_PATH),)
-NO_PERL=NoThanks
-endif
-
-QUIET_SUBDIR0  = +$(MAKE) -C # space to separate -C and subdir
-QUIET_SUBDIR1  =
-
-ifneq ($(findstring $(MAKEFLAGS),w),w)
-PRINT_DIR = --no-print-directory
-else # "make -w"
-NO_SUBDIR = :
-endif
-
 ifneq ($(findstring $(MAKEFLAGS),s),s)
 ifndef V
        QUIET_CC       = @echo '   ' CC $@;
        QUIET_AR       = @echo '   ' AR $@;
        QUIET_LINK     = @echo '   ' LINK $@;
        QUIET_MKDIR    = @echo '   ' MKDIR $@;
-       QUIET_BUILT_IN = @echo '   ' BUILTIN $@;
        QUIET_GEN      = @echo '   ' GEN $@;
-       QUIET_SUBDIR0  = +@subdir=
-       QUIET_SUBDIR1  = ;$(NO_SUBDIR) echo '   ' SUBDIR $$subdir; \
-                        $(MAKE) $(PRINT_DIR) -C $$subdir
-       export V
-       export QUIET_GEN
-       export QUIET_BUILT_IN
 endif
 endif
 
@@ -894,7 +552,6 @@ endif
 
 # Shell quote (do not use $(call) to accommodate ancient setups);
 
-SHA1_HEADER_SQ = $(subst ','\'',$(SHA1_HEADER))
 ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG))
 
 DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
@@ -908,46 +565,36 @@ htmldir_SQ = $(subst ','\'',$(htmldir))
 prefix_SQ = $(subst ','\'',$(prefix))
 
 SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
-PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH))
 
 LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive $(EXTLIBS)
 
-BASIC_CFLAGS += -DSHA1_HEADER='$(SHA1_HEADER_SQ)' \
-       $(COMPAT_CFLAGS)
-LIB_OBJS += $(COMPAT_OBJS)
-
 ALL_CFLAGS += $(BASIC_CFLAGS)
 ALL_CFLAGS += $(ARCH_CFLAGS)
 ALL_LDFLAGS += $(BASIC_LDFLAGS)
 
-export TAR INSTALL DESTDIR SHELL_PATH
+export INSTALL SHELL_PATH
 
 
 ### Build rules
 
 SHELL = $(SHELL_PATH)
 
-all:: shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) $(OUTPUT)PERF-BUILD-OPTIONS
-ifneq (,$X)
-       $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';)
-endif
-
-all::
+all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
 
 please_set_SHELL_PATH_to_a_more_modern_shell:
        @$$(:)
 
 shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell
 
-strip: $(PROGRAMS) $(OUTPUT)perf$X
-       $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf$X
+strip: $(PROGRAMS) $(OUTPUT)perf
+       $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf
 
 $(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \
                '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
                $(ALL_CFLAGS) -c $(filter %.c,$^) -o $@
 
-$(OUTPUT)perf$X: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS)
+$(OUTPUT)perf: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS)
        $(QUIET_LINK)$(CC) $(ALL_CFLAGS) $(ALL_LDFLAGS) $(OUTPUT)perf.o \
                $(BUILTIN_OBJS) $(LIBS) -o $@
 
@@ -963,39 +610,17 @@ $(OUTPUT)builtin-timechart.o: builtin-timechart.c $(OUTPUT)common-cmds.h $(OUTPU
                '-DPERF_MAN_PATH="$(mandir_SQ)"' \
                '-DPERF_INFO_PATH="$(infodir_SQ)"' $<
 
-$(BUILT_INS): $(OUTPUT)perf$X
-       $(QUIET_BUILT_IN)$(RM) $@ && \
-       ln perf$X $@ 2>/dev/null || \
-       ln -s perf$X $@ 2>/dev/null || \
-       cp perf$X $@
-
 $(OUTPUT)common-cmds.h: util/generate-cmdlist.sh command-list.txt
 
 $(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
        $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@
 
-$(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh
-       $(QUIET_GEN)$(RM) $(OUTPUT)$@ $(OUTPUT)$@+ && \
-       sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
-           -e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \
-           -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \
-           -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \
-           -e 's/@@NO_CURL@@/$(NO_CURL)/g' \
-           $@.sh > $(OUTPUT)$@+ && \
-       chmod +x $(OUTPUT)$@+ && \
-       mv $(OUTPUT)$@+ $(OUTPUT)$@
-
-configure: configure.ac
-       $(QUIET_GEN)$(RM) $@ $<+ && \
-       sed -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \
-           $< > $<+ && \
-       autoconf -o $@ $<+ && \
-       $(RM) $<+
+$(SCRIPTS) : % : %.sh
+       $(QUIET_GEN)$(INSTALL) '$@.sh' '$(OUTPUT)$@'
 
 # These can record PERF_VERSION
 $(OUTPUT)perf.o perf.spec \
-       $(patsubst %.sh,%,$(SCRIPT_SH)) \
-       $(patsubst %.perl,%,$(SCRIPT_PERL)) \
+       $(SCRIPTS) \
        : $(OUTPUT)PERF-VERSION-FILE
 
 $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
@@ -1012,9 +637,6 @@ $(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
                '-DPREFIX="$(prefix_SQ)"' \
                $<
 
-$(OUTPUT)builtin-init-db.o: builtin-init-db.c $(OUTPUT)PERF-CFLAGS
-       $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DDEFAULT_PERF_TEMPLATE_DIR='"$(template_dir_SQ)"' $<
-
 $(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
 
@@ -1024,6 +646,9 @@ $(OUTPUT)util/ui/browser.o: util/ui/browser.c $(OUTPUT)PERF-CFLAGS
 $(OUTPUT)util/ui/browsers/annotate.o: util/ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
 
+$(OUTPUT)util/ui/browsers/top.o: util/ui/browsers/top.c $(OUTPUT)PERF-CFLAGS
+       $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
+
 $(OUTPUT)util/ui/browsers/hists.o: util/ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
 
@@ -1045,12 +670,11 @@ $(OUTPUT)util/scripting-engines/trace-event-python.o: util/scripting-engines/tra
 $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
 
-$(OUTPUT)perf-%$X: %.o $(PERFLIBS)
+$(OUTPUT)perf-%: %.o $(PERFLIBS)
        $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
 
 $(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H)
-$(patsubst perf-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
-builtin-revert.o wt-status.o: wt-status.h
+$(patsubst perf-%,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
 
 # we compile into subdirectories. if the target directory is not the source directory, they might not exists. So
 # we depend the various files onto their directories.
@@ -1063,6 +687,36 @@ $(sort $(dir $(DIRECTORY_DEPS))):
 $(LIB_FILE): $(LIB_OBJS)
        $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS)
 
+help:
+       @echo 'Perf make targets:'
+       @echo '  doc            - make *all* documentation (see below)'
+       @echo '  man            - make manpage documentation (access with man <foo>)'
+       @echo '  html           - make html documentation'
+       @echo '  info           - make GNU info documentation (access with info <foo>)'
+       @echo '  pdf            - make pdf documentation'
+       @echo '  TAGS           - use etags to make tag information for source browsing'
+       @echo '  tags           - use ctags to make tag information for source browsing'
+       @echo '  cscope         - use cscope to make an interactive browsing database'
+       @echo ''
+       @echo 'Perf install targets:'
+       @echo '  NOTE: the documentation build requires the asciidoc and xmlto packages to be installed'
+       @echo '  HINT: use "make prefix=<path> <install target>" to install to a particular'
+       @echo '        path like make prefix=/usr/local install install-doc'
+       @echo '  install        - install compiled binaries'
+       @echo '  install-doc    - install *all* documentation'
+       @echo '  install-man    - install manpage documentation'
+       @echo '  install-html   - install html documentation'
+       @echo '  install-info   - install GNU info documentation'
+       @echo '  install-pdf    - install pdf documentation'
+       @echo ''
+       @echo '  quick-install-doc      - alias for quick-install-man'
+       @echo '  quick-install-man      - install the documentation quickly'
+       @echo '  quick-install-html     - install the html documentation quickly'
+       @echo ''
+       @echo 'Perf maintainer targets:'
+       @echo '  distclean              - alias to clean'
+       @echo '  clean                  - clean all binary objects and build output'
+
 doc:
        $(MAKE) -C Documentation all
 
@@ -1101,30 +755,12 @@ $(OUTPUT)PERF-CFLAGS: .FORCE-PERF-CFLAGS
                echo "$$FLAGS" >$(OUTPUT)PERF-CFLAGS; \
             fi
 
-# We need to apply sq twice, once to protect from the shell
-# that runs $(OUTPUT)PERF-BUILD-OPTIONS, and then again to protect it
-# and the first level quoting from the shell that runs "echo".
-$(OUTPUT)PERF-BUILD-OPTIONS: .FORCE-PERF-BUILD-OPTIONS
-       @echo SHELL_PATH=\''$(subst ','\'',$(SHELL_PATH_SQ))'\' >$@
-       @echo TAR=\''$(subst ','\'',$(subst ','\'',$(TAR)))'\' >>$@
-       @echo NO_CURL=\''$(subst ','\'',$(subst ','\'',$(NO_CURL)))'\' >>$@
-       @echo NO_PERL=\''$(subst ','\'',$(subst ','\'',$(NO_PERL)))'\' >>$@
-
 ### Testing rules
 
-#
-# None right now:
-#
-# TEST_PROGRAMS += test-something$X
-
-all:: $(TEST_PROGRAMS)
-
 # GNU make supports exporting all variables by "export" without parameters.
 # However, the environment gets quite big, and some programs have problems
 # with that.
 
-export NO_SVN_TESTS
-
 check: $(OUTPUT)common-cmds.h
        if sparse; \
        then \
@@ -1133,33 +769,21 @@ check: $(OUTPUT)common-cmds.h
                        sparse $(ALL_CFLAGS) $(SPARSE_FLAGS) $$i || exit; \
                done; \
        else \
-               echo 2>&1 "Did you mean 'make test'?"; \
                exit 1; \
        fi
 
-remove-dashes:
-       ./fixup-builtins $(BUILT_INS) $(PROGRAMS) $(SCRIPTS)
-
 ### Installation rules
 
-ifneq ($(filter /%,$(firstword $(template_dir))),)
-template_instdir = $(template_dir)
-else
-template_instdir = $(prefix)/$(template_dir)
-endif
-export template_instdir
-
 ifneq ($(filter /%,$(firstword $(perfexecdir))),)
 perfexec_instdir = $(perfexecdir)
 else
 perfexec_instdir = $(prefix)/$(perfexecdir)
 endif
 perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir))
-export perfexec_instdir
 
 install: all
        $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
-       $(INSTALL) $(OUTPUT)perf$X '$(DESTDIR_SQ)$(bindir_SQ)'
+       $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'
        $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
        $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
        $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
@@ -1172,14 +796,6 @@ install: all
        $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'
        $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
 
-ifdef BUILT_INS
-       $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
-       $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
-ifneq (,$X)
-       $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) $(OUTPUT)perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';)
-endif
-endif
-
 install-doc:
        $(MAKE) -C Documentation install
 
@@ -1204,104 +820,17 @@ quick-install-man:
 quick-install-html:
        $(MAKE) -C Documentation quick-install-html
 
-
-### Maintainer's dist rules
-#
-# None right now
-#
-#
-# perf.spec: perf.spec.in
-#      sed -e 's/@@VERSION@@/$(PERF_VERSION)/g' < $< > $@+
-#      mv $@+ $@
-#
-# PERF_TARNAME=perf-$(PERF_VERSION)
-# dist: perf.spec perf-archive$(X) configure
-#      ./perf-archive --format=tar \
-#              --prefix=$(PERF_TARNAME)/ HEAD^{tree} > $(PERF_TARNAME).tar
-#      @mkdir -p $(PERF_TARNAME)
-#      @cp perf.spec configure $(PERF_TARNAME)
-#      @echo $(PERF_VERSION) > $(PERF_TARNAME)/version
-#      $(TAR) rf $(PERF_TARNAME).tar \
-#              $(PERF_TARNAME)/perf.spec \
-#              $(PERF_TARNAME)/configure \
-#              $(PERF_TARNAME)/version
-#      @$(RM) -r $(PERF_TARNAME)
-#      gzip -f -9 $(PERF_TARNAME).tar
-#
-# htmldocs = perf-htmldocs-$(PERF_VERSION)
-# manpages = perf-manpages-$(PERF_VERSION)
-# dist-doc:
-#      $(RM) -r .doc-tmp-dir
-#      mkdir .doc-tmp-dir
-#      $(MAKE) -C Documentation WEBDOC_DEST=../.doc-tmp-dir install-webdoc
-#      cd .doc-tmp-dir && $(TAR) cf ../$(htmldocs).tar .
-#      gzip -n -9 -f $(htmldocs).tar
-#      :
-#      $(RM) -r .doc-tmp-dir
-#      mkdir -p .doc-tmp-dir/man1 .doc-tmp-dir/man5 .doc-tmp-dir/man7
-#      $(MAKE) -C Documentation DESTDIR=./ \
-#              man1dir=../.doc-tmp-dir/man1 \
-#              man5dir=../.doc-tmp-dir/man5 \
-#              man7dir=../.doc-tmp-dir/man7 \
-#              install
-#      cd .doc-tmp-dir && $(TAR) cf ../$(manpages).tar .
-#      gzip -n -9 -f $(manpages).tar
-#      $(RM) -r .doc-tmp-dir
-#
-# rpm: dist
-#      $(RPMBUILD) -ta $(PERF_TARNAME).tar.gz
-
 ### Cleaning rules
 
-distclean: clean
-#      $(RM) configure
-
 clean:
-       $(RM) *.o */*.o */*/*.o */*/*/*.o $(LIB_FILE)
-       $(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X
-       $(RM) $(TEST_PROGRAMS)
+       $(RM) $(OUTPUT){*.o,*/*.o,*/*/*.o,*/*/*/*.o,$(LIB_FILE),perf-archive}
+       $(RM) $(ALL_PROGRAMS) perf
        $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
-       $(RM) -r autom4te.cache
-       $(RM) config.log config.mak.autogen config.mak.append config.status config.cache
-       $(RM) -r $(PERF_TARNAME) .doc-tmp-dir
-       $(RM) $(PERF_TARNAME).tar.gz perf-core_$(PERF_VERSION)-*.tar.gz
-       $(RM) $(htmldocs).tar.gz $(manpages).tar.gz
        $(MAKE) -C Documentation/ clean
-       $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)PERF-BUILD-OPTIONS
+       $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
+       @python util/setup.py clean --build-lib='$(OUTPUT)python' \
+                                  --build-temp='$(OUTPUT)python/temp'
 
 .PHONY: all install clean strip
 .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
 .PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS
-.PHONY: .FORCE-PERF-BUILD-OPTIONS
-
-### Make sure built-ins do not have dups and listed in perf.c
-#
-check-builtins::
-       ./check-builtins.sh
-
-### Test suite coverage testing
-#
-# None right now
-#
-# .PHONY: coverage coverage-clean coverage-build coverage-report
-#
-# coverage:
-#      $(MAKE) coverage-build
-#      $(MAKE) coverage-report
-#
-# coverage-clean:
-#      rm -f *.gcda *.gcno
-#
-# COVERAGE_CFLAGS = $(CFLAGS) -O0 -ftest-coverage -fprofile-arcs
-# COVERAGE_LDFLAGS = $(CFLAGS)  -O0 -lgcov
-#
-# coverage-build: coverage-clean
-#      $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" all
-#      $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" \
-#              -j1 test
-#
-# coverage-report:
-#      gcov -b *.c */*.c
-#      grep '^function.*called 0 ' *.c.gcov */*.c.gcov \
-#              | sed -e 's/\([^:]*\)\.gcov: *function \([^ ]*\) called.*/\1: \2/' \
-#              | tee coverage-untested-functions
index d9ab3ce446acf88a4c86238dd3ea01e3c498b169..0c7454f8b8a9841f039568f5af0d74fecec0329e 100644 (file)
@@ -55,7 +55,7 @@ int bench_sched_pipe(int argc, const char **argv,
         * discarding returned value of read(), write()
         * causes error in building environment for perf
         */
-       int ret, wait_stat;
+       int __used ret, wait_stat;
        pid_t pid, retpid;
 
        argc = parse_options(argc, argv, options,
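Editor's note on the __used annotation added above: tools/perf carries its own linux/compiler.h shim, and (as an assumption about this era of the tree) the macro expands to the GCC unused attribute, silencing the warning for the deliberately ignored read()/write() results mentioned in the comment:

	/* assumed definition from tools/perf's local linux/compiler.h shim;
	 * despite the name, it marks the variable as deliberately unused */
	#ifndef __used
	#define __used		__attribute__((__unused__))
	#endif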
index 8879463807e4a06269e11e5594731ace4fd80ff2..695de4b5ae633efbadfe13af7c85774ab9fc337f 100644 (file)
@@ -9,6 +9,7 @@
 
 #include "util/util.h"
 
+#include "util/util.h"
 #include "util/color.h"
 #include <linux/list.h>
 #include "util/cache.h"
@@ -18,6 +19,9 @@
 #include "perf.h"
 #include "util/debug.h"
 
+#include "util/evlist.h"
+#include "util/evsel.h"
+#include "util/annotate.h"
 #include "util/event.h"
 #include "util/parse-options.h"
 #include "util/parse-events.h"
@@ -36,9 +40,13 @@ static bool          print_line;
 
 static const char *sym_hist_filter;
 
-static int hists__add_entry(struct hists *self, struct addr_location *al)
+static int perf_evlist__add_sample(struct perf_evlist *evlist,
+                                  struct perf_sample *sample,
+                                  struct addr_location *al)
 {
+       struct perf_evsel *evsel;
        struct hist_entry *he;
+       int ret;
 
        if (sym_hist_filter != NULL &&
            (al->sym == NULL || strcmp(sym_hist_filter, al->sym->name) != 0)) {
@@ -51,25 +59,51 @@ static int hists__add_entry(struct hists *self, struct addr_location *al)
                return 0;
        }
 
-       he = __hists__add_entry(self, al, NULL, 1);
+       evsel = perf_evlist__id2evsel(evlist, sample->id);
+       if (evsel == NULL) {
+               /*
+                * FIXME: Propagate this back, but at least we're in a builtin,
+                * where exit() is allowed. ;-)
+                */
+               ui__warning("Invalid %s file, contains samples with id not in "
+                           "its header!\n", input_name);
+               exit_browser(0);
+               exit(1);
+       }
+
+       he = __hists__add_entry(&evsel->hists, al, NULL, 1);
        if (he == NULL)
                return -ENOMEM;
 
-       return hist_entry__inc_addr_samples(he, al->addr);
+       ret = 0;
+       if (he->ms.sym != NULL) {
+               struct annotation *notes = symbol__annotation(he->ms.sym);
+               if (notes->src == NULL &&
+                   symbol__alloc_hist(he->ms.sym, evlist->nr_entries) < 0)
+                       return -ENOMEM;
+
+               ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+       }
+
+       evsel->hists.stats.total_period += sample->period;
+       hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
+       return ret;
 }
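Editor's note: perf_evlist__id2evsel(), used above, maps the PERF_SAMPLE_ID carried by each sample back to the event that produced it. A hedged sketch of that style of lookup; the structure and names here are illustrative, not perf's exact internals:

	#include <stdint.h>
	#include <stddef.h>

	struct evsel;			/* stand-in for struct perf_evsel */

	struct sample_id {
		struct sample_id *next;	/* hash-bucket chaining */
		uint64_t id;		/* PERF_SAMPLE_ID value seen in the stream */
		struct evsel *evsel;	/* counter that produced this id */
	};

	static struct evsel *id2evsel(struct sample_id **hash, size_t nbuckets,
				      uint64_t id)
	{
		struct sample_id *sid;

		for (sid = hash[id % nbuckets]; sid != NULL; sid = sid->next)
			if (sid->id == id)
				return sid->evsel;
		return NULL;	/* id was never announced in the header */
	}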
 
-static int process_sample_event(event_t *event, struct sample_data *sample,
+static int process_sample_event(union perf_event *event,
+                               struct perf_sample *sample,
                                struct perf_session *session)
 {
        struct addr_location al;
 
-       if (event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
+       if (perf_event__preprocess_sample(event, session, &al, sample,
+                                         symbol__annotate_init) < 0) {
                pr_warning("problem processing %d event, skipping it.\n",
                           event->header.type);
                return -1;
        }
 
-       if (!al.filtered && hists__add_entry(&session->hists, &al)) {
+       if (!al.filtered && perf_evlist__add_sample(session->evlist, sample, &al)) {
                pr_warning("problem incrementing symbol count, "
                           "skipping event\n");
                return -1;
@@ -78,261 +112,26 @@ static int process_sample_event(event_t *event, struct sample_data *sample,
        return 0;
 }
 
-static int objdump_line__print(struct objdump_line *self,
-                              struct list_head *head,
-                              struct hist_entry *he, u64 len)
-{
-       struct symbol *sym = he->ms.sym;
-       static const char *prev_line;
-       static const char *prev_color;
-
-       if (self->offset != -1) {
-               const char *path = NULL;
-               unsigned int hits = 0;
-               double percent = 0.0;
-               const char *color;
-               struct sym_priv *priv = symbol__priv(sym);
-               struct sym_ext *sym_ext = priv->ext;
-               struct sym_hist *h = priv->hist;
-               s64 offset = self->offset;
-               struct objdump_line *next = objdump__get_next_ip_line(head, self);
-
-               while (offset < (s64)len &&
-                      (next == NULL || offset < next->offset)) {
-                       if (sym_ext) {
-                               if (path == NULL)
-                                       path = sym_ext[offset].path;
-                               percent += sym_ext[offset].percent;
-                       } else
-                               hits += h->ip[offset];
-
-                       ++offset;
-               }
-
-               if (sym_ext == NULL && h->sum)
-                       percent = 100.0 * hits / h->sum;
-
-               color = get_percent_color(percent);
-
-               /*
-                * Also color the filename and line if needed, with
-                * the same color than the percentage. Don't print it
-                * twice for close colored ip with the same filename:line
-                */
-               if (path) {
-                       if (!prev_line || strcmp(prev_line, path)
-                                      || color != prev_color) {
-                               color_fprintf(stdout, color, " %s", path);
-                               prev_line = path;
-                               prev_color = color;
-                       }
-               }
-
-               color_fprintf(stdout, color, " %7.2f", percent);
-               printf(" :      ");
-               color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", self->line);
-       } else {
-               if (!*self->line)
-                       printf("         :\n");
-               else
-                       printf("         :      %s\n", self->line);
-       }
-
-       return 0;
-}
-
-static struct rb_root root_sym_ext;
-
-static void insert_source_line(struct sym_ext *sym_ext)
-{
-       struct sym_ext *iter;
-       struct rb_node **p = &root_sym_ext.rb_node;
-       struct rb_node *parent = NULL;
-
-       while (*p != NULL) {
-               parent = *p;
-               iter = rb_entry(parent, struct sym_ext, node);
-
-               if (sym_ext->percent > iter->percent)
-                       p = &(*p)->rb_left;
-               else
-                       p = &(*p)->rb_right;
-       }
-
-       rb_link_node(&sym_ext->node, parent, p);
-       rb_insert_color(&sym_ext->node, &root_sym_ext);
-}
-
-static void free_source_line(struct hist_entry *he, int len)
-{
-       struct sym_priv *priv = symbol__priv(he->ms.sym);
-       struct sym_ext *sym_ext = priv->ext;
-       int i;
-
-       if (!sym_ext)
-               return;
-
-       for (i = 0; i < len; i++)
-               free(sym_ext[i].path);
-       free(sym_ext);
-
-       priv->ext = NULL;
-       root_sym_ext = RB_ROOT;
-}
-
-/* Get the filename:line for the colored entries */
-static void
-get_source_line(struct hist_entry *he, int len, const char *filename)
-{
-       struct symbol *sym = he->ms.sym;
-       u64 start;
-       int i;
-       char cmd[PATH_MAX * 2];
-       struct sym_ext *sym_ext;
-       struct sym_priv *priv = symbol__priv(sym);
-       struct sym_hist *h = priv->hist;
-
-       if (!h->sum)
-               return;
-
-       sym_ext = priv->ext = calloc(len, sizeof(struct sym_ext));
-       if (!priv->ext)
-               return;
-
-       start = he->ms.map->unmap_ip(he->ms.map, sym->start);
-
-       for (i = 0; i < len; i++) {
-               char *path = NULL;
-               size_t line_len;
-               u64 offset;
-               FILE *fp;
-
-               sym_ext[i].percent = 100.0 * h->ip[i] / h->sum;
-               if (sym_ext[i].percent <= 0.5)
-                       continue;
-
-               offset = start + i;
-               sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset);
-               fp = popen(cmd, "r");
-               if (!fp)
-                       continue;
-
-               if (getline(&path, &line_len, fp) < 0 || !line_len)
-                       goto next;
-
-               sym_ext[i].path = malloc(sizeof(char) * line_len + 1);
-               if (!sym_ext[i].path)
-                       goto next;
-
-               strcpy(sym_ext[i].path, path);
-               insert_source_line(&sym_ext[i]);
-
-       next:
-               pclose(fp);
-       }
-}
-
-static void print_summary(const char *filename)
-{
-       struct sym_ext *sym_ext;
-       struct rb_node *node;
-
-       printf("\nSorted summary for file %s\n", filename);
-       printf("----------------------------------------------\n\n");
-
-       if (RB_EMPTY_ROOT(&root_sym_ext)) {
-               printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
-               return;
-       }
-
-       node = rb_first(&root_sym_ext);
-       while (node) {
-               double percent;
-               const char *color;
-               char *path;
-
-               sym_ext = rb_entry(node, struct sym_ext, node);
-               percent = sym_ext->percent;
-               color = get_percent_color(percent);
-               path = sym_ext->path;
-
-               color_fprintf(stdout, color, " %7.2f %s", percent, path);
-               node = rb_next(node);
-       }
-}
-
-static void hist_entry__print_hits(struct hist_entry *self)
-{
-       struct symbol *sym = self->ms.sym;
-       struct sym_priv *priv = symbol__priv(sym);
-       struct sym_hist *h = priv->hist;
-       u64 len = sym->end - sym->start, offset;
-
-       for (offset = 0; offset < len; ++offset)
-               if (h->ip[offset] != 0)
-                       printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
-                              sym->start + offset, h->ip[offset]);
-       printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
-}
-
-static int hist_entry__tty_annotate(struct hist_entry *he)
+static int hist_entry__tty_annotate(struct hist_entry *he, int evidx)
 {
-       struct map *map = he->ms.map;
-       struct dso *dso = map->dso;
-       struct symbol *sym = he->ms.sym;
-       const char *filename = dso->long_name, *d_filename;
-       u64 len;
-       LIST_HEAD(head);
-       struct objdump_line *pos, *n;
-
-       if (hist_entry__annotate(he, &head, 0) < 0)
-               return -1;
-
-       if (full_paths)
-               d_filename = filename;
-       else
-               d_filename = basename(filename);
-
-       len = sym->end - sym->start;
-
-       if (print_line) {
-               get_source_line(he, len, filename);
-               print_summary(filename);
-       }
-
-       printf("\n\n------------------------------------------------\n");
-       printf(" Percent |      Source code & Disassembly of %s\n", d_filename);
-       printf("------------------------------------------------\n");
-
-       if (verbose)
-               hist_entry__print_hits(he);
-
-       list_for_each_entry_safe(pos, n, &head, node) {
-               objdump_line__print(pos, &head, he, len);
-               list_del(&pos->node);
-               objdump_line__free(pos);
-       }
-
-       if (print_line)
-               free_source_line(he, len);
-
-       return 0;
+       return symbol__tty_annotate(he->ms.sym, he->ms.map, evidx,
+                                   print_line, full_paths, 0, 0);
 }
 
-static void hists__find_annotations(struct hists *self)
+static void hists__find_annotations(struct hists *self, int evidx)
 {
        struct rb_node *nd = rb_first(&self->entries), *next;
        int key = KEY_RIGHT;
 
        while (nd) {
                struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
-               struct sym_priv *priv;
+               struct annotation *notes;
 
                if (he->ms.sym == NULL || he->ms.map->dso->annotate_warned)
                        goto find_next;
 
-               priv = symbol__priv(he->ms.sym);
-               if (priv->hist == NULL) {
+               notes = symbol__annotation(he->ms.sym);
+               if (notes->src == NULL) {
 find_next:
                        if (key == KEY_LEFT)
                                nd = rb_prev(nd);
@@ -342,7 +141,7 @@ find_next:
                }
 
                if (use_browser > 0) {
-                       key = hist_entry__tui_annotate(he);
+                       key = hist_entry__tui_annotate(he, evidx);
                        switch (key) {
                        case KEY_RIGHT:
                                next = rb_next(nd);
@@ -357,24 +156,24 @@ find_next:
                        if (next != NULL)
                                nd = next;
                } else {
-                       hist_entry__tty_annotate(he);
+                       hist_entry__tty_annotate(he, evidx);
                        nd = rb_next(nd);
                        /*
                         * Since we have a hist_entry per IP for the same
-                        * symbol, free he->ms.sym->hist to signal we already
+                        * symbol, free he->ms.sym->src to signal we already
                         * processed this symbol.
                         */
-                       free(priv->hist);
-                       priv->hist = NULL;
+                       free(notes->src);
+                       notes->src = NULL;
                }
        }
 }
 
 static struct perf_event_ops event_ops = {
        .sample = process_sample_event,
-       .mmap   = event__process_mmap,
-       .comm   = event__process_comm,
-       .fork   = event__process_task,
+       .mmap   = perf_event__process_mmap,
+       .comm   = perf_event__process_comm,
+       .fork   = perf_event__process_task,
        .ordered_samples = true,
        .ordering_requires_timestamps = true,
 };
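Editor's note on the two flags closing this (and the following) perf_event_ops: .ordered_samples asks the session layer to queue incoming events and deliver them to these handlers sorted by timestamp rather than in per-CPU mmap order, and .ordering_requires_timestamps makes that reordering conditional on the file actually carrying timestamps.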
@@ -383,6 +182,8 @@ static int __cmd_annotate(void)
 {
        int ret;
        struct perf_session *session;
+       struct perf_evsel *pos;
+       u64 total_nr_samples;
 
        session = perf_session__new(input_name, O_RDONLY, force, false, &event_ops);
        if (session == NULL)
@@ -403,12 +204,36 @@ static int __cmd_annotate(void)
        if (verbose > 2)
                perf_session__fprintf_dsos(session, stdout);
 
-       hists__collapse_resort(&session->hists);
-       hists__output_resort(&session->hists);
-       hists__find_annotations(&session->hists);
-out_delete:
-       perf_session__delete(session);
+       total_nr_samples = 0;
+       list_for_each_entry(pos, &session->evlist->entries, node) {
+               struct hists *hists = &pos->hists;
+               u32 nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+
+               if (nr_samples > 0) {
+                       total_nr_samples += nr_samples;
+                       hists__collapse_resort(hists);
+                       hists__output_resort(hists);
+                       hists__find_annotations(hists, pos->idx);
+               }
+       }
 
+       if (total_nr_samples == 0) {
+               ui__warning("The %s file has no samples!\n", input_name);
+               goto out_delete;
+       }
+out_delete:
+       /*
+        * Speed up the exit process; for large files this can
+        * take quite a while.
+        *
+        * XXX Enable this when using valgrind or if we ever
+        * librarize this command.
+        *
+        * Also experiment with obstacks to see how much speedup
+        * we'll get here.
+        *
+        * perf_session__delete(session);
+        */
        return ret;
 }
 
@@ -451,9 +276,9 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
        else if (use_tui)
                use_browser = 1;
 
-       setup_browser();
+       setup_browser(true);
 
-       symbol_conf.priv_size = sizeof(struct sym_priv);
+       symbol_conf.priv_size = sizeof(struct annotation);
        symbol_conf.try_vmlinux_path = true;
 
        if (symbol__init() < 0)
index 3153e492dbcc29e1593b6df29357424dd012da99..6b7d91160ecb3f844e0f6342f311e1785609d5c8 100644 (file)
@@ -30,13 +30,13 @@ static int hists__add_entry(struct hists *self,
        return -ENOMEM;
 }
 
-static int diff__process_sample_event(event_t *event,
-                                     struct sample_data *sample,
+static int diff__process_sample_event(union perf_event *event,
+                                     struct perf_sample *sample,
                                      struct perf_session *session)
 {
        struct addr_location al;
 
-       if (event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
+       if (perf_event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
                pr_warning("problem processing %d event, skipping it.\n",
                           event->header.type);
                return -1;
@@ -56,11 +56,11 @@ static int diff__process_sample_event(event_t *event,
 
 static struct perf_event_ops event_ops = {
        .sample = diff__process_sample_event,
-       .mmap   = event__process_mmap,
-       .comm   = event__process_comm,
-       .exit   = event__process_task,
-       .fork   = event__process_task,
-       .lost   = event__process_lost,
+       .mmap   = perf_event__process_mmap,
+       .comm   = perf_event__process_comm,
+       .exit   = perf_event__process_task,
+       .fork   = perf_event__process_task,
+       .lost   = perf_event__process_lost,
        .ordered_samples = true,
        .ordering_requires_timestamps = true,
 };
index 0c78ffa7bf675f46c9e631d9fa8d51fbc71aded4..e29f04ed33963c0839b8d1c7e9d2fe1e310f4ea6 100644 (file)
@@ -16,8 +16,8 @@
 static char            const *input_name = "-";
 static bool            inject_build_ids;
 
-static int event__repipe_synth(event_t *event,
-                              struct perf_session *session __used)
+static int perf_event__repipe_synth(union perf_event *event,
+                                   struct perf_session *session __used)
 {
        uint32_t size;
        void *buf = event;
@@ -36,41 +36,44 @@ static int event__repipe_synth(event_t *event,
        return 0;
 }
 
-static int event__repipe(event_t *event, struct sample_data *sample __used,
-                        struct perf_session *session)
+static int perf_event__repipe(union perf_event *event,
+                             struct perf_sample *sample __used,
+                             struct perf_session *session)
 {
-       return event__repipe_synth(event, session);
+       return perf_event__repipe_synth(event, session);
 }
 
-static int event__repipe_mmap(event_t *self, struct sample_data *sample,
-                             struct perf_session *session)
+static int perf_event__repipe_mmap(union perf_event *event,
+                                  struct perf_sample *sample,
+                                  struct perf_session *session)
 {
        int err;
 
-       err = event__process_mmap(self, sample, session);
-       event__repipe(self, sample, session);
+       err = perf_event__process_mmap(event, sample, session);
+       perf_event__repipe(event, sample, session);
 
        return err;
 }
 
-static int event__repipe_task(event_t *self, struct sample_data *sample,
-                             struct perf_session *session)
+static int perf_event__repipe_task(union perf_event *event,
+                                  struct perf_sample *sample,
+                                  struct perf_session *session)
 {
        int err;
 
-       err = event__process_task(self, sample, session);
-       event__repipe(self, sample, session);
+       err = perf_event__process_task(event, sample, session);
+       perf_event__repipe(event, sample, session);
 
        return err;
 }
 
-static int event__repipe_tracing_data(event_t *self,
-                                     struct perf_session *session)
+static int perf_event__repipe_tracing_data(union perf_event *event,
+                                          struct perf_session *session)
 {
        int err;
 
-       event__repipe_synth(self, session);
-       err = event__process_tracing_data(self, session);
+       perf_event__repipe_synth(event, session);
+       err = perf_event__process_tracing_data(event, session);
 
        return err;
 }
@@ -109,8 +112,8 @@ static int dso__inject_build_id(struct dso *self, struct perf_session *session)
        if (self->kernel)
                misc = PERF_RECORD_MISC_KERNEL;
 
-       err = event__synthesize_build_id(self, misc, event__repipe,
-                                        machine, session);
+       err = perf_event__synthesize_build_id(self, misc, perf_event__repipe,
+                                             machine, session);
        if (err) {
                pr_err("Can't synthesize build_id event for %s\n", self->long_name);
                return -1;
@@ -119,8 +122,9 @@ static int dso__inject_build_id(struct dso *self, struct perf_session *session)
        return 0;
 }
 
-static int event__inject_buildid(event_t *event, struct sample_data *sample,
-                                struct perf_session *session)
+static int perf_event__inject_buildid(union perf_event *event,
+                                     struct perf_sample *sample,
+                                     struct perf_session *session)
 {
        struct addr_location al;
        struct thread *thread;
@@ -155,24 +159,24 @@ static int event__inject_buildid(event_t *event, struct sample_data *sample,
        }
 
 repipe:
-       event__repipe(event, sample, session);
+       perf_event__repipe(event, sample, session);
        return 0;
 }
 
 struct perf_event_ops inject_ops = {
-       .sample         = event__repipe,
-       .mmap           = event__repipe,
-       .comm           = event__repipe,
-       .fork           = event__repipe,
-       .exit           = event__repipe,
-       .lost           = event__repipe,
-       .read           = event__repipe,
-       .throttle       = event__repipe,
-       .unthrottle     = event__repipe,
-       .attr           = event__repipe_synth,
-       .event_type     = event__repipe_synth,
-       .tracing_data   = event__repipe_synth,
-       .build_id       = event__repipe_synth,
+       .sample         = perf_event__repipe,
+       .mmap           = perf_event__repipe,
+       .comm           = perf_event__repipe,
+       .fork           = perf_event__repipe,
+       .exit           = perf_event__repipe,
+       .lost           = perf_event__repipe,
+       .read           = perf_event__repipe,
+       .throttle       = perf_event__repipe,
+       .unthrottle     = perf_event__repipe,
+       .attr           = perf_event__repipe_synth,
+       .event_type     = perf_event__repipe_synth,
+       .tracing_data   = perf_event__repipe_synth,
+       .build_id       = perf_event__repipe_synth,
 };
 
 extern volatile int session_done;
@@ -190,10 +194,10 @@ static int __cmd_inject(void)
        signal(SIGINT, sig_handler);
 
        if (inject_build_ids) {
-               inject_ops.sample       = event__inject_buildid;
-               inject_ops.mmap         = event__repipe_mmap;
-               inject_ops.fork         = event__repipe_task;
-               inject_ops.tracing_data = event__repipe_tracing_data;
+               inject_ops.sample       = perf_event__inject_buildid;
+               inject_ops.mmap         = perf_event__repipe_mmap;
+               inject_ops.fork         = perf_event__repipe_task;
+               inject_ops.tracing_data = perf_event__repipe_tracing_data;
        }
 
        session = perf_session__new(input_name, O_RDONLY, false, true, &inject_ops);
index d97256d6598044fab9e9d092e190fd70bdca4d93..7f618f4e7b795c593c059a3198f4f89f6621bb80 100644 (file)
@@ -275,9 +275,8 @@ static void process_free_event(void *data,
        s_alloc->alloc_cpu = -1;
 }
 
-static void
-process_raw_event(event_t *raw_event __used, void *data,
-                 int cpu, u64 timestamp, struct thread *thread)
+static void process_raw_event(union perf_event *raw_event __used, void *data,
+                             int cpu, u64 timestamp, struct thread *thread)
 {
        struct event *event;
        int type;
@@ -304,7 +303,8 @@ process_raw_event(event_t *raw_event __used, void *data,
        }
 }
 
-static int process_sample_event(event_t *event, struct sample_data *sample,
+static int process_sample_event(union perf_event *event,
+                               struct perf_sample *sample,
                                struct perf_session *session)
 {
        struct thread *thread = perf_session__findnew(session, event->ip.pid);
@@ -325,7 +325,7 @@ static int process_sample_event(event_t *event, struct sample_data *sample,
 
 static struct perf_event_ops event_ops = {
        .sample                 = process_sample_event,
-       .comm                   = event__process_comm,
+       .comm                   = perf_event__process_comm,
        .ordered_samples        = true,
 };
 
index d88c6961274cf2e961eec9d1b420dc0621965a6a..6313b6eb3ebbff85f1527692d93416adf9845c31 100644 (file)
@@ -5,6 +5,7 @@
  *
  * Copyright (C) 2009, Thomas Gleixner <tglx@linutronix.de>
  * Copyright (C) 2008-2009, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
  */
 #include "builtin.h"
 
 #include "util/parse-events.h"
 #include "util/cache.h"
 
-int cmd_list(int argc __used, const char **argv __used, const char *prefix __used)
+int cmd_list(int argc, const char **argv, const char *prefix __used)
 {
        setup_pager();
-       print_events();
+
+       if (argc == 1)
+               print_events(NULL);
+       else {
+               int i;
+
+               for (i = 1; i < argc; ++i) {
+                       if (i > 1)
+                               putchar('\n');
+                       if (strncmp(argv[i], "tracepoint", 10) == 0)
+                               print_tracepoint_events(NULL, NULL);
+                       else if (strcmp(argv[i], "hw") == 0 ||
+                                strcmp(argv[i], "hardware") == 0)
+                               print_events_type(PERF_TYPE_HARDWARE);
+                       else if (strcmp(argv[i], "sw") == 0 ||
+                                strcmp(argv[i], "software") == 0)
+                               print_events_type(PERF_TYPE_SOFTWARE);
+                       else if (strcmp(argv[i], "cache") == 0 ||
+                                strcmp(argv[i], "hwcache") == 0)
+                               print_hwcache_events(NULL);
+                       else {
+                               char *sep = strchr(argv[i], ':'), *s;
+                               int sep_idx;
+
+                               if (sep == NULL) {
+                                       print_events(argv[i]);
+                                       continue;
+                               }
+                               sep_idx = sep - argv[i];
+                               s = strdup(argv[i]);
+                               if (s == NULL)
+                                       return -1;
+
+                               s[sep_idx] = '\0';
+                               print_tracepoint_events(s, s + sep_idx + 1);
+                               free(s);
+                       }
+               }
+       }
        return 0;
 }
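Editor's note on usage: with the argument handling above, 'perf list' alone still prints every event; 'perf list hw sw' restricts output to the hardware and software classes, 'perf list tracepoint' lists only tracepoints, and an argument containing a colon, such as 'perf list sched:sched_switch', is split at the ':' and handed to print_tracepoint_events() as a subsystem/event pair. Anything else falls through to print_events() as an event-name filter.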
index 2b36defc5d73dd447fbe20ffd2d357c77d2c2058..2e93f99b148063a638016b3b874779e9c601ebdb 100644 (file)
@@ -834,14 +834,14 @@ static void dump_info(void)
                die("Unknown type of information\n");
 }
 
-static int process_sample_event(event_t *self, struct sample_data *sample,
+static int process_sample_event(union perf_event *event, struct perf_sample *sample,
                                struct perf_session *s)
 {
        struct thread *thread = perf_session__findnew(s, sample->tid);
 
        if (thread == NULL) {
                pr_debug("problem processing %d event, skipping it.\n",
-                       self->header.type);
+                       event->header.type);
                return -1;
        }
 
@@ -852,7 +852,7 @@ static int process_sample_event(event_t *self, struct sample_data *sample,
 
 static struct perf_event_ops eops = {
        .sample                 = process_sample_event,
-       .comm                   = event__process_comm,
+       .comm                   = perf_event__process_comm,
        .ordered_samples        = true,
 };
 
@@ -893,7 +893,7 @@ static const char * const report_usage[] = {
 
 static const struct option report_options[] = {
        OPT_STRING('k', "key", &sort_key, "acquired",
-                   "key for sorting"),
+                   "key for sorting (acquired / contended / wait_total / wait_max / wait_min)"),
        /* TODO: type */
        OPT_END()
 };
index add163c9f0e7d4db3fe01afe704a72c5a923597b..2c0e64d0b4aa6107800dd9736d50da050fa84429 100644 (file)
@@ -36,6 +36,7 @@
 #include "builtin.h"
 #include "util/util.h"
 #include "util/strlist.h"
+#include "util/strfilter.h"
 #include "util/symbol.h"
 #include "util/debug.h"
 #include "util/debugfs.h"
@@ -43,6 +44,8 @@
 #include "util/probe-finder.h"
 #include "util/probe-event.h"
 
+#define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*"
+#define DEFAULT_FUNC_FILTER "!_*"
 #define MAX_PATH_LEN 256
 
 /* Session management structure */
@@ -52,6 +55,7 @@ static struct {
        bool show_lines;
        bool show_vars;
        bool show_ext_vars;
+       bool show_funcs;
        bool mod_events;
        int nevents;
        struct perf_probe_event events[MAX_PROBES];
@@ -59,6 +63,7 @@ static struct {
        struct line_range line_range;
        const char *target_module;
        int max_probe_points;
+       struct strfilter *filter;
 } params;
 
 /* Parse an event definition. Note that any error must die. */
@@ -157,6 +162,27 @@ static int opt_show_vars(const struct option *opt __used,
 }
 #endif
 
+static int opt_set_filter(const struct option *opt __used,
+                         const char *str, int unset __used)
+{
+       const char *err;
+
+       if (str) {
+               pr_debug2("Set filter: %s\n", str);
+               if (params.filter)
+                       strfilter__delete(params.filter);
+               params.filter = strfilter__new(str, &err);
+               if (!params.filter) {
+                       pr_err("Filter parse error at %td.\n", err - str + 1);
+                       pr_err("Source: \"%s\"\n", str);
+                       pr_err("         %*c\n", (int)(err - str + 1), '^');
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
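Editor's note: a hedged sketch of the strfilter API these defaults feed; signatures are assumed from util/strfilter.h as introduced in this series, and var_is_interesting() is illustrative:

	#include <stdbool.h>
	#include "util/strfilter.h"

	static bool var_is_interesting(const char *name)
	{
		const char *err;
		struct strfilter *f = strfilter__new("!__k???tab_* & !__crc_*", &err);
		bool hit;

		if (f == NULL)		/* on parse errors, 'err' points at the fault */
			return false;
		hit = strfilter__compare(f, name);	/* glob match over the rule tree */
		strfilter__delete(f);
		return hit;	/* true for e.g. "vmalloc_sync_all", false for "__crc_foo" */
	}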
 static const char * const probe_usage[] = {
        "perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]",
        "perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
@@ -221,6 +247,13 @@ static const struct option options[] = {
        OPT__DRY_RUN(&probe_event_dry_run),
        OPT_INTEGER('\0', "max-probes", &params.max_probe_points,
                 "Set how many probe points can be found for a probe."),
+       OPT_BOOLEAN('F', "funcs", &params.show_funcs,
+                   "Show potential probe-able functions."),
+       OPT_CALLBACK('\0', "filter", NULL,
+                    "[!]FILTER", "Set a filter (with --vars/funcs only)\n"
+                    "\t\t\t(default: \"" DEFAULT_VAR_FILTER "\" for --vars,\n"
+                    "\t\t\t \"" DEFAULT_FUNC_FILTER "\" for --funcs)",
+                    opt_set_filter),
        OPT_END()
 };
 
@@ -246,7 +279,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
                params.max_probe_points = MAX_PROBES;
 
        if ((!params.nevents && !params.dellist && !params.list_events &&
-            !params.show_lines))
+            !params.show_lines && !params.show_funcs))
                usage_with_options(probe_usage, options);
 
        /*
@@ -267,12 +300,41 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
                        pr_err(" Error: Don't use --list with --vars.\n");
                        usage_with_options(probe_usage, options);
                }
+               if (params.show_funcs) {
+                       pr_err("  Error: Don't use --list with --funcs.\n");
+                       usage_with_options(probe_usage, options);
+               }
                ret = show_perf_probe_events();
                if (ret < 0)
                        pr_err("  Error: Failed to show event list. (%d)\n",
                               ret);
                return ret;
        }
+       if (params.show_funcs) {
+               if (params.nevents != 0 || params.dellist) {
+                       pr_err("  Error: Don't use --funcs with"
+                              " --add/--del.\n");
+                       usage_with_options(probe_usage, options);
+               }
+               if (params.show_lines) {
+                       pr_err("  Error: Don't use --funcs with --line.\n");
+                       usage_with_options(probe_usage, options);
+               }
+               if (params.show_vars) {
+                       pr_err("  Error: Don't use --funcs with --vars.\n");
+                       usage_with_options(probe_usage, options);
+               }
+               if (!params.filter)
+                       params.filter = strfilter__new(DEFAULT_FUNC_FILTER,
+                                                      NULL);
+               ret = show_available_funcs(params.target_module,
+                                          params.filter);
+               strfilter__delete(params.filter);
+               if (ret < 0)
+                       pr_err("  Error: Failed to show functions."
+                              " (%d)\n", ret);
+               return ret;
+       }
 
 #ifdef DWARF_SUPPORT
        if (params.show_lines) {
@@ -297,10 +359,16 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
                               " --add/--del.\n");
                        usage_with_options(probe_usage, options);
                }
+               if (!params.filter)
+                       params.filter = strfilter__new(DEFAULT_VAR_FILTER,
+                                                      NULL);
+
                ret = show_available_vars(params.events, params.nevents,
                                          params.max_probe_points,
                                          params.target_module,
+                                         params.filter,
                                          params.show_ext_vars);
+               strfilter__delete(params.filter);
                if (ret < 0)
                        pr_err("  Error: Failed to show vars. (%d)\n", ret);
                return ret;
index 60cac6f92e8b8d17bf165a6a592c6653f6141a96..6febcc168a8cc261e2dc10314ef082d40c717ec8 100644 (file)
 
 #include "util/header.h"
 #include "util/event.h"
+#include "util/evlist.h"
 #include "util/evsel.h"
 #include "util/debug.h"
 #include "util/session.h"
 #include "util/symbol.h"
 #include "util/cpumap.h"
+#include "util/thread_map.h"
 
 #include <unistd.h>
 #include <sched.h>
@@ -37,16 +39,14 @@ enum write_mode_t {
 
 static u64                     user_interval                   = ULLONG_MAX;
 static u64                     default_interval                =      0;
-static u64                     sample_type;
 
-static struct cpu_map          *cpus;
 static unsigned int            page_size;
 static unsigned int            mmap_pages                      =    128;
 static unsigned int            user_freq                       = UINT_MAX;
 static int                     freq                            =   1000;
 static int                     output;
 static int                     pipe_output                     =      0;
-static const char              *output_name                    = "perf.data";
+static const char              *output_name                    = NULL;
 static int                     group                           =      0;
 static int                     realtime_prio                   =      0;
 static bool                    nodelay                         =  false;
@@ -55,7 +55,6 @@ static bool                   sample_id_all_avail             =   true;
 static bool                    system_wide                     =  false;
 static pid_t                   target_pid                      =     -1;
 static pid_t                   target_tid                      =     -1;
-static struct thread_map       *threads;
 static pid_t                   child_pid                       =     -1;
 static bool                    no_inherit                      =  false;
 static enum write_mode_t       write_mode                      = WRITE_FORCE;
@@ -66,51 +65,17 @@ static bool                 sample_address                  =  false;
 static bool                    sample_time                     =  false;
 static bool                    no_buildid                      =  false;
 static bool                    no_buildid_cache                =  false;
+static struct perf_evlist      *evsel_list;
 
 static long                    samples                         =      0;
 static u64                     bytes_written                   =      0;
 
-static struct pollfd           *event_array;
-
-static int                     nr_poll                         =      0;
-static int                     nr_cpu                          =      0;
-
 static int                     file_new                        =      1;
 static off_t                   post_processing_offset;
 
 static struct perf_session     *session;
 static const char              *cpu_list;
 
-struct mmap_data {
-       void                    *base;
-       unsigned int            mask;
-       unsigned int            prev;
-};
-
-static struct mmap_data                mmap_array[MAX_NR_CPUS];
-
-static unsigned long mmap_read_head(struct mmap_data *md)
-{
-       struct perf_event_mmap_page *pc = md->base;
-       long head;
-
-       head = pc->data_head;
-       rmb();
-
-       return head;
-}
-
-static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
-{
-       struct perf_event_mmap_page *pc = md->base;
-
-       /*
-        * ensure all reads are done before we write the tail out.
-        */
-       /* mb(); */
-       pc->data_tail = tail;
-}
-
 static void advance_output(size_t size)
 {
        bytes_written += size;
@@ -131,42 +96,26 @@ static void write_output(void *buf, size_t size)
        }
 }
 
-static int process_synthesized_event(event_t *event,
-                                    struct sample_data *sample __used,
+static int process_synthesized_event(union perf_event *event,
+                                    struct perf_sample *sample __used,
                                     struct perf_session *self __used)
 {
        write_output(event, event->header.size);
        return 0;
 }
 
-static void mmap_read(struct mmap_data *md)
+static void mmap_read(struct perf_mmap *md)
 {
-       unsigned int head = mmap_read_head(md);
+       unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        unsigned long size;
        void *buf;
-       int diff;
 
-       /*
-        * If we're further behind than half the buffer, there's a chance
-        * the writer will bite our tail and mess up the samples under us.
-        *
-        * If we somehow ended up ahead of the head, we got messed up.
-        *
-        * In either case, truncate and restart at head.
-        */
-       diff = head - old;
-       if (diff < 0) {
-               fprintf(stderr, "WARNING: failed to keep up with mmap data\n");
-               /*
-                * head points to a known good entry, start there.
-                */
-               old = head;
-       }
+       if (old == head)
+               return;
 
-       if (old != head)
-               samples++;
+       samples++;
 
        size = head - old;
 
@@ -185,7 +134,7 @@ static void mmap_read(struct mmap_data *md)
        write_output(buf, size);
 
        md->prev = old;
-       mmap_write_tail(md, old);
+       perf_mmap__write_tail(md, old);
 }
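Editor's note: perf_mmap__read_head()/perf_mmap__write_tail() implement the standard reader side of the perf ring buffer: load data_head (with a read barrier after the load), consume everything between the saved tail and that head, then publish the new tail so the kernel may reuse the space. A conceptual consumer, reusing the struct perf_mmap fields from this file; drain() and consume() are hypothetical stand-ins for mmap_read()/write_output():

	static void drain(struct perf_mmap *md,
			  void (*consume)(const void *buf, unsigned int len))
	{
		unsigned int head = perf_mmap__read_head(md); /* data_head + rmb() */
		unsigned int old = md->prev;
		unsigned char *data = md->base + page_size;   /* skip the control page */

		while (old != head) {
			unsigned int off = old & md->mask;    /* buffer size is mask + 1 */
			unsigned int len = head - old;

			if (off + len > md->mask + 1)         /* split at the wrap point */
				len = md->mask + 1 - off;
			consume(&data[off], len);
			old += len;
		}
		md->prev = old;
		perf_mmap__write_tail(md, old); /* kernel may now overwrite this space */
	}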
 
 static volatile int done = 0;
@@ -209,53 +158,10 @@ static void sig_atexit(void)
        kill(getpid(), signr);
 }
 
-static int group_fd;
-
-static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr)
-{
-       struct perf_header_attr *h_attr;
-
-       if (nr < session->header.attrs) {
-               h_attr = session->header.attr[nr];
-       } else {
-               h_attr = perf_header_attr__new(a);
-               if (h_attr != NULL)
-                       if (perf_header__add_attr(&session->header, h_attr) < 0) {
-                               perf_header_attr__delete(h_attr);
-                               h_attr = NULL;
-                       }
-       }
-
-       return h_attr;
-}
-
-static void create_counter(struct perf_evsel *evsel, int cpu)
+static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
 {
-       char *filter = evsel->filter;
        struct perf_event_attr *attr = &evsel->attr;
-       struct perf_header_attr *h_attr;
        int track = !evsel->idx; /* only the first counter needs these */
-       int thread_index;
-       int ret;
-       struct {
-               u64 count;
-               u64 time_enabled;
-               u64 time_running;
-               u64 id;
-       } read_data;
-       /*
-        * Check if parse_single_tracepoint_event has already asked for
-        * PERF_SAMPLE_TIME.
-        *
-        * XXX this is kludgy but short term fix for problems introduced by
-        * eac23d1c that broke 'perf script' by having different sample_types
-        * when using multiple tracepoint events when we use a perf binary
-        * that tries to use sample_id_all on an older kernel.
-        *
-        * We need to move counter creation to perf_session, support
-        * different sample_types, etc.
-        */
-       bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
 
        attr->read_format       = PERF_FORMAT_TOTAL_TIME_ENABLED |
                                  PERF_FORMAT_TOTAL_TIME_RUNNING |
@@ -263,7 +169,7 @@ static void create_counter(struct perf_evsel *evsel, int cpu)
 
        attr->sample_type       |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
 
-       if (nr_counters > 1)
+       if (evlist->nr_entries > 1)
                attr->sample_type |= PERF_SAMPLE_ID;
 
        /*
@@ -315,19 +221,58 @@ static void create_counter(struct perf_evsel *evsel, int cpu)
 
        attr->mmap              = track;
        attr->comm              = track;
-       attr->inherit           = !no_inherit;
+
        if (target_pid == -1 && target_tid == -1 && !system_wide) {
                attr->disabled = 1;
                attr->enable_on_exec = 1;
        }
-retry_sample_id:
-       attr->sample_id_all = sample_id_all_avail ? 1 : 0;
+}
 
-       for (thread_index = 0; thread_index < threads->nr; thread_index++) {
-try_again:
-               FD(evsel, nr_cpu, thread_index) = sys_perf_event_open(attr, threads->map[thread_index], cpu, group_fd, 0);
+static bool perf_evlist__equal(struct perf_evlist *evlist,
+                              struct perf_evlist *other)
+{
+       struct perf_evsel *pos, *pair;
+
+       if (evlist->nr_entries != other->nr_entries)
+               return false;
+
+       pair = list_entry(other->entries.next, struct perf_evsel, node);
 
-               if (FD(evsel, nr_cpu, thread_index) < 0) {
+       list_for_each_entry(pos, &evlist->entries, node) {
+               if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr)) != 0)
+                       return false;
+               pair = list_entry(pair->node.next, struct perf_evsel, node);
+       }
+
+       return true;
+}
+
+static void open_counters(struct perf_evlist *evlist)
+{
+       struct perf_evsel *pos;
+
+       list_for_each_entry(pos, &evlist->entries, node) {
+               struct perf_event_attr *attr = &pos->attr;
+               /*
+                * Check if parse_single_tracepoint_event has already asked for
+                * PERF_SAMPLE_TIME.
+                *
+                * XXX this is kludgy but short term fix for problems introduced by
+                * eac23d1c that broke 'perf script' by having different sample_types
+                * when using multiple tracepoint events when we use a perf binary
+                * that tries to use sample_id_all on an older kernel.
+                *
+                * We need to move counter creation to perf_session, support
+                * different sample_types, etc.
+                */
+               bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
+
+               config_attr(pos, evlist);
+retry_sample_id:
+               attr->sample_id_all = sample_id_all_avail ? 1 : 0;
+try_again:
+               if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
+                                    !no_inherit) < 0) {
                        int err = errno;
 
                        if (err == EPERM || err == EACCES)
@@ -364,7 +309,7 @@ try_again:
                        }
                        printf("\n");
                        error("sys_perf_event_open() syscall returned with %d (%s).  /bin/dmesg may provide additional information.\n",
-                             FD(evsel, nr_cpu, thread_index), strerror(err));
+                             err, strerror(err));
 
 #if defined(__i386__) || defined(__x86_64__)
                        if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
@@ -375,90 +320,28 @@ try_again:
 #endif
 
                        die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
-                       exit(-1);
                }
+       }
 
-               h_attr = get_header_attr(attr, evsel->idx);
-               if (h_attr == NULL)
-                       die("nomem\n");
+       if (perf_evlist__set_filters(evlist)) {
+               error("failed to set filter with %d (%s)\n", errno,
+                       strerror(errno));
+               exit(-1);
+       }
 
-               if (!file_new) {
-                       if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
-                               fprintf(stderr, "incompatible append\n");
-                               exit(-1);
-                       }
-               }
+       if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
+               die("failed to mmap with %d (%s)\n", errno, strerror(errno));
 
-               if (read(FD(evsel, nr_cpu, thread_index), &read_data, sizeof(read_data)) == -1) {
-                       perror("Unable to read perf file descriptor");
+       if (file_new)
+               session->evlist = evlist;
+       else {
+               if (!perf_evlist__equal(session->evlist, evlist)) {
+                       fprintf(stderr, "incompatible append\n");
                        exit(-1);
                }
+       }
 
-               if (perf_header_attr__add_id(h_attr, read_data.id) < 0) {
-                       pr_warning("Not enough memory to add id\n");
-                       exit(-1);
-               }
-
-               assert(FD(evsel, nr_cpu, thread_index) >= 0);
-               fcntl(FD(evsel, nr_cpu, thread_index), F_SETFL, O_NONBLOCK);
-
-               /*
-                * First counter acts as the group leader:
-                */
-               if (group && group_fd == -1)
-                       group_fd = FD(evsel, nr_cpu, thread_index);
-
-               if (evsel->idx || thread_index) {
-                       struct perf_evsel *first;
-                       first = list_entry(evsel_list.next, struct perf_evsel, node);
-                       ret = ioctl(FD(evsel, nr_cpu, thread_index),
-                                   PERF_EVENT_IOC_SET_OUTPUT,
-                                   FD(first, nr_cpu, 0));
-                       if (ret) {
-                               error("failed to set output: %d (%s)\n", errno,
-                                               strerror(errno));
-                               exit(-1);
-                       }
-               } else {
-                       mmap_array[nr_cpu].prev = 0;
-                       mmap_array[nr_cpu].mask = mmap_pages*page_size - 1;
-                       mmap_array[nr_cpu].base = mmap(NULL, (mmap_pages+1)*page_size,
-                               PROT_READ | PROT_WRITE, MAP_SHARED, FD(evsel, nr_cpu, thread_index), 0);
-                       if (mmap_array[nr_cpu].base == MAP_FAILED) {
-                               error("failed to mmap with %d (%s)\n", errno, strerror(errno));
-                               exit(-1);
-                       }
-
-                       event_array[nr_poll].fd = FD(evsel, nr_cpu, thread_index);
-                       event_array[nr_poll].events = POLLIN;
-                       nr_poll++;
-               }
-
-               if (filter != NULL) {
-                       ret = ioctl(FD(evsel, nr_cpu, thread_index),
-                                   PERF_EVENT_IOC_SET_FILTER, filter);
-                       if (ret) {
-                               error("failed to set filter with %d (%s)\n", errno,
-                                               strerror(errno));
-                               exit(-1);
-                       }
-               }
-       }
-
-       if (!sample_type)
-               sample_type = attr->sample_type;
-}
-
-static void open_counters(int cpu)
-{
-       struct perf_evsel *pos;
-
-       group_fd = -1;
-
-       list_for_each_entry(pos, &evsel_list, node)
-               create_counter(pos, cpu);
-
-       nr_cpu++;
+       perf_session__update_sample_type(session);
 }
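Editor's note: both the removed create_counter() path and perf_evsel__open() ultimately go through perf's sys_perf_event_open() wrapper; the syscall has no libc stub, so perf issues it directly. A minimal sketch of such a wrapper:

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	static int sys_perf_event_open(struct perf_event_attr *attr,
				       pid_t pid, int cpu, int group_fd,
				       unsigned long flags)
	{
		/* returns a perf event fd, or -1 with errno set (EPERM/EACCES,
		 * ENOENT for unknown events, EOPNOTSUPP as handled above) */
		return syscall(__NR_perf_event_open, attr, pid, cpu,
			       group_fd, flags);
	}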
 
 static int process_buildids(void)
@@ -481,14 +364,14 @@ static void atexit_header(void)
 
                if (!no_buildid)
                        process_buildids();
-               perf_header__write(&session->header, output, true);
+               perf_session__write_header(session, evsel_list, output, true);
                perf_session__delete(session);
-               perf_evsel_list__delete();
+               perf_evlist__delete(evsel_list);
                symbol__exit();
        }
 }
 
-static void event__synthesize_guest_os(struct machine *machine, void *data)
+static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
 {
        int err;
        struct perf_session *psession = data;
@@ -504,8 +387,8 @@ static void event__synthesize_guest_os(struct machine *machine, void *data)
         *method is used to avoid symbol missing when the first addr is
         *in module instead of in guest kernel.
         */
-       err = event__synthesize_modules(process_synthesized_event,
-                                       psession, machine);
+       err = perf_event__synthesize_modules(process_synthesized_event,
+                                            psession, machine);
        if (err < 0)
                pr_err("Couldn't record guest kernel [%d]'s reference"
                       " relocation symbol.\n", machine->pid);
@@ -514,11 +397,12 @@ static void event__synthesize_guest_os(struct machine *machine, void *data)
         * We use _stext for guest kernel because guest kernel's /proc/kallsyms
         * have no _text sometimes.
         */
-       err = event__synthesize_kernel_mmap(process_synthesized_event,
-                                           psession, machine, "_text");
+       err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
+                                                psession, machine, "_text");
        if (err < 0)
-               err = event__synthesize_kernel_mmap(process_synthesized_event,
-                                                   psession, machine, "_stext");
+               err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
+                                                        psession, machine,
+                                                        "_stext");
        if (err < 0)
                pr_err("Couldn't record guest kernel [%d]'s reference"
                       " relocation symbol.\n", machine->pid);
@@ -533,9 +417,9 @@ static void mmap_read_all(void)
 {
        int i;
 
-       for (i = 0; i < nr_cpu; i++) {
-               if (mmap_array[i].base)
-                       mmap_read(&mmap_array[i]);
+       for (i = 0; i < evsel_list->cpus->nr; i++) {
+               if (evsel_list->mmap[i].base)
+                       mmap_read(&evsel_list->mmap[i]);
        }
 
        if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
@@ -566,18 +450,26 @@ static int __cmd_record(int argc, const char **argv)
                exit(-1);
        }
 
-       if (!strcmp(output_name, "-"))
-               pipe_output = 1;
-       else if (!stat(output_name, &st) && st.st_size) {
-               if (write_mode == WRITE_FORCE) {
-                       char oldname[PATH_MAX];
-                       snprintf(oldname, sizeof(oldname), "%s.old",
-                                output_name);
-                       unlink(oldname);
-                       rename(output_name, oldname);
+       if (!output_name) {
+               if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
+                       pipe_output = 1;
+               else
+                       output_name = "perf.data";
+       }
+       if (output_name) {
+               if (!strcmp(output_name, "-"))
+                       pipe_output = 1;
+               else if (!stat(output_name, &st) && st.st_size) {
+                       if (write_mode == WRITE_FORCE) {
+                               char oldname[PATH_MAX];
+                               snprintf(oldname, sizeof(oldname), "%s.old",
+                                        output_name);
+                               unlink(oldname);
+                               rename(output_name, oldname);
+                       }
+               } else if (write_mode == WRITE_APPEND) {
+                       write_mode = WRITE_FORCE;
                }
-       } else if (write_mode == WRITE_APPEND) {
-               write_mode = WRITE_FORCE;
        }
 
        flags = O_CREAT|O_RDWR;
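The hunk above reworks output selection: with no output file given, perf record now streams to a pipe when stdout is a FIFO and falls back to perf.data otherwise. A small standalone sketch of that detection (variable names mirror the patch but the program is illustrative):

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	const char *output_name = NULL;	/* i.e. no -o option given */
	int pipe_output = 0;

	if (output_name == NULL) {
		if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
			pipe_output = 1;	/* stdout is a pipe: stream */
		else
			output_name = "perf.data";	/* default file */
	}

	fprintf(stderr, "pipe_output=%d, output=%s\n",
		pipe_output, output_name ? output_name : "<stdout>");
	return 0;
}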
@@ -606,19 +498,14 @@ static int __cmd_record(int argc, const char **argv)
                perf_header__set_feat(&session->header, HEADER_BUILD_ID);
 
        if (!file_new) {
-               err = perf_header__read(session, output);
+               err = perf_session__read_header(session, output);
                if (err < 0)
                        goto out_delete_session;
        }
 
-       if (have_tracepoints(&evsel_list))
+       if (have_tracepoints(&evsel_list->entries))
                perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
 
-       /*
-        * perf_session__delete(session) will be called at atexit_header()
-        */
-       atexit(atexit_header);
-
        if (forks) {
                child_pid = fork();
                if (child_pid < 0) {
@@ -659,7 +546,7 @@ static int __cmd_record(int argc, const char **argv)
                }
 
                if (!system_wide && target_tid == -1 && target_pid == -1)
-                       threads->map[0] = child_pid;
+                       evsel_list->threads->map[0] = child_pid;
 
                close(child_ready_pipe[1]);
                close(go_pipe[0]);
@@ -673,46 +560,42 @@ static int __cmd_record(int argc, const char **argv)
                close(child_ready_pipe[0]);
        }
 
-       if (!system_wide && no_inherit && !cpu_list) {
-               open_counters(-1);
-       } else {
-               for (i = 0; i < cpus->nr; i++)
-                       open_counters(cpus->map[i]);
-       }
+       open_counters(evsel_list);
 
-       perf_session__set_sample_type(session, sample_type);
+       /*
+        * perf_session__delete(session) will be called at atexit_header()
+        */
+       atexit(atexit_header);
 
        if (pipe_output) {
                err = perf_header__write_pipe(output);
                if (err < 0)
                        return err;
        } else if (file_new) {
-               err = perf_header__write(&session->header, output, false);
+               err = perf_session__write_header(session, evsel_list,
+                                                output, false);
                if (err < 0)
                        return err;
        }
 
        post_processing_offset = lseek(output, 0, SEEK_CUR);
 
-       perf_session__set_sample_id_all(session, sample_id_all_avail);
-
        if (pipe_output) {
-               err = event__synthesize_attrs(&session->header,
-                                             process_synthesized_event,
-                                             session);
+               err = perf_session__synthesize_attrs(session,
+                                                    process_synthesized_event);
                if (err < 0) {
                        pr_err("Couldn't synthesize attrs.\n");
                        return err;
                }
 
-               err = event__synthesize_event_types(process_synthesized_event,
-                                                   session);
+               err = perf_event__synthesize_event_types(process_synthesized_event,
+                                                        session);
                if (err < 0) {
                        pr_err("Couldn't synthesize event_types.\n");
                        return err;
                }
 
-               if (have_tracepoints(&evsel_list)) {
+               if (have_tracepoints(&evsel_list->entries)) {
                        /*
                         * FIXME err <= 0 here actually means that
                         * there were no tracepoints so it's not really
@@ -721,9 +604,9 @@ static int __cmd_record(int argc, const char **argv)
                         * return this more properly and also
                         * propagate errors that now are calling die()
                         */
-                       err = event__synthesize_tracing_data(output, &evsel_list,
-                                                            process_synthesized_event,
-                                                            session);
+                       err = perf_event__synthesize_tracing_data(output, evsel_list,
+                                                                 process_synthesized_event,
+                                                                 session);
                        if (err <= 0) {
                                pr_err("Couldn't record tracing data.\n");
                                return err;
@@ -738,31 +621,34 @@ static int __cmd_record(int argc, const char **argv)
                return -1;
        }
 
-       err = event__synthesize_kernel_mmap(process_synthesized_event,
-                                           session, machine, "_text");
+       err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
+                                                session, machine, "_text");
        if (err < 0)
-               err = event__synthesize_kernel_mmap(process_synthesized_event,
-                                                   session, machine, "_stext");
+               err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
+                                                        session, machine, "_stext");
        if (err < 0)
                pr_err("Couldn't record kernel reference relocation symbol\n"
                       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
                       "Check /proc/kallsyms permission or run as root.\n");
 
-       err = event__synthesize_modules(process_synthesized_event,
-                                       session, machine);
+       err = perf_event__synthesize_modules(process_synthesized_event,
+                                            session, machine);
        if (err < 0)
                pr_err("Couldn't record kernel module information.\n"
                       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
                       "Check /proc/modules permission or run as root.\n");
 
        if (perf_guest)
-               perf_session__process_machines(session, event__synthesize_guest_os);
+               perf_session__process_machines(session,
+                                              perf_event__synthesize_guest_os);
 
        if (!system_wide)
-               event__synthesize_thread_map(threads, process_synthesized_event,
-                                            session);
+               perf_event__synthesize_thread_map(evsel_list->threads,
+                                                 process_synthesized_event,
+                                                 session);
        else
-               event__synthesize_threads(process_synthesized_event, session);
+               perf_event__synthesize_threads(process_synthesized_event,
+                                              session);
 
        if (realtime_prio) {
                struct sched_param param;
@@ -789,17 +675,17 @@ static int __cmd_record(int argc, const char **argv)
                if (hits == samples) {
                        if (done)
                                break;
-                       err = poll(event_array, nr_poll, -1);
+                       err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
                        waking++;
                }
 
                if (done) {
-                       for (i = 0; i < nr_cpu; i++) {
+                       for (i = 0; i < evsel_list->cpus->nr; i++) {
                                struct perf_evsel *pos;
 
-                               list_for_each_entry(pos, &evsel_list, node) {
+                               list_for_each_entry(pos, &evsel_list->entries, node) {
                                        for (thread = 0;
-                                               thread < threads->nr;
+                                               thread < evsel_list->threads->nr;
                                                thread++)
                                                ioctl(FD(pos, i, thread),
                                                        PERF_EVENT_IOC_DISABLE);
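With the per-command arrays gone, a single pollfd array sized for all counters is filled once and handed to poll(), as the loop above does through evsel_list->pollfd and nr_fds. A sketch of that shared-array pattern (structure and function names are illustrative, not perf's):

#include <poll.h>
#include <stdio.h>
#include <stdlib.h>

struct fd_pool {
	struct pollfd *pollfd;
	int nr_fds;
};

static int pool_add(struct fd_pool *pool, int fd)
{
	struct pollfd *tmp;

	tmp = realloc(pool->pollfd, (pool->nr_fds + 1) * sizeof(*tmp));
	if (tmp == NULL)
		return -1;
	tmp[pool->nr_fds].fd = fd;
	tmp[pool->nr_fds].events = POLLIN;
	pool->pollfd = tmp;
	pool->nr_fds++;
	return 0;
}

int main(void)
{
	struct fd_pool pool = { NULL, 0 };

	pool_add(&pool, 0);			/* e.g. stdin */
	poll(pool.pollfd, pool.nr_fds, 0);	/* one wait for all fds */
	printf("polling %d fd(s)\n", pool.nr_fds);
	free(pool.pollfd);
	return 0;
}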
@@ -838,10 +724,10 @@ static const char * const record_usage[] = {
 static bool force, append_file;
 
 const struct option record_options[] = {
-       OPT_CALLBACK('e', "event", NULL, "event",
+       OPT_CALLBACK('e', "event", &evsel_list, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events),
-       OPT_CALLBACK(0, "filter", NULL, "filter",
+       OPT_CALLBACK(0, "filter", &evsel_list, "filter",
                     "event filter", parse_filter),
        OPT_INTEGER('p', "pid", &target_pid,
                    "record events on existing process id"),
@@ -884,6 +770,9 @@ const struct option record_options[] = {
                    "do not update the buildid cache"),
        OPT_BOOLEAN('B', "no-buildid", &no_buildid,
                    "do not collect buildids in perf.data"),
+       OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
+                    "monitor event in cgroup name only",
+                    parse_cgroups),
        OPT_END()
 };
 
@@ -892,6 +781,10 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
        int err = -ENOMEM;
        struct perf_evsel *pos;
 
+       evsel_list = perf_evlist__new(NULL, NULL);
+       if (evsel_list == NULL)
+               return -ENOMEM;
+
        argc = parse_options(argc, argv, record_options, record_usage,
                            PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc && target_pid == -1 && target_tid == -1 &&
@@ -908,12 +801,19 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
                write_mode = WRITE_FORCE;
        }
 
+       if (nr_cgroups && !system_wide) {
+               fprintf(stderr, "cgroup monitoring only available in"
+                       " system-wide mode\n");
+               usage_with_options(record_usage, record_options);
+       }
+
        symbol__init();
 
        if (no_buildid_cache || no_buildid)
                disable_buildid_cache();
 
-       if (list_empty(&evsel_list) && perf_evsel_list__create_default() < 0) {
+       if (evsel_list->nr_entries == 0 &&
+           perf_evlist__add_default(evsel_list) < 0) {
                pr_err("Not enough memory for event selector list\n");
                goto out_symbol_exit;
        }
@@ -921,27 +821,19 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
        if (target_pid != -1)
                target_tid = target_pid;
 
-       threads = thread_map__new(target_pid, target_tid);
-       if (threads == NULL) {
-               pr_err("Problems finding threads of monitor\n");
+       if (perf_evlist__create_maps(evsel_list, target_pid,
+                                    target_tid, cpu_list) < 0)
                usage_with_options(record_usage, record_options);
-       }
 
-       cpus = cpu_map__new(cpu_list);
-       if (cpus == NULL) {
-               perror("failed to parse CPUs map");
-               return -1;
-       }
-
-       list_for_each_entry(pos, &evsel_list, node) {
-               if (perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
+       list_for_each_entry(pos, &evsel_list->entries, node) {
+               if (perf_evsel__alloc_fd(pos, evsel_list->cpus->nr,
+                                        evsel_list->threads->nr) < 0)
                        goto out_free_fd;
                if (perf_header__push_event(pos->attr.config, event_name(pos)))
                        goto out_free_fd;
        }
-       event_array = malloc((sizeof(struct pollfd) * MAX_NR_CPUS *
-                             MAX_COUNTERS * threads->nr));
-       if (!event_array)
+
+       if (perf_evlist__alloc_pollfd(evsel_list) < 0)
                goto out_free_fd;
 
        if (user_interval != ULLONG_MAX)
@@ -959,16 +851,12 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
        } else {
                fprintf(stderr, "frequency and count are zero, aborting\n");
                err = -EINVAL;
-               goto out_free_event_array;
+               goto out_free_fd;
        }
 
        err = __cmd_record(argc, argv);
-
-out_free_event_array:
-       free(event_array);
 out_free_fd:
-       thread_map__delete(threads);
-       threads = NULL;
+       perf_evlist__delete_maps(evsel_list);
 out_symbol_exit:
        symbol__exit();
        return err;
index c27e31f289e61b74f980923c88cd402ea2ab5f7c..b1b82009ab9b8f8ce5065e644b2985915155d3ae 100644 (file)
@@ -9,6 +9,7 @@
 
 #include "util/util.h"
 
+#include "util/annotate.h"
 #include "util/color.h"
 #include <linux/list.h>
 #include "util/cache.h"
@@ -20,6 +21,8 @@
 
 #include "perf.h"
 #include "util/debug.h"
+#include "util/evlist.h"
+#include "util/evsel.h"
 #include "util/header.h"
 #include "util/session.h"
 
@@ -43,120 +46,79 @@ static const char  default_pretty_printing_style[] = "normal";
 static const char      *pretty_printing_style = default_pretty_printing_style;
 
 static char            callchain_default_opt[] = "fractal,0.5";
+static symbol_filter_t annotate_init;
 
-static struct hists *perf_session__hists_findnew(struct perf_session *self,
-                                                u64 event_stream, u32 type,
-                                                u64 config)
-{
-       struct rb_node **p = &self->hists_tree.rb_node;
-       struct rb_node *parent = NULL;
-       struct hists *iter, *new;
-
-       while (*p != NULL) {
-               parent = *p;
-               iter = rb_entry(parent, struct hists, rb_node);
-               if (iter->config == config)
-                       return iter;
-
-
-               if (config > iter->config)
-                       p = &(*p)->rb_right;
-               else
-                       p = &(*p)->rb_left;
-       }
-
-       new = malloc(sizeof(struct hists));
-       if (new == NULL)
-               return NULL;
-       memset(new, 0, sizeof(struct hists));
-       new->event_stream = event_stream;
-       new->config = config;
-       new->type = type;
-       rb_link_node(&new->rb_node, parent, p);
-       rb_insert_color(&new->rb_node, &self->hists_tree);
-       return new;
-}
-
-static int perf_session__add_hist_entry(struct perf_session *self,
+static int perf_session__add_hist_entry(struct perf_session *session,
                                        struct addr_location *al,
-                                       struct sample_data *data)
+                                       struct perf_sample *sample)
 {
-       struct map_symbol *syms = NULL;
        struct symbol *parent = NULL;
-       int err = -ENOMEM;
+       int err = 0;
        struct hist_entry *he;
-       struct hists *hists;
-       struct perf_event_attr *attr;
-
-       if ((sort__has_parent || symbol_conf.use_callchain) && data->callchain) {
-               syms = perf_session__resolve_callchain(self, al->thread,
-                                                      data->callchain, &parent);
-               if (syms == NULL)
-                       return -ENOMEM;
+       struct perf_evsel *evsel;
+
+       if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
+               err = perf_session__resolve_callchain(session, al->thread,
+                                                     sample->callchain, &parent);
+               if (err)
+                       return err;
        }
 
-       attr = perf_header__find_attr(data->id, &self->header);
-       if (attr)
-               hists = perf_session__hists_findnew(self, data->id, attr->type, attr->config);
-       else
-               hists = perf_session__hists_findnew(self, data->id, 0, 0);
-       if (hists == NULL)
-               goto out_free_syms;
-       he = __hists__add_entry(hists, al, parent, data->period);
+       evsel = perf_evlist__id2evsel(session->evlist, sample->id);
+       if (evsel == NULL) {
+               /*
+                * FIXME: Propagate this back, but at least we're in a builtin,
+                * where exit() is allowed. ;-)
+                */
+               ui__warning("Invalid %s file, contains samples with id %" PRIu64 " not in "
+                           "its header!\n", input_name, sample->id);
+               exit_browser(0);
+               exit(1);
+       }
+
+       he = __hists__add_entry(&evsel->hists, al, parent, sample->period);
        if (he == NULL)
-               goto out_free_syms;
-       err = 0;
+               return -ENOMEM;
+
        if (symbol_conf.use_callchain) {
-               err = callchain_append(he->callchain, data->callchain, syms,
-                                      data->period);
+               err = callchain_append(he->callchain, &session->callchain_cursor,
+                                      sample->period);
                if (err)
-                       goto out_free_syms;
+                       return err;
        }
        /*
         * Only in the newt browser do we do integrated annotation,
         * so we don't allocate the extra space needed because the stdio
         * code will not use it.
         */
-       if (use_browser > 0)
-               err = hist_entry__inc_addr_samples(he, al->addr);
-out_free_syms:
-       free(syms);
-       return err;
-}
+       if (al->sym != NULL && use_browser > 0) {
+               struct annotation *notes = symbol__annotation(he->ms.sym);
 
-static int add_event_total(struct perf_session *session,
-                          struct sample_data *data,
-                          struct perf_event_attr *attr)
-{
-       struct hists *hists;
+               assert(evsel != NULL);
 
-       if (attr)
-               hists = perf_session__hists_findnew(session, data->id,
-                                                   attr->type, attr->config);
-       else
-               hists = perf_session__hists_findnew(session, data->id, 0, 0);
+               err = -ENOMEM;
+               if (notes->src == NULL &&
+                   symbol__alloc_hist(he->ms.sym, session->evlist->nr_entries) < 0)
+                       goto out;
 
-       if (!hists)
-               return -ENOMEM;
+               err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+       }
 
-       hists->stats.total_period += data->period;
-       /*
-        * FIXME: add_event_total should be moved from here to
-        * perf_session__process_event so that the proper hist is passed to
-        * the event_op methods.
-        */
-       hists__inc_nr_events(hists, PERF_RECORD_SAMPLE);
-       session->hists.stats.total_period += data->period;
-       return 0;
+       evsel->hists.stats.total_period += sample->period;
+       hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
+out:
+       return err;
 }
 
-static int process_sample_event(event_t *event, struct sample_data *sample,
+
+static int process_sample_event(union perf_event *event,
+                               struct perf_sample *sample,
                                struct perf_session *session)
 {
        struct addr_location al;
-       struct perf_event_attr *attr;
 
-       if (event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
+       if (perf_event__preprocess_sample(event, session, &al, sample,
+                                         annotate_init) < 0) {
                fprintf(stderr, "problem processing %d event, skipping it.\n",
                        event->header.type);
                return -1;
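The rbtree of per-config hists is gone: each sample's id now selects its owning evsel (perf_evlist__id2evsel above), whose embedded hists receives the entry. A standalone sketch of that id-to-evsel mapping, with a plain linear scan standing in for perf's id lookup (all names here are illustrative):

#include <inttypes.h>
#include <stdio.h>

struct evsel {
	uint64_t id;		/* kernel-assigned sample id */
	const char *name;
};

static struct evsel *id2evsel(struct evsel *evsels, int n, uint64_t id)
{
	for (int i = 0; i < n; i++)
		if (evsels[i].id == id)
			return &evsels[i];
	return NULL;	/* id never announced in the file header */
}

int main(void)
{
	struct evsel evsels[] = { { 1, "cycles" }, { 2, "instructions" } };
	struct evsel *e = id2evsel(evsels, 2, 2);

	printf("sample id 2 -> %s\n", e ? e->name : "unknown");
	return 0;
}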
@@ -170,26 +132,17 @@ static int process_sample_event(event_t *event, struct sample_data *sample,
                return -1;
        }
 
-       attr = perf_header__find_attr(sample->id, &session->header);
-
-       if (add_event_total(session, sample, attr)) {
-               pr_debug("problem adding event period\n");
-               return -1;
-       }
-
        return 0;
 }
 
-static int process_read_event(event_t *event, struct sample_data *sample __used,
-                             struct perf_session *session __used)
+static int process_read_event(union perf_event *event,
+                             struct perf_sample *sample __used,
+                             struct perf_session *session)
 {
-       struct perf_event_attr *attr;
-
-       attr = perf_header__find_attr(event->read.id, &session->header);
-
+       struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist,
+                                                        event->read.id);
        if (show_threads) {
-               const char *name = attr ? __event_name(attr->type, attr->config)
-                                  : "unknown";
+               const char *name = evsel ? event_name(evsel) : "unknown";
                perf_read_values_add_value(&show_threads_values,
                                           event->read.pid, event->read.tid,
                                           event->read.id,
@@ -198,7 +151,7 @@ static int process_read_event(event_t *event, struct sample_data *sample __used,
        }
 
        dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
-                   attr ? __event_name(attr->type, attr->config) : "FAIL",
+                   evsel ? event_name(evsel) : "FAIL",
                    event->read.value);
 
        return 0;
@@ -222,7 +175,7 @@ static int perf_session__setup_sample_type(struct perf_session *self)
        } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE &&
                   !symbol_conf.use_callchain) {
                        symbol_conf.use_callchain = true;
-                       if (register_callchain_param(&callchain_param) < 0) {
+                       if (callchain_register_param(&callchain_param) < 0) {
                                fprintf(stderr, "Can't register callchain"
                                                " params\n");
                                return -EINVAL;
@@ -233,17 +186,17 @@ static int perf_session__setup_sample_type(struct perf_session *self)
 }
 
 static struct perf_event_ops event_ops = {
-       .sample = process_sample_event,
-       .mmap   event__process_mmap,
-       .comm   event__process_comm,
-       .exit   event__process_task,
-       .fork   event__process_task,
-       .lost   event__process_lost,
-       .read   = process_read_event,
-       .attr   event__process_attr,
-       .event_type event__process_event_type,
-       .tracing_data event__process_tracing_data,
-       .build_id event__process_build_id,
+       .sample          = process_sample_event,
+       .mmap            = perf_event__process_mmap,
+       .comm            = perf_event__process_comm,
+       .exit            = perf_event__process_task,
+       .fork            = perf_event__process_task,
+       .lost            = perf_event__process_lost,
+       .read            = process_read_event,
+       .attr            = perf_event__process_attr,
+       .event_type      = perf_event__process_event_type,
+       .tracing_data    = perf_event__process_tracing_data,
+       .build_id        = perf_event__process_build_id,
        .ordered_samples = true,
        .ordering_requires_timestamps = true,
 };
@@ -269,21 +222,21 @@ static size_t hists__fprintf_nr_sample_events(struct hists *self,
        return ret + fprintf(fp, "\n#\n");
 }
 
-static int hists__tty_browse_tree(struct rb_root *tree, const char *help)
+static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
+                                        const char *help)
 {
-       struct rb_node *next = rb_first(tree);
+       struct perf_evsel *pos;
 
-       while (next) {
-               struct hists *hists = rb_entry(next, struct hists, rb_node);
+       list_for_each_entry(pos, &evlist->entries, node) {
+               struct hists *hists = &pos->hists;
                const char *evname = NULL;
 
                if (rb_first(&hists->entries) != rb_last(&hists->entries))
-                       evname = __event_name(hists->type, hists->config);
+                       evname = event_name(pos);
 
                hists__fprintf_nr_sample_events(hists, evname, stdout);
                hists__fprintf(hists, NULL, false, stdout);
                fprintf(stdout, "\n\n");
-               next = rb_next(&hists->rb_node);
        }
 
        if (sort_order == default_sort_order &&
@@ -304,8 +257,9 @@ static int hists__tty_browse_tree(struct rb_root *tree, const char *help)
 static int __cmd_report(void)
 {
        int ret = -EINVAL;
+       u64 nr_samples;
        struct perf_session *session;
-       struct rb_node *next;
+       struct perf_evsel *pos;
        const char *help = "For a higher level overview, try: perf report --sort comm,dso";
 
        signal(SIGINT, sig_handler);
@@ -336,20 +290,24 @@ static int __cmd_report(void)
        if (verbose > 2)
                perf_session__fprintf_dsos(session, stdout);
 
-       next = rb_first(&session->hists_tree);
-       while (next) {
-               struct hists *hists;
+       nr_samples = 0;
+       list_for_each_entry(pos, &session->evlist->entries, node) {
+               struct hists *hists = &pos->hists;
 
-               hists = rb_entry(next, struct hists, rb_node);
                hists__collapse_resort(hists);
                hists__output_resort(hists);
-               next = rb_next(&hists->rb_node);
+               nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE];
+       }
+
+       if (nr_samples == 0) {
+               ui__warning("The %s file has no samples!\n", input_name);
+               goto out_delete;
        }
 
        if (use_browser > 0)
-               hists__tui_browse_tree(&session->hists_tree, help);
+               perf_evlist__tui_browse_hists(session->evlist, help);
        else
-               hists__tty_browse_tree(&session->hists_tree, help);
+               perf_evlist__tty_browse_hists(session->evlist, help);
 
 out_delete:
        /*
@@ -424,7 +382,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
        if (tok2)
                callchain_param.print_limit = strtod(tok2, &endptr);
 setup:
-       if (register_callchain_param(&callchain_param) < 0) {
+       if (callchain_register_param(&callchain_param) < 0) {
                fprintf(stderr, "Can't register callchain params\n");
                return -1;
        }
@@ -498,7 +456,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
                use_browser = 1;
 
        if (strcmp(input_name, "-") != 0)
-               setup_browser();
+               setup_browser(true);
        else
                use_browser = 0;
        /*
@@ -507,7 +465,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
         * implementation.
         */
        if (use_browser > 0) {
-               symbol_conf.priv_size = sizeof(struct sym_priv);
+               symbol_conf.priv_size = sizeof(struct annotation);
+               annotate_init         = symbol__annotate_init;
                /*
                 * For searching by name on the "Browse map details".
                 * providing it only in verbose mode not to bloat too
index 29acb894e035154432c708e1bf6ae7b7db5c2be1..a32f411faeac15f10fbd54e2cac2e547bcc0972f 100644 (file)
@@ -369,11 +369,6 @@ static void
 process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
 {
        int ret = 0;
-       u64 now;
-       long long delta;
-
-       now = get_nsecs();
-       delta = start_time + atom->timestamp - now;
 
        switch (atom->type) {
                case SCHED_EVENT_RUN:
@@ -562,7 +557,7 @@ static void wait_for_tasks(void)
 
 static void run_one_test(void)
 {
-       u64 T0, T1, delta, avg_delta, fluct, std_dev;
+       u64 T0, T1, delta, avg_delta, fluct;
 
        T0 = get_nsecs();
        wait_for_tasks();
@@ -578,7 +573,6 @@ static void run_one_test(void)
        else
                fluct = delta - avg_delta;
        sum_fluct += fluct;
-       std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
        if (!run_avg)
                run_avg = delta;
        run_avg = (run_avg*9 + delta)/10;
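run_avg above is a 90/10 exponential moving average of the per-iteration delta (the dead std_dev computation is what the hunk removes). A worked standalone version of the same smoothing, including the seed-with-first-sample quirk:

#include <stdio.h>

int main(void)
{
	unsigned long long run_avg = 0;
	unsigned long long deltas[] = { 100, 120, 80, 110 };

	for (unsigned int i = 0; i < 4; i++) {
		if (!run_avg)		/* seed with the first sample */
			run_avg = deltas[i];
		run_avg = (run_avg * 9 + deltas[i]) / 10;
		printf("delta=%llu avg=%llu\n", deltas[i], run_avg);
	}
	return 0;
}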
@@ -799,7 +793,7 @@ replay_switch_event(struct trace_switch_event *switch_event,
                    u64 timestamp,
                    struct thread *thread __used)
 {
-       struct task_desc *prev, *next;
+       struct task_desc *prev, __used *next;
        u64 timestamp0;
        s64 delta;
 
@@ -1404,7 +1398,7 @@ map_switch_event(struct trace_switch_event *switch_event,
                 u64 timestamp,
                 struct thread *thread __used)
 {
-       struct thread *sched_out, *sched_in;
+       struct thread *sched_out __used, *sched_in;
        int new_shortname;
        u64 timestamp0;
        s64 delta;
@@ -1580,9 +1574,9 @@ process_sched_migrate_task_event(void *data, struct perf_session *session,
                                                 event, cpu, timestamp, thread);
 }
 
-static void
-process_raw_event(event_t *raw_event __used, struct perf_session *session,
-                 void *data, int cpu, u64 timestamp, struct thread *thread)
+static void process_raw_event(union perf_event *raw_event __used,
+                             struct perf_session *session, void *data, int cpu,
+                             u64 timestamp, struct thread *thread)
 {
        struct event *event;
        int type;
@@ -1607,7 +1601,8 @@ process_raw_event(event_t *raw_event __used, struct perf_session *session,
                process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
 }
 
-static int process_sample_event(event_t *event, struct sample_data *sample,
+static int process_sample_event(union perf_event *event,
+                               struct perf_sample *sample,
                                struct perf_session *session)
 {
        struct thread *thread;
@@ -1635,9 +1630,9 @@ static int process_sample_event(event_t *event, struct sample_data *sample,
 
 static struct perf_event_ops event_ops = {
        .sample                 = process_sample_event,
-       .comm                   = event__process_comm,
-       .lost                   = event__process_lost,
-       .fork                   = event__process_task,
+       .comm                   = perf_event__process_comm,
+       .lost                   = perf_event__process_lost,
+       .fork                   = perf_event__process_task,
        .ordered_samples        = true,
 };
 
index b766c2a9ac975614a7507156b258519ec2a50d0a..5f40df635dcb0dbaddd6e65aa7f6b9678120b6d8 100644 (file)
@@ -63,7 +63,8 @@ static int cleanup_scripting(void)
 
 static char const              *input_name = "perf.data";
 
-static int process_sample_event(event_t *event, struct sample_data *sample,
+static int process_sample_event(union perf_event *event,
+                               struct perf_sample *sample,
                                struct perf_session *session)
 {
        struct thread *thread = perf_session__findnew(session, event->ip.pid);
@@ -100,14 +101,14 @@ static int process_sample_event(event_t *event, struct sample_data *sample,
 }
 
 static struct perf_event_ops event_ops = {
-       .sample = process_sample_event,
-       .comm   = event__process_comm,
-       .attr   = event__process_attr,
-       .event_type = event__process_event_type,
-       .tracing_data = event__process_tracing_data,
-       .build_id = event__process_build_id,
-       .ordering_requires_timestamps = true,
+       .sample          = process_sample_event,
+       .comm            = perf_event__process_comm,
+       .attr            = perf_event__process_attr,
+       .event_type      = perf_event__process_event_type,
+       .tracing_data    = perf_event__process_tracing_data,
+       .build_id        = perf_event__process_build_id,
        .ordered_samples = true,
+       .ordering_requires_timestamps = true,
 };
 
 extern volatile int session_done;
index a482a191a0ca3efac6cafe16ec1c827ba70a97f6..21c025222496ae7dcadc24c052f3bcd93cd2ec94 100644 (file)
 #include "util/parse-options.h"
 #include "util/parse-events.h"
 #include "util/event.h"
+#include "util/evlist.h"
 #include "util/evsel.h"
 #include "util/debug.h"
 #include "util/header.h"
 #include "util/cpumap.h"
 #include "util/thread.h"
+#include "util/thread_map.h"
 
 #include <sys/prctl.h>
 #include <math.h>
@@ -71,8 +73,9 @@ static struct perf_event_attr default_attrs[] = {
 
 };
 
+struct perf_evlist             *evsel_list;
+
 static bool                    system_wide                     =  false;
-static struct cpu_map          *cpus;
 static int                     run_idx                         =  0;
 
 static int                     run_count                       =  1;
@@ -81,7 +84,6 @@ static bool                   scale                           =  true;
 static bool                    no_aggr                         = false;
 static pid_t                   target_pid                      = -1;
 static pid_t                   target_tid                      = -1;
-static struct thread_map       *threads;
 static pid_t                   child_pid                       = -1;
 static bool                    null_run                        =  false;
 static bool                    big_num                         =  true;
@@ -166,7 +168,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
                                    PERF_FORMAT_TOTAL_TIME_RUNNING;
 
        if (system_wide)
-               return perf_evsel__open_per_cpu(evsel, cpus);
+               return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false, false);
 
        attr->inherit = !no_inherit;
        if (target_pid == -1 && target_tid == -1) {
@@ -174,7 +176,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
                attr->enable_on_exec = 1;
        }
 
-       return perf_evsel__open_per_thread(evsel, threads);
+       return perf_evsel__open_per_thread(evsel, evsel_list->threads, false, false);
 }
 
 /*
@@ -199,7 +201,8 @@ static int read_counter_aggr(struct perf_evsel *counter)
        u64 *count = counter->counts->aggr.values;
        int i;
 
-       if (__perf_evsel__read(counter, cpus->nr, threads->nr, scale) < 0)
+       if (__perf_evsel__read(counter, evsel_list->cpus->nr,
+                              evsel_list->threads->nr, scale) < 0)
                return -1;
 
        for (i = 0; i < 3; i++)
@@ -232,7 +235,7 @@ static int read_counter(struct perf_evsel *counter)
        u64 *count;
        int cpu;
 
-       for (cpu = 0; cpu < cpus->nr; cpu++) {
+       for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
                if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
                        return -1;
 
@@ -297,7 +300,7 @@ static int run_perf_stat(int argc __used, const char **argv)
                }
 
                if (target_tid == -1 && target_pid == -1 && !system_wide)
-                       threads->map[0] = child_pid;
+                       evsel_list->threads->map[0] = child_pid;
 
                /*
                 * Wait for the child to be ready to exec.
@@ -309,7 +312,7 @@ static int run_perf_stat(int argc __used, const char **argv)
                close(child_ready_pipe[0]);
        }
 
-       list_for_each_entry(counter, &evsel_list, node) {
+       list_for_each_entry(counter, &evsel_list->entries, node) {
                if (create_perf_stat_counter(counter) < 0) {
                        if (errno == -EPERM || errno == -EACCES) {
                                error("You may not have permission to collect %sstats.\n"
@@ -347,14 +350,15 @@ static int run_perf_stat(int argc __used, const char **argv)
        update_stats(&walltime_nsecs_stats, t1 - t0);
 
        if (no_aggr) {
-               list_for_each_entry(counter, &evsel_list, node) {
+               list_for_each_entry(counter, &evsel_list->entries, node) {
                        read_counter(counter);
-                       perf_evsel__close_fd(counter, cpus->nr, 1);
+                       perf_evsel__close_fd(counter, evsel_list->cpus->nr, 1);
                }
        } else {
-               list_for_each_entry(counter, &evsel_list, node) {
+               list_for_each_entry(counter, &evsel_list->entries, node) {
                        read_counter_aggr(counter);
-                       perf_evsel__close_fd(counter, cpus->nr, threads->nr);
+                       perf_evsel__close_fd(counter, evsel_list->cpus->nr,
+                                            evsel_list->threads->nr);
                }
        }
 
@@ -382,10 +386,13 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
        if (no_aggr)
                sprintf(cpustr, "CPU%*d%s",
                        csv_output ? 0 : -4,
-                       cpus->map[cpu], csv_sep);
+                       evsel_list->cpus->map[cpu], csv_sep);
 
        fprintf(stderr, fmt, cpustr, msecs, csv_sep, event_name(evsel));
 
+       if (evsel->cgrp)
+               fprintf(stderr, "%s%s", csv_sep, evsel->cgrp->name);
+
        if (csv_output)
                return;
 
@@ -410,12 +417,15 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
        if (no_aggr)
                sprintf(cpustr, "CPU%*d%s",
                        csv_output ? 0 : -4,
-                       cpus->map[cpu], csv_sep);
+                       evsel_list->cpus->map[cpu], csv_sep);
        else
                cpu = 0;
 
        fprintf(stderr, fmt, cpustr, avg, csv_sep, event_name(evsel));
 
+       if (evsel->cgrp)
+               fprintf(stderr, "%s%s", csv_sep, evsel->cgrp->name);
+
        if (csv_output)
                return;
 
@@ -456,9 +466,17 @@ static void print_counter_aggr(struct perf_evsel *counter)
        int scaled = counter->counts->scaled;
 
        if (scaled == -1) {
-               fprintf(stderr, "%*s%s%-24s\n",
+               fprintf(stderr, "%*s%s%*s",
                        csv_output ? 0 : 18,
-                       "<not counted>", csv_sep, event_name(counter));
+                       "<not counted>",
+                       csv_sep,
+                       csv_output ? 0 : -24,
+                       event_name(counter));
+
+               if (counter->cgrp)
+                       fprintf(stderr, "%s%s", csv_sep, counter->cgrp->name);
+
+               fputc('\n', stderr);
                return;
        }
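The reworked "<not counted>" lines rely on printf's '*' width specifier: a runtime width of 0 collapses the padding for CSV output, while a negative width left-justifies the event name for the terminal. For instance:

#include <stdio.h>

int main(void)
{
	int csv_output = 0;
	const char *csv_sep = csv_output ? "," : "  ";

	printf("%*s%s%*s|\n",
	       csv_output ? 0 : 18, "<not counted>", csv_sep,
	       csv_output ? 0 : -24, "cycles");	/* '|' marks the pad */
	return 0;
}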
 
@@ -483,7 +501,6 @@ static void print_counter_aggr(struct perf_evsel *counter)
                fprintf(stderr, "  (scaled from %.2f%%)",
                                100 * avg_running / avg_enabled);
        }
-
        fprintf(stderr, "\n");
 }
 
@@ -496,19 +513,23 @@ static void print_counter(struct perf_evsel *counter)
        u64 ena, run, val;
        int cpu;
 
-       for (cpu = 0; cpu < cpus->nr; cpu++) {
+       for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
                val = counter->counts->cpu[cpu].val;
                ena = counter->counts->cpu[cpu].ena;
                run = counter->counts->cpu[cpu].run;
                if (run == 0 || ena == 0) {
-                       fprintf(stderr, "CPU%*d%s%*s%s%-24s",
+                       fprintf(stderr, "CPU%*d%s%*s%s%*s",
                                csv_output ? 0 : -4,
-                               cpus->map[cpu], csv_sep,
+                               evsel_list->cpus->map[cpu], csv_sep,
                                csv_output ? 0 : 18,
                                "<not counted>", csv_sep,
+                               csv_output ? 0 : -24,
                                event_name(counter));
 
-                       fprintf(stderr, "\n");
+                       if (counter->cgrp)
+                               fprintf(stderr, "%s%s", csv_sep, counter->cgrp->name);
+
+                       fputc('\n', stderr);
                        continue;
                }
 
@@ -525,7 +546,7 @@ static void print_counter(struct perf_evsel *counter)
                                        100.0 * run / ena);
                        }
                }
-               fprintf(stderr, "\n");
+               fputc('\n', stderr);
        }
 }
 
@@ -555,10 +576,10 @@ static void print_stat(int argc, const char **argv)
        }
 
        if (no_aggr) {
-               list_for_each_entry(counter, &evsel_list, node)
+               list_for_each_entry(counter, &evsel_list->entries, node)
                        print_counter(counter);
        } else {
-               list_for_each_entry(counter, &evsel_list, node)
+               list_for_each_entry(counter, &evsel_list->entries, node)
                        print_counter_aggr(counter);
        }
 
@@ -610,7 +631,7 @@ static int stat__set_big_num(const struct option *opt __used,
 }
 
 static const struct option options[] = {
-       OPT_CALLBACK('e', "event", NULL, "event",
+       OPT_CALLBACK('e', "event", &evsel_list, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events),
        OPT_BOOLEAN('i', "no-inherit", &no_inherit,
@@ -638,6 +659,9 @@ static const struct option options[] = {
                    "disable CPU count aggregation"),
        OPT_STRING('x', "field-separator", &csv_sep, "separator",
                   "print counts with custom separator"),
+       OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
+                    "monitor event in cgroup name only",
+                    parse_cgroups),
        OPT_END()
 };
 
@@ -648,6 +672,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
 
        setlocale(LC_ALL, "");
 
+       evsel_list = perf_evlist__new(NULL, NULL);
+       if (evsel_list == NULL)
+               return -ENOMEM;
+
        argc = parse_options(argc, argv, options, stat_usage,
                PARSE_OPT_STOP_AT_NON_OPTION);
 
@@ -674,49 +702,50 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
        if (run_count <= 0)
                usage_with_options(stat_usage, options);
 
-       /* no_aggr is for system-wide only */
-       if (no_aggr && !system_wide)
+       /* no_aggr, cgroup are for system-wide only */
+       if ((no_aggr || nr_cgroups) && !system_wide) {
+               fprintf(stderr, "both cgroup and no-aggregation "
+                       "modes only available in system-wide mode\n");
+
                usage_with_options(stat_usage, options);
+       }
 
        /* Set attrs and nr_counters if no event is selected and !null_run */
-       if (!null_run && !nr_counters) {
+       if (!null_run && !evsel_list->nr_entries) {
                size_t c;
 
-               nr_counters = ARRAY_SIZE(default_attrs);
-
                for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) {
-                       pos = perf_evsel__new(&default_attrs[c],
-                                             nr_counters);
+                       pos = perf_evsel__new(&default_attrs[c], c);
                        if (pos == NULL)
                                goto out;
-                       list_add(&pos->node, &evsel_list);
+                       perf_evlist__add(evsel_list, pos);
                }
        }
 
        if (target_pid != -1)
                target_tid = target_pid;
 
-       threads = thread_map__new(target_pid, target_tid);
-       if (threads == NULL) {
+       evsel_list->threads = thread_map__new(target_pid, target_tid);
+       if (evsel_list->threads == NULL) {
                pr_err("Problems finding threads of monitor\n");
                usage_with_options(stat_usage, options);
        }
 
        if (system_wide)
-               cpus = cpu_map__new(cpu_list);
+               evsel_list->cpus = cpu_map__new(cpu_list);
        else
-               cpus = cpu_map__dummy_new();
+               evsel_list->cpus = cpu_map__dummy_new();
 
-       if (cpus == NULL) {
+       if (evsel_list->cpus == NULL) {
                perror("failed to parse CPUs map");
                usage_with_options(stat_usage, options);
                return -1;
        }
 
-       list_for_each_entry(pos, &evsel_list, node) {
+       list_for_each_entry(pos, &evsel_list->entries, node) {
                if (perf_evsel__alloc_stat_priv(pos) < 0 ||
-                   perf_evsel__alloc_counts(pos, cpus->nr) < 0 ||
-                   perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
+                   perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0 ||
+                   perf_evsel__alloc_fd(pos, evsel_list->cpus->nr, evsel_list->threads->nr) < 0)
                        goto out_free_fd;
        }
 
@@ -741,11 +770,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
        if (status != -1)
                print_stat(argc, argv);
 out_free_fd:
-       list_for_each_entry(pos, &evsel_list, node)
+       list_for_each_entry(pos, &evsel_list->entries, node)
                perf_evsel__free_stat_priv(pos);
-       perf_evsel_list__delete();
+       perf_evlist__delete_maps(evsel_list);
 out:
-       thread_map__delete(threads);
-       threads = NULL;
+       perf_evlist__delete(evsel_list);
        return status;
 }
index 5dcdba653d7021d4ed0e2b04ad53443b034833c7..1b2106c58f660c83a858636df0862c458d127eb8 100644 (file)
@@ -7,10 +7,11 @@
 
 #include "util/cache.h"
 #include "util/debug.h"
+#include "util/evlist.h"
 #include "util/parse-options.h"
-#include "util/session.h"
+#include "util/parse-events.h"
 #include "util/symbol.h"
-#include "util/thread.h"
+#include "util/thread_map.h"
 
 static long page_size;
 
@@ -238,14 +239,14 @@ out:
 #include "util/evsel.h"
 #include <sys/types.h>
 
-static int trace_event__id(const char *event_name)
+static int trace_event__id(const char *evname)
 {
        char *filename;
        int err = -1, fd;
 
        if (asprintf(&filename,
                     "/sys/kernel/debug/tracing/events/syscalls/%s/id",
-                    event_name) < 0)
+                    evname) < 0)
                return -1;
 
        fd = open(filename, O_RDONLY);
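trace_event__id above resolves a syscall tracepoint to its numeric id by reading a one-line debugfs file. A self-contained version of the same lookup, assuming debugfs is mounted at the usual /sys/kernel/debug:

#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int tracepoint_id(const char *evname)
{
	char path[PATH_MAX], buf[32];
	ssize_t n;
	int fd, id = -1;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/events/syscalls/%s/id", evname);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;	/* debugfs missing or not a syscall event */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		id = atoi(buf);
	}
	close(fd);
	return id;
}

int main(void)
{
	printf("sys_enter_getppid id: %d\n",
	       tracepoint_id("sys_enter_getppid"));
	return 0;
}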
@@ -289,7 +290,7 @@ static int test__open_syscall_event(void)
                goto out_thread_map_delete;
        }
 
-       if (perf_evsel__open_per_thread(evsel, threads) < 0) {
+       if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
@@ -347,9 +348,9 @@ static int test__open_syscall_event_on_all_cpus(void)
        }
 
        cpus = cpu_map__new(NULL);
-       if (threads == NULL) {
-               pr_debug("thread_map__new\n");
-               return -1;
+       if (cpus == NULL) {
+               pr_debug("cpu_map__new\n");
+               goto out_thread_map_delete;
        }
 
 
@@ -364,7 +365,7 @@ static int test__open_syscall_event_on_all_cpus(void)
                goto out_thread_map_delete;
        }
 
-       if (perf_evsel__open(evsel, cpus, threads) < 0) {
+       if (perf_evsel__open(evsel, cpus, threads, false, false) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
@@ -408,6 +409,8 @@ static int test__open_syscall_event_on_all_cpus(void)
                goto out_close_fd;
        }
 
+       err = 0;
+
        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int expected;
 
@@ -416,18 +419,18 @@ static int test__open_syscall_event_on_all_cpus(void)
 
                if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
                        pr_debug("perf_evsel__open_read_on_cpu\n");
-                       goto out_close_fd;
+                       err = -1;
+                       break;
                }
 
                expected = nr_open_calls + cpu;
                if (evsel->counts->cpu[cpu].val != expected) {
                        pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
                                 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
-                       goto out_close_fd;
+                       err = -1;
                }
        }
 
-       err = 0;
 out_close_fd:
        perf_evsel__close_fd(evsel, 1, threads->nr);
 out_evsel_delete:
@@ -437,6 +440,159 @@ out_thread_map_delete:
        return err;
 }
 
+/*
+ * This test will generate random numbers of calls to some getpid syscalls,
+ * then establish an mmap for a group of events that are created to monitor
+ * the syscalls.
+ *
+ * It will receive the events via mmap and use the PERF_SAMPLE_ID generated
+ * sample.id field to map each one back to its respective perf_evsel instance.
+ *
+ * Then it checks if the number of syscalls reported as perf events by
+ * the kernel corresponds to the number of syscalls made.
+ */
+static int test__basic_mmap(void)
+{
+       int err = -1;
+       union perf_event *event;
+       struct thread_map *threads;
+       struct cpu_map *cpus;
+       struct perf_evlist *evlist;
+       struct perf_event_attr attr = {
+               .type           = PERF_TYPE_TRACEPOINT,
+               .read_format    = PERF_FORMAT_ID,
+               .sample_type    = PERF_SAMPLE_ID,
+               .watermark      = 0,
+       };
+       cpu_set_t cpu_set;
+       const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
+                                       "getpgid", };
+       pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
+                                     (void*)getpgid };
+#define nsyscalls ARRAY_SIZE(syscall_names)
+       int ids[nsyscalls];
+       unsigned int nr_events[nsyscalls],
+                    expected_nr_events[nsyscalls], i, j;
+       struct perf_evsel *evsels[nsyscalls], *evsel;
+
+       for (i = 0; i < nsyscalls; ++i) {
+               char name[64];
+
+               snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
+               ids[i] = trace_event__id(name);
+               if (ids[i] < 0) {
+                       pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
+                       return -1;
+               }
+               nr_events[i] = 0;
+               expected_nr_events[i] = random() % 257;
+       }
+
+       threads = thread_map__new(-1, getpid());
+       if (threads == NULL) {
+               pr_debug("thread_map__new\n");
+               return -1;
+       }
+
+       cpus = cpu_map__new(NULL);
+       if (cpus == NULL) {
+               pr_debug("cpu_map__new\n");
+               goto out_free_threads;
+       }
+
+       CPU_ZERO(&cpu_set);
+       CPU_SET(cpus->map[0], &cpu_set);
+       sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
+       if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
+               pr_debug("sched_setaffinity() failed on CPU %d: %s ",
+                        cpus->map[0], strerror(errno));
+               goto out_free_cpus;
+       }
+
+       evlist = perf_evlist__new(cpus, threads);
+       if (evlist == NULL) {
+               pr_debug("perf_evlist__new\n");
+               goto out_free_cpus;
+       }
+
+       /* anonymous union fields, can't be initialized above */
+       attr.wakeup_events = 1;
+       attr.sample_period = 1;
+
+       for (i = 0; i < nsyscalls; ++i) {
+               attr.config = ids[i];
+               evsels[i] = perf_evsel__new(&attr, i);
+               if (evsels[i] == NULL) {
+                       pr_debug("perf_evsel__new\n");
+                       goto out_free_evlist;
+               }
+
+               perf_evlist__add(evlist, evsels[i]);
+
+               if (perf_evsel__open(evsels[i], cpus, threads, false, false) < 0) {
+                       pr_debug("failed to open counter: %s, "
+                                "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+                                strerror(errno));
+                       goto out_close_fd;
+               }
+       }
+
+       if (perf_evlist__mmap(evlist, 128, true) < 0) {
+               pr_debug("failed to mmap events: %d (%s)\n", errno,
+                        strerror(errno));
+               goto out_close_fd;
+       }
+
+       for (i = 0; i < nsyscalls; ++i)
+               for (j = 0; j < expected_nr_events[i]; ++j) {
+                       int foo = syscalls[i]();
+                       ++foo;
+               }
+
+       while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) {
+               struct perf_sample sample;
+
+               if (event->header.type != PERF_RECORD_SAMPLE) {
+                       pr_debug("unexpected %s event\n",
+                                perf_event__name(event->header.type));
+                       goto out_munmap;
+               }
+
+               perf_event__parse_sample(event, attr.sample_type, false, &sample);
+               evsel = perf_evlist__id2evsel(evlist, sample.id);
+               if (evsel == NULL) {
+                       pr_debug("event with id %" PRIu64
+                                " doesn't map to an evsel\n", sample.id);
+                       goto out_munmap;
+               }
+               nr_events[evsel->idx]++;
+       }
+
+       list_for_each_entry(evsel, &evlist->entries, node) {
+               if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
+                       pr_debug("expected %d %s events, got %d\n",
+                                expected_nr_events[evsel->idx],
+                                event_name(evsel), nr_events[evsel->idx]);
+                       goto out_munmap;
+               }
+       }
+
+       err = 0;
+out_munmap:
+       perf_evlist__munmap(evlist);
+out_close_fd:
+       for (i = 0; i < nsyscalls; ++i)
+               perf_evsel__close_fd(evsels[i], 1, threads->nr);
+out_free_evlist:
+       perf_evlist__delete(evlist);
+out_free_cpus:
+       cpu_map__delete(cpus);
+out_free_threads:
+       thread_map__delete(threads);
+       return err;
+#undef nsyscalls
+}
+
 static struct test {
        const char *desc;
        int (*func)(void);
@@ -453,6 +609,10 @@ static struct test {
                .desc = "detect open syscall event on all cpus",
                .func = test__open_syscall_event_on_all_cpus,
        },
+       {
+               .desc = "read samples using the mmap interface",
+               .func = test__basic_mmap,
+       },
        {
                .func = NULL,
        },
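The new entry hooks into perf's test table, a NULL-terminated array of description/function pairs walked by the test driver. The same pattern in isolation (names illustrative):

#include <stdio.h>

struct test {
	const char *desc;
	int (*func)(void);
};

static int test_ok(void) { return 0; }

static struct test tests[] = {
	{ .desc = "always passes", .func = test_ok },
	{ .func = NULL },	/* sentinel terminates the walk */
};

int main(void)
{
	for (int i = 0; tests[i].func; i++)
		printf("%2d: %s: %s\n", i + 1, tests[i].desc,
		       tests[i].func() ? "FAILED" : "Ok");
	return 0;
}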
index 746cf03cb05d86a2796c88fca27930ca6e0a8894..67c0459dc325276dbf690889f07f580632f46ab5 100644 (file)
@@ -264,9 +264,6 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
                c->start_time = start;
        if (p->start_time == 0 || p->start_time > start)
                p->start_time = start;
-
-       if (cpu > numcpus)
-               numcpus = cpu;
 }
 
 #define MAX_CPUS 4096
@@ -276,21 +273,24 @@ static int cpus_cstate_state[MAX_CPUS];
 static u64 cpus_pstate_start_times[MAX_CPUS];
 static u64 cpus_pstate_state[MAX_CPUS];
 
-static int process_comm_event(event_t *event, struct sample_data *sample __used,
+static int process_comm_event(union perf_event *event,
+                             struct perf_sample *sample __used,
                              struct perf_session *session __used)
 {
        pid_set_comm(event->comm.tid, event->comm.comm);
        return 0;
 }
 
-static int process_fork_event(event_t *event, struct sample_data *sample __used,
+static int process_fork_event(union perf_event *event,
+                             struct perf_sample *sample __used,
                              struct perf_session *session __used)
 {
        pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
        return 0;
 }
 
-static int process_exit_event(event_t *event, struct sample_data *sample __used,
+static int process_exit_event(union perf_event *event,
+                             struct perf_sample *sample __used,
                              struct perf_session *session __used)
 {
        pid_exit(event->fork.pid, event->fork.time);
@@ -486,8 +486,8 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
 }
 
 
-static int process_sample_event(event_t *event __used,
-                               struct sample_data *sample,
+static int process_sample_event(union perf_event *event __used,
+                               struct perf_sample *sample,
                                struct perf_session *session)
 {
        struct trace_entry *te;
@@ -511,6 +511,9 @@ static int process_sample_event(event_t *event __used,
                if (!event_str)
                        return 0;
 
+               if (sample->cpu > numcpus)
+                       numcpus = sample->cpu;
+
                if (strcmp(event_str, "power:cpu_idle") == 0) {
                        struct power_processor_entry *ppe = (void *)te;
                        if (ppe->state == (u32)PWR_EVENT_EXIT)
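The timechart hunks above are mechanical conversions to the reworked event API: event_t becomes union perf_event, struct sample_data becomes struct perf_sample, and the CPU count is now taken from the parsed sample rather than from a helper argument. Any handler with the new shape follows the same template; an illustrative (hypothetical, not from this patch) one, assuming the perf-internal headers:

    static int process_lost_event(union perf_event *event,
                                  struct perf_sample *sample __used,
                                  struct perf_session *session __used)
    {
            /* event->lost mirrors the PERF_RECORD_LOST record layout */
            pr_debug("lost %" PRIu64 " events\n", event->lost.lost);
            return 0;
    }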
index 5a29d9cd948621a9e357c27a156baa3bff8b5556..80c9e062bd5b6e067c882d0f32cc3b2346ef2c8a 100644
 
 #include "perf.h"
 
+#include "util/annotate.h"
+#include "util/cache.h"
 #include "util/color.h"
+#include "util/evlist.h"
 #include "util/evsel.h"
 #include "util/session.h"
 #include "util/symbol.h"
 #include "util/thread.h"
+#include "util/thread_map.h"
+#include "util/top.h"
 #include "util/util.h"
 #include <linux/rbtree.h>
 #include "util/parse-options.h"
@@ -45,7 +50,6 @@
 #include <errno.h>
 #include <time.h>
 #include <sched.h>
-#include <pthread.h>
 
 #include <sys/syscall.h>
 #include <sys/ioctl.h>
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 
+static struct perf_top top = {
+       .count_filter           = 5,
+       .delay_secs             = 2,
+       .display_weighted       = -1,
+       .target_pid             = -1,
+       .target_tid             = -1,
+       .active_symbols         = LIST_HEAD_INIT(top.active_symbols),
+       .active_symbols_lock    = PTHREAD_MUTEX_INITIALIZER,
+       .active_symbols_cond    = PTHREAD_COND_INITIALIZER,
+       .freq                   = 1000, /* 1 kHz */
+};
+
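Folding the former globals into one struct perf_top keeps the designated-initializer style; note that LIST_HEAD_INIT may legally reference top itself, since taking the address of a sub-object of a static-storage object is an address constant. Stripped to its essentials (a standalone illustration, not the patch's code):

    struct list_head { struct list_head *next, *prev; };
    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    static struct {
            struct list_head active_symbols;
    } top = {
            .active_symbols = LIST_HEAD_INIT(top.active_symbols),
    };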
 static bool                    system_wide                     =  false;
 
-static int                     default_interval                =      0;
+static bool                    use_tui, use_stdio;
 
-static int                     count_filter                    =      5;
-static int                     print_entries;
+static int                     default_interval                =      0;
 
-static int                     target_pid                      =     -1;
-static int                     target_tid                      =     -1;
-static struct thread_map       *threads;
 static bool                    inherit                         =  false;
-static struct cpu_map          *cpus;
 static int                     realtime_prio                   =      0;
 static bool                    group                           =  false;
 static unsigned int            page_size;
-static unsigned int            mmap_pages                      =     16;
-static int                     freq                            =   1000; /* 1 KHz */
+static unsigned int            mmap_pages                      =    128;
 
-static int                     delay_secs                      =      2;
-static bool                    zero                            =  false;
 static bool                    dump_symtab                     =  false;
 
-static bool                    hide_kernel_symbols             =  false;
-static bool                    hide_user_symbols               =  false;
 static struct winsize          winsize;
 
-/*
- * Source
- */
-
-struct source_line {
-       u64                     eip;
-       unsigned long           count[MAX_COUNTERS];
-       char                    *line;
-       struct source_line      *next;
-};
-
 static const char              *sym_filter                     =   NULL;
-struct sym_entry               *sym_filter_entry               =   NULL;
 struct sym_entry               *sym_filter_entry_sched         =   NULL;
 static int                     sym_pcnt_filter                 =      5;
-static int                     sym_counter                     =      0;
-static struct perf_evsel       *sym_evsel                      =   NULL;
-static int                     display_weighted                =     -1;
-static const char              *cpu_list;
-
-/*
- * Symbols
- */
-
-struct sym_entry_source {
-       struct source_line      *source;
-       struct source_line      *lines;
-       struct source_line      **lines_tail;
-       pthread_mutex_t         lock;
-};
-
-struct sym_entry {
-       struct rb_node          rb_node;
-       struct list_head        node;
-       unsigned long           snap_count;
-       double                  weight;
-       int                     skip;
-       u16                     name_len;
-       u8                      origin;
-       struct map              *map;
-       struct sym_entry_source *src;
-       unsigned long           count[0];
-};
 
 /*
  * Source functions
  */
 
-static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
-{
-       return ((void *)self) + symbol_conf.priv_size;
-}
-
 void get_term_dimensions(struct winsize *ws)
 {
        char *s = getenv("LINES");
@@ -163,10 +124,10 @@ void get_term_dimensions(struct winsize *ws)
 
 static void update_print_entries(struct winsize *ws)
 {
-       print_entries = ws->ws_row;
+       top.print_entries = ws->ws_row;
 
-       if (print_entries > 9)
-               print_entries -= 9;
+       if (top.print_entries > 9)
+               top.print_entries -= 9;
 }
 
 static void sig_winch_handler(int sig __used)
@@ -178,12 +139,9 @@ static void sig_winch_handler(int sig __used)
 static int parse_source(struct sym_entry *syme)
 {
        struct symbol *sym;
-       struct sym_entry_source *source;
+       struct annotation *notes;
        struct map *map;
-       FILE *file;
-       char command[PATH_MAX*2];
-       const char *path;
-       u64 len;
+       int err = -1;
 
        if (!syme)
                return -1;
@@ -194,411 +152,137 @@ static int parse_source(struct sym_entry *syme)
        /*
         * We can't annotate with just /proc/kallsyms
         */
-       if (map->dso->origin == DSO__ORIG_KERNEL)
+       if (map->dso->origin == DSO__ORIG_KERNEL) {
+               pr_err("Can't annotate %s: No vmlinux file was found in the "
+                      "path\n", sym->name);
+               sleep(1);
                return -1;
-
-       if (syme->src == NULL) {
-               syme->src = zalloc(sizeof(*source));
-               if (syme->src == NULL)
-                       return -1;
-               pthread_mutex_init(&syme->src->lock, NULL);
        }
 
-       source = syme->src;
-
-       if (source->lines) {
-               pthread_mutex_lock(&source->lock);
+       notes = symbol__annotation(sym);
+       if (notes->src != NULL) {
+               pthread_mutex_lock(&notes->lock);
                goto out_assign;
        }
-       path = map->dso->long_name;
-
-       len = sym->end - sym->start;
-
-       sprintf(command,
-               "objdump --start-address=%#0*" PRIx64 " --stop-address=%#0*" PRIx64 " -dS %s",
-               BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
-               BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);
-
-       file = popen(command, "r");
-       if (!file)
-               return -1;
-
-       pthread_mutex_lock(&source->lock);
-       source->lines_tail = &source->lines;
-       while (!feof(file)) {
-               struct source_line *src;
-               size_t dummy = 0;
-               char *c, *sep;
 
-               src = malloc(sizeof(struct source_line));
-               assert(src != NULL);
-               memset(src, 0, sizeof(struct source_line));
+       pthread_mutex_lock(&notes->lock);
 
-               if (getline(&src->line, &dummy, file) < 0)
-                       break;
-               if (!src->line)
-                       break;
-
-               c = strchr(src->line, '\n');
-               if (c)
-                       *c = 0;
-
-               src->next = NULL;
-               *source->lines_tail = src;
-               source->lines_tail = &src->next;
-
-               src->eip = strtoull(src->line, &sep, 16);
-               if (*sep == ':')
-                       src->eip = map__objdump_2ip(map, src->eip);
-               else /* this line has no ip info (e.g. source line) */
-                       src->eip = 0;
+       if (symbol__alloc_hist(sym, top.evlist->nr_entries) < 0) {
+               pthread_mutex_unlock(&notes->lock);
+               pr_err("Not enough memory for annotating '%s' symbol!\n",
+                      sym->name);
+               sleep(1);
+               return err;
        }
-       pclose(file);
+
+       err = symbol__annotate(sym, syme->map, 0);
+       if (err == 0) {
 out_assign:
-       sym_filter_entry = syme;
-       pthread_mutex_unlock(&source->lock);
-       return 0;
+               top.sym_filter_entry = syme;
+       }
+
+       pthread_mutex_unlock(&notes->lock);
+       return err;
 }
 
 static void __zero_source_counters(struct sym_entry *syme)
 {
-       int i;
-       struct source_line *line;
-
-       line = syme->src->lines;
-       while (line) {
-               for (i = 0; i < nr_counters; i++)
-                       line->count[i] = 0;
-               line = line->next;
-       }
+       struct symbol *sym = sym_entry__symbol(syme);
+       symbol__annotate_zero_histograms(sym);
 }
 
 static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
 {
-       struct source_line *line;
-
-       if (syme != sym_filter_entry)
-               return;
+       struct annotation *notes;
+       struct symbol *sym;
 
-       if (pthread_mutex_trylock(&syme->src->lock))
+       if (syme != top.sym_filter_entry)
                return;
 
-       if (syme->src == NULL || syme->src->source == NULL)
-               goto out_unlock;
-
-       for (line = syme->src->lines; line; line = line->next) {
-               /* skip lines without IP info */
-               if (line->eip == 0)
-                       continue;
-               if (line->eip == ip) {
-                       line->count[counter]++;
-                       break;
-               }
-               if (line->eip > ip)
-                       break;
-       }
-out_unlock:
-       pthread_mutex_unlock(&syme->src->lock);
-}
-
-#define PATTERN_LEN            (BITS_PER_LONG / 4 + 2)
-
-static void lookup_sym_source(struct sym_entry *syme)
-{
-       struct symbol *symbol = sym_entry__symbol(syme);
-       struct source_line *line;
-       char pattern[PATTERN_LEN + 1];
-
-       sprintf(pattern, "%0*" PRIx64 " <", BITS_PER_LONG / 4,
-               map__rip_2objdump(syme->map, symbol->start));
-
-       pthread_mutex_lock(&syme->src->lock);
-       for (line = syme->src->lines; line; line = line->next) {
-               if (memcmp(line->line, pattern, PATTERN_LEN) == 0) {
-                       syme->src->source = line;
-                       break;
-               }
-       }
-       pthread_mutex_unlock(&syme->src->lock);
-}
+       sym = sym_entry__symbol(syme);
+       notes = symbol__annotation(sym);
 
-static void show_lines(struct source_line *queue, int count, int total)
-{
-       int i;
-       struct source_line *line;
+       if (pthread_mutex_trylock(&notes->lock))
+               return;
 
-       line = queue;
-       for (i = 0; i < count; i++) {
-               float pcnt = 100.0*(float)line->count[sym_counter]/(float)total;
+       ip = syme->map->map_ip(syme->map, ip);
+       symbol__inc_addr_samples(sym, syme->map, counter, ip);
 
-               printf("%8li %4.1f%%\t%s\n", line->count[sym_counter], pcnt, line->line);
-               line = line->next;
-       }
+       pthread_mutex_unlock(&notes->lock);
 }
 
-#define TRACE_COUNT     3
-
 static void show_details(struct sym_entry *syme)
 {
+       struct annotation *notes;
        struct symbol *symbol;
-       struct source_line *line;
-       struct source_line *line_queue = NULL;
-       int displayed = 0;
-       int line_queue_count = 0, total = 0, more = 0;
+       int more;
 
        if (!syme)
                return;
 
-       if (!syme->src->source)
-               lookup_sym_source(syme);
-
-       if (!syme->src->source)
-               return;
-
        symbol = sym_entry__symbol(syme);
-       printf("Showing %s for %s\n", event_name(sym_evsel), symbol->name);
-       printf("  Events  Pcnt (>=%d%%)\n", sym_pcnt_filter);
-
-       pthread_mutex_lock(&syme->src->lock);
-       line = syme->src->source;
-       while (line) {
-               total += line->count[sym_counter];
-               line = line->next;
-       }
-
-       line = syme->src->source;
-       while (line) {
-               float pcnt = 0.0;
-
-               if (!line_queue_count)
-                       line_queue = line;
-               line_queue_count++;
-
-               if (line->count[sym_counter])
-                       pcnt = 100.0 * line->count[sym_counter] / (float)total;
-               if (pcnt >= (float)sym_pcnt_filter) {
-                       if (displayed <= print_entries)
-                               show_lines(line_queue, line_queue_count, total);
-                       else more++;
-                       displayed += line_queue_count;
-                       line_queue_count = 0;
-                       line_queue = NULL;
-               } else if (line_queue_count > TRACE_COUNT) {
-                       line_queue = line_queue->next;
-                       line_queue_count--;
-               }
-
-               line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
-               line = line->next;
-       }
-       pthread_mutex_unlock(&syme->src->lock);
-       if (more)
-               printf("%d lines not displayed, maybe increase display entries [e]\n", more);
-}
+       notes = symbol__annotation(symbol);
 
-/*
- * Symbols will be added here in event__process_sample and will get out
- * after decayed.
- */
-static LIST_HEAD(active_symbols);
-static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;
-
-/*
- * Ordering weight: count-1 * count-2 * ... / count-n
- */
-static double sym_weight(const struct sym_entry *sym)
-{
-       double weight = sym->snap_count;
-       int counter;
-
-       if (!display_weighted)
-               return weight;
+       pthread_mutex_lock(&notes->lock);
 
-       for (counter = 1; counter < nr_counters-1; counter++)
-               weight *= sym->count[counter];
+       if (notes->src == NULL)
+               goto out_unlock;
 
-       weight /= (sym->count[counter] + 1);
+       printf("Showing %s for %s\n", event_name(top.sym_evsel), symbol->name);
+       printf("  Events  Pcnt (>=%d%%)\n", sym_pcnt_filter);
 
-       return weight;
+       more = symbol__annotate_printf(symbol, syme->map, top.sym_evsel->idx,
+                                      0, sym_pcnt_filter, top.print_entries, 4);
+       if (top.zero)
+               symbol__annotate_zero_histogram(symbol, top.sym_evsel->idx);
+       else
+               symbol__annotate_decay_histogram(symbol, top.sym_evsel->idx);
+       if (more != 0)
+               printf("%d lines not displayed, maybe increase display entries [e]\n", more);
+out_unlock:
+       pthread_mutex_unlock(&notes->lock);
 }
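The zero/decay helpers called above centralize the aging that the removed loop did inline; per the deleted line visible further up in this diff, the per-line rule applied on each refresh amounts to (sketch, generic names):

    count = zero ? 0 : count * 7 / 8;   /* drop an eighth per refresh, or reset */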
 
-static long                    samples;
-static long                    kernel_samples, us_samples;
-static long                    exact_samples;
-static long                    guest_us_samples, guest_kernel_samples;
 static const char              CONSOLE_CLEAR[] = "\e[H\e[2J";
 
 static void __list_insert_active_sym(struct sym_entry *syme)
 {
-       list_add(&syme->node, &active_symbols);
-}
-
-static void list_remove_active_sym(struct sym_entry *syme)
-{
-       pthread_mutex_lock(&active_symbols_lock);
-       list_del_init(&syme->node);
-       pthread_mutex_unlock(&active_symbols_lock);
+       list_add(&syme->node, &top.active_symbols);
 }
 
-static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
+static void print_sym_table(struct perf_session *session)
 {
-       struct rb_node **p = &tree->rb_node;
-       struct rb_node *parent = NULL;
-       struct sym_entry *iter;
-
-       while (*p != NULL) {
-               parent = *p;
-               iter = rb_entry(parent, struct sym_entry, rb_node);
-
-               if (se->weight > iter->weight)
-                       p = &(*p)->rb_left;
-               else
-                       p = &(*p)->rb_right;
-       }
-
-       rb_link_node(&se->rb_node, parent, p);
-       rb_insert_color(&se->rb_node, tree);
-}
-
-static void print_sym_table(void)
-{
-       int printed = 0, j;
-       struct perf_evsel *counter;
-       int snap = !display_weighted ? sym_counter : 0;
-       float samples_per_sec = samples/delay_secs;
-       float ksamples_per_sec = kernel_samples/delay_secs;
-       float us_samples_per_sec = (us_samples)/delay_secs;
-       float guest_kernel_samples_per_sec = (guest_kernel_samples)/delay_secs;
-       float guest_us_samples_per_sec = (guest_us_samples)/delay_secs;
-       float esamples_percent = (100.0*exact_samples)/samples;
-       float sum_ksamples = 0.0;
-       struct sym_entry *syme, *n;
-       struct rb_root tmp = RB_ROOT;
+       char bf[160];
+       int printed = 0;
        struct rb_node *nd;
-       int sym_width = 0, dso_width = 0, dso_short_width = 0;
+       struct sym_entry *syme;
+       struct rb_root tmp = RB_ROOT;
        const int win_width = winsize.ws_col - 1;
-
-       samples = us_samples = kernel_samples = exact_samples = 0;
-       guest_kernel_samples = guest_us_samples = 0;
-
-       /* Sort the active symbols */
-       pthread_mutex_lock(&active_symbols_lock);
-       syme = list_entry(active_symbols.next, struct sym_entry, node);
-       pthread_mutex_unlock(&active_symbols_lock);
-
-       list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
-               syme->snap_count = syme->count[snap];
-               if (syme->snap_count != 0) {
-
-                       if ((hide_user_symbols &&
-                            syme->origin == PERF_RECORD_MISC_USER) ||
-                           (hide_kernel_symbols &&
-                            syme->origin == PERF_RECORD_MISC_KERNEL)) {
-                               list_remove_active_sym(syme);
-                               continue;
-                       }
-                       syme->weight = sym_weight(syme);
-                       rb_insert_active_sym(&tmp, syme);
-                       sum_ksamples += syme->snap_count;
-
-                       for (j = 0; j < nr_counters; j++)
-                               syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
-               } else
-                       list_remove_active_sym(syme);
-       }
+       int sym_width, dso_width, dso_short_width;
+       float sum_ksamples = perf_top__decay_samples(&top, &tmp);
 
        puts(CONSOLE_CLEAR);
 
-       printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
-       if (!perf_guest) {
-               printf("   PerfTop:%8.0f irqs/sec  kernel:%4.1f%%"
-                       "  exact: %4.1f%% [",
-                       samples_per_sec,
-                       100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
-                                        samples_per_sec)),
-                       esamples_percent);
-       } else {
-               printf("   PerfTop:%8.0f irqs/sec  kernel:%4.1f%% us:%4.1f%%"
-                       " guest kernel:%4.1f%% guest us:%4.1f%%"
-                       " exact: %4.1f%% [",
-                       samples_per_sec,
-                       100.0 - (100.0 * ((samples_per_sec-ksamples_per_sec) /
-                                         samples_per_sec)),
-                       100.0 - (100.0 * ((samples_per_sec-us_samples_per_sec) /
-                                         samples_per_sec)),
-                       100.0 - (100.0 * ((samples_per_sec -
-                                               guest_kernel_samples_per_sec) /
-                                         samples_per_sec)),
-                       100.0 - (100.0 * ((samples_per_sec -
-                                          guest_us_samples_per_sec) /
-                                         samples_per_sec)),
-                       esamples_percent);
-       }
-
-       if (nr_counters == 1 || !display_weighted) {
-               struct perf_evsel *first;
-               first = list_entry(evsel_list.next, struct perf_evsel, node);
-               printf("%" PRIu64, (uint64_t)first->attr.sample_period);
-               if (freq)
-                       printf("Hz ");
-               else
-                       printf(" ");
-       }
-
-       if (!display_weighted)
-               printf("%s", event_name(sym_evsel));
-       else list_for_each_entry(counter, &evsel_list, node) {
-               if (counter->idx)
-                       printf("/");
-
-               printf("%s", event_name(counter));
-       }
+       perf_top__header_snprintf(&top, bf, sizeof(bf));
+       printf("%s\n", bf);
 
-       printf( "], ");
-
-       if (target_pid != -1)
-               printf(" (target_pid: %d", target_pid);
-       else if (target_tid != -1)
-               printf(" (target_tid: %d", target_tid);
-       else
-               printf(" (all");
-
-       if (cpu_list)
-               printf(", CPU%s: %s)\n", cpus->nr > 1 ? "s" : "", cpu_list);
-       else {
-               if (target_tid != -1)
-                       printf(")\n");
-               else
-                       printf(", %d CPU%s)\n", cpus->nr, cpus->nr > 1 ? "s" : "");
-       }
+       perf_top__reset_sample_counters(&top);
 
        printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
 
-       if (sym_filter_entry) {
-               show_details(sym_filter_entry);
-               return;
+       if (session->hists.stats.total_lost != 0) {
+               color_fprintf(stdout, PERF_COLOR_RED, "WARNING:");
+               printf(" LOST %" PRIu64 " events, check IO/CPU overload\n",
+                      session->hists.stats.total_lost);
        }
 
-       /*
-        * Find the longest symbol name that will be displayed
-        */
-       for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
-               syme = rb_entry(nd, struct sym_entry, rb_node);
-               if (++printed > print_entries ||
-                   (int)syme->snap_count < count_filter)
-                       continue;
-
-               if (syme->map->dso->long_name_len > dso_width)
-                       dso_width = syme->map->dso->long_name_len;
-
-               if (syme->map->dso->short_name_len > dso_short_width)
-                       dso_short_width = syme->map->dso->short_name_len;
-
-               if (syme->name_len > sym_width)
-                       sym_width = syme->name_len;
+       if (top.sym_filter_entry) {
+               show_details(top.sym_filter_entry);
+               return;
        }
 
-       printed = 0;
+       perf_top__find_widths(&top, &tmp, &dso_width, &dso_short_width,
+                             &sym_width);
 
        if (sym_width + dso_width > winsize.ws_col - 29) {
                dso_width = dso_short_width;
@@ -606,7 +290,7 @@ static void print_sym_table(void)
                        sym_width = winsize.ws_col - dso_width - 29;
        }
        putchar('\n');
-       if (nr_counters == 1)
+       if (top.evlist->nr_entries == 1)
                printf("             samples  pcnt");
        else
                printf("   weight    samples  pcnt");
@@ -615,7 +299,7 @@ static void print_sym_table(void)
                printf("         RIP       ");
        printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
        printf("   %s    _______ _____",
-              nr_counters == 1 ? "      " : "______");
+              top.evlist->nr_entries == 1 ? "      " : "______");
        if (verbose)
                printf(" ________________");
        printf(" %-*.*s", sym_width, sym_width, graph_line);
@@ -628,13 +312,14 @@ static void print_sym_table(void)
 
                syme = rb_entry(nd, struct sym_entry, rb_node);
                sym = sym_entry__symbol(syme);
-               if (++printed > print_entries || (int)syme->snap_count < count_filter)
+               if (++printed > top.print_entries ||
+                   (int)syme->snap_count < top.count_filter)
                        continue;
 
                pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
                                         sum_ksamples));
 
-               if (nr_counters == 1 || !display_weighted)
+               if (top.evlist->nr_entries == 1 || !top.display_weighted)
                        printf("%20.2f ", syme->weight);
                else
                        printf("%9.1f %10ld ", syme->weight, syme->snap_count);
@@ -693,10 +378,8 @@ static void prompt_symbol(struct sym_entry **target, const char *msg)
 
        /* zero counters of active symbol */
        if (syme) {
-               pthread_mutex_lock(&syme->src->lock);
                __zero_source_counters(syme);
                *target = NULL;
-               pthread_mutex_unlock(&syme->src->lock);
        }
 
        fprintf(stdout, "\n%s: ", msg);
@@ -707,11 +390,11 @@ static void prompt_symbol(struct sym_entry **target, const char *msg)
        if (p)
                *p = 0;
 
-       pthread_mutex_lock(&active_symbols_lock);
-       syme = list_entry(active_symbols.next, struct sym_entry, node);
-       pthread_mutex_unlock(&active_symbols_lock);
+       pthread_mutex_lock(&top.active_symbols_lock);
+       syme = list_entry(top.active_symbols.next, struct sym_entry, node);
+       pthread_mutex_unlock(&top.active_symbols_lock);
 
-       list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
+       list_for_each_entry_safe_from(syme, n, &top.active_symbols, node) {
                struct symbol *sym = sym_entry__symbol(syme);
 
                if (!strcmp(buf, sym->name)) {
@@ -735,34 +418,34 @@ static void print_mapped_keys(void)
 {
        char *name = NULL;
 
-       if (sym_filter_entry) {
-               struct symbol *sym = sym_entry__symbol(sym_filter_entry);
+       if (top.sym_filter_entry) {
+               struct symbol *sym = sym_entry__symbol(top.sym_filter_entry);
                name = sym->name;
        }
 
        fprintf(stdout, "\nMapped keys:\n");
-       fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", delay_secs);
-       fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", print_entries);
+       fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", top.delay_secs);
+       fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", top.print_entries);
 
-       if (nr_counters > 1)
-               fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(sym_evsel));
+       if (top.evlist->nr_entries > 1)
+               fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(top.sym_evsel));
 
-       fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", count_filter);
+       fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top.count_filter);
 
        fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
        fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
        fprintf(stdout, "\t[S]     stop annotation.\n");
 
-       if (nr_counters > 1)
-               fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);
+       if (top.evlist->nr_entries > 1)
+               fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", top.display_weighted ? 1 : 0);
 
        fprintf(stdout,
                "\t[K]     hide kernel_symbols symbols.     \t(%s)\n",
-               hide_kernel_symbols ? "yes" : "no");
+               top.hide_kernel_symbols ? "yes" : "no");
        fprintf(stdout,
                "\t[U]     hide user symbols.               \t(%s)\n",
-               hide_user_symbols ? "yes" : "no");
-       fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", zero ? 1 : 0);
+               top.hide_user_symbols ? "yes" : "no");
+       fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", top.zero ? 1 : 0);
        fprintf(stdout, "\t[qQ]    quit.\n");
 }
 
@@ -783,7 +466,7 @@ static int key_mapped(int c)
                        return 1;
                case 'E':
                case 'w':
-                       return nr_counters > 1 ? 1 : 0;
+                       return top.evlist->nr_entries > 1 ? 1 : 0;
                default:
                        break;
        }
@@ -818,47 +501,47 @@ static void handle_keypress(struct perf_session *session, int c)
 
        switch (c) {
                case 'd':
-                       prompt_integer(&delay_secs, "Enter display delay");
-                       if (delay_secs < 1)
-                               delay_secs = 1;
+                       prompt_integer(&top.delay_secs, "Enter display delay");
+                       if (top.delay_secs < 1)
+                               top.delay_secs = 1;
                        break;
                case 'e':
-                       prompt_integer(&print_entries, "Enter display entries (lines)");
-                       if (print_entries == 0) {
+                       prompt_integer(&top.print_entries, "Enter display entries (lines)");
+                       if (top.print_entries == 0) {
                                sig_winch_handler(SIGWINCH);
                                signal(SIGWINCH, sig_winch_handler);
                        } else
                                signal(SIGWINCH, SIG_DFL);
                        break;
                case 'E':
-                       if (nr_counters > 1) {
+                       if (top.evlist->nr_entries > 1) {
                                fprintf(stderr, "\nAvailable events:");
 
-                               list_for_each_entry(sym_evsel, &evsel_list, node)
-                                       fprintf(stderr, "\n\t%d %s", sym_evsel->idx, event_name(sym_evsel));
+                               list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
+                                       fprintf(stderr, "\n\t%d %s", top.sym_evsel->idx, event_name(top.sym_evsel));
 
-                               prompt_integer(&sym_counter, "Enter details event counter");
+                               prompt_integer(&top.sym_counter, "Enter details event counter");
 
-                               if (sym_counter >= nr_counters) {
-                                       sym_evsel = list_entry(evsel_list.next, struct perf_evsel, node);
-                                       sym_counter = 0;
-                                       fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(sym_evsel));
+                               if (top.sym_counter >= top.evlist->nr_entries) {
+                                       top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
+                                       top.sym_counter = 0;
+                                       fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top.sym_evsel));
                                        sleep(1);
                                        break;
                                }
-                               list_for_each_entry(sym_evsel, &evsel_list, node)
-                                       if (sym_evsel->idx == sym_counter)
+                               list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
+                                       if (top.sym_evsel->idx == top.sym_counter)
                                                break;
-                       } else sym_counter = 0;
+                       } else top.sym_counter = 0;
                        break;
                case 'f':
-                       prompt_integer(&count_filter, "Enter display event count filter");
+                       prompt_integer(&top.count_filter, "Enter display event count filter");
                        break;
                case 'F':
                        prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
                        break;
                case 'K':
-                       hide_kernel_symbols = !hide_kernel_symbols;
+                       top.hide_kernel_symbols = !top.hide_kernel_symbols;
                        break;
                case 'q':
                case 'Q':
@@ -867,34 +550,50 @@ static void handle_keypress(struct perf_session *session, int c)
                                perf_session__fprintf_dsos(session, stderr);
                        exit(0);
                case 's':
-                       prompt_symbol(&sym_filter_entry, "Enter details symbol");
+                       prompt_symbol(&top.sym_filter_entry, "Enter details symbol");
                        break;
                case 'S':
-                       if (!sym_filter_entry)
+                       if (!top.sym_filter_entry)
                                break;
                        else {
-                               struct sym_entry *syme = sym_filter_entry;
+                               struct sym_entry *syme = top.sym_filter_entry;
 
-                               pthread_mutex_lock(&syme->src->lock);
-                               sym_filter_entry = NULL;
+                               top.sym_filter_entry = NULL;
                                __zero_source_counters(syme);
-                               pthread_mutex_unlock(&syme->src->lock);
                        }
                        break;
                case 'U':
-                       hide_user_symbols = !hide_user_symbols;
+                       top.hide_user_symbols = !top.hide_user_symbols;
                        break;
                case 'w':
-                       display_weighted = ~display_weighted;
+                       top.display_weighted = ~top.display_weighted;
                        break;
                case 'z':
-                       zero = !zero;
+                       top.zero = !top.zero;
                        break;
                default:
                        break;
        }
 }
 
+static void *display_thread_tui(void *arg __used)
+{
+       int err = 0;
+       pthread_mutex_lock(&top.active_symbols_lock);
+       while (list_empty(&top.active_symbols)) {
+               err = pthread_cond_wait(&top.active_symbols_cond,
+                                       &top.active_symbols_lock);
+               if (err)
+                       break;
+       }
+       pthread_mutex_unlock(&top.active_symbols_lock);
+       if (!err)
+               perf_top__tui_browser(&top);
+       exit_browser(0);
+       exit(0);
+       return NULL;
+}
+
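display_thread_tui() above and the sampling path (see the broadcast further down in perf_event__process_sample()) pair up as the textbook condition-variable idiom: the waiter re-tests its predicate in a loop because pthread_cond_wait() may wake spuriously, and the producer signals once, after inserting the first symbol. In isolation (an illustrative sketch with generic names, not the tool's code):

    /* consumer */
    pthread_mutex_lock(&lock);
    while (list_empty(&list))                   /* re-check: wakeups may be spurious */
            pthread_cond_wait(&cond, &lock);    /* atomically unlocks, sleeps, relocks */
    pthread_mutex_unlock(&lock);

    /* producer, on first insertion */
    pthread_mutex_lock(&lock);
    list_add(&entry->node, &list);
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);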
 static void *display_thread(void *arg __used)
 {
        struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
@@ -909,13 +608,13 @@ static void *display_thread(void *arg __used)
        tc.c_cc[VTIME] = 0;
 
 repeat:
-       delay_msecs = delay_secs * 1000;
+       delay_msecs = top.delay_secs * 1000;
        tcsetattr(0, TCSANOW, &tc);
        /* trash return */
        getc(stdin);
 
        do {
-               print_sym_table();
+               print_sym_table(session);
        } while (!poll(&stdin_poll, 1, delay_msecs) == 1);
 
        c = getc(stdin);
@@ -930,6 +629,7 @@ repeat:
 /* Tag samples to be skipped. */
 static const char *skip_symbols[] = {
        "default_idle",
+       "native_safe_halt",
        "cpu_idle",
        "enter_idle",
        "exit_idle",
@@ -965,9 +665,9 @@ static int symbol_filter(struct map *map, struct symbol *sym)
 
        syme = symbol__priv(sym);
        syme->map = map;
-       syme->src = NULL;
+       symbol__annotate_init(map, sym);
 
-       if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
+       if (!top.sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
                /* schedule initial sym_filter_entry setup */
                sym_filter_entry_sched = syme;
                sym_filter = NULL;
@@ -980,44 +680,40 @@ static int symbol_filter(struct map *map, struct symbol *sym)
                }
        }
 
-       if (!syme->skip)
-               syme->name_len = strlen(sym->name);
-
        return 0;
 }
 
-static void event__process_sample(const event_t *self,
-                                 struct sample_data *sample,
-                                 struct perf_session *session,
-                                 struct perf_evsel *evsel)
+static void perf_event__process_sample(const union perf_event *event,
+                                      struct perf_sample *sample,
+                                      struct perf_session *session)
 {
-       u64 ip = self->ip.ip;
+       u64 ip = event->ip.ip;
        struct sym_entry *syme;
        struct addr_location al;
        struct machine *machine;
-       u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+       u8 origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
-       ++samples;
+       ++top.samples;
 
        switch (origin) {
        case PERF_RECORD_MISC_USER:
-               ++us_samples;
-               if (hide_user_symbols)
+               ++top.us_samples;
+               if (top.hide_user_symbols)
                        return;
                machine = perf_session__find_host_machine(session);
                break;
        case PERF_RECORD_MISC_KERNEL:
-               ++kernel_samples;
-               if (hide_kernel_symbols)
+               ++top.kernel_samples;
+               if (top.hide_kernel_symbols)
                        return;
                machine = perf_session__find_host_machine(session);
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
-               ++guest_kernel_samples;
-               machine = perf_session__find_machine(session, self->ip.pid);
+               ++top.guest_kernel_samples;
+               machine = perf_session__find_machine(session, event->ip.pid);
                break;
        case PERF_RECORD_MISC_GUEST_USER:
-               ++guest_us_samples;
+               ++top.guest_us_samples;
                /*
                 * TODO: we don't process guest user from host side
                 * except simple counting.
@@ -1029,15 +725,15 @@ static void event__process_sample(const event_t *self,
 
        if (!machine && perf_guest) {
                pr_err("Can't find guest [%d]'s kernel information\n",
-                       self->ip.pid);
+                       event->ip.pid);
                return;
        }
 
-       if (self->header.misc & PERF_RECORD_MISC_EXACT_IP)
-               exact_samples++;
+       if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
+               top.exact_samples++;
 
-       if (event__preprocess_sample(self, session, &al, sample,
-                                    symbol_filter) < 0 ||
+       if (perf_event__preprocess_sample(event, session, &al, sample,
+                                         symbol_filter) < 0 ||
            al.filtered)
                return;
 
@@ -1055,8 +751,9 @@ static void event__process_sample(const event_t *self,
                 */
                if (al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
                    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
-                       pr_err("The %s file can't be used\n",
-                              symbol_conf.vmlinux_name);
+                       ui__warning("The %s file can't be used\n",
+                                   symbol_conf.vmlinux_name);
+                       exit_browser(0);
                        exit(1);
                }
 
@@ -1065,13 +762,13 @@ static void event__process_sample(const event_t *self,
 
        /* let's see whether we need to install the initial sym_filter_entry */
        if (sym_filter_entry_sched) {
-               sym_filter_entry = sym_filter_entry_sched;
+               top.sym_filter_entry = sym_filter_entry_sched;
                sym_filter_entry_sched = NULL;
-               if (parse_source(sym_filter_entry) < 0) {
-                       struct symbol *sym = sym_entry__symbol(sym_filter_entry);
+               if (parse_source(top.sym_filter_entry) < 0) {
+                       struct symbol *sym = sym_entry__symbol(top.sym_filter_entry);
 
                        pr_err("Can't annotate %s", sym->name);
-                       if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
+                       if (top.sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
                                pr_err(": No vmlinux file was found in the path:\n");
                                machine__fprintf_vmlinux_path(machine, stderr);
                        } else
@@ -1082,166 +779,73 @@ static void event__process_sample(const event_t *self,
 
        syme = symbol__priv(al.sym);
        if (!syme->skip) {
-               syme->count[evsel->idx]++;
+               struct perf_evsel *evsel;
+
                syme->origin = origin;
+               evsel = perf_evlist__id2evsel(top.evlist, sample->id);
+               assert(evsel != NULL);
+               syme->count[evsel->idx]++;
                record_precise_ip(syme, evsel->idx, ip);
-               pthread_mutex_lock(&active_symbols_lock);
-               if (list_empty(&syme->node) || !syme->node.next)
+               pthread_mutex_lock(&top.active_symbols_lock);
+               if (list_empty(&syme->node) || !syme->node.next) {
+                       static bool first = true;
                        __list_insert_active_sym(syme);
-               pthread_mutex_unlock(&active_symbols_lock);
+                       if (first) {
+                               pthread_cond_broadcast(&top.active_symbols_cond);
+                               first = false;
+                       }
+               }
+               pthread_mutex_unlock(&top.active_symbols_lock);
        }
 }
 
-struct mmap_data {
-       void                    *base;
-       int                     mask;
-       unsigned int            prev;
-};
-
-static int perf_evsel__alloc_mmap_per_thread(struct perf_evsel *evsel,
-                                            int ncpus, int nthreads)
-{
-       evsel->priv = xyarray__new(ncpus, nthreads, sizeof(struct mmap_data));
-       return evsel->priv != NULL ? 0 : -ENOMEM;
-}
-
-static void perf_evsel__free_mmap(struct perf_evsel *evsel)
-{
-       xyarray__delete(evsel->priv);
-       evsel->priv = NULL;
-}
-
-static unsigned int mmap_read_head(struct mmap_data *md)
-{
-       struct perf_event_mmap_page *pc = md->base;
-       int head;
-
-       head = pc->data_head;
-       rmb();
-
-       return head;
-}
-
-static void perf_session__mmap_read_counter(struct perf_session *self,
-                                           struct perf_evsel *evsel,
-                                           int cpu, int thread_idx)
+static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu)
 {
-       struct xyarray *mmap_array = evsel->priv;
-       struct mmap_data *md = xyarray__entry(mmap_array, cpu, thread_idx);
-       unsigned int head = mmap_read_head(md);
-       unsigned int old = md->prev;
-       unsigned char *data = md->base + page_size;
-       struct sample_data sample;
-       int diff;
-
-       /*
-        * If we're further behind than half the buffer, there's a chance
-        * the writer will bite our tail and mess up the samples under us.
-        *
-        * If we somehow ended up ahead of the head, we got messed up.
-        *
-        * In either case, truncate and restart at head.
-        */
-       diff = head - old;
-       if (diff > md->mask / 2 || diff < 0) {
-               fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
-
-               /*
-                * head points to a known good entry, start there.
-                */
-               old = head;
-       }
-
-       for (; old != head;) {
-               event_t *event = (event_t *)&data[old & md->mask];
-
-               event_t event_copy;
+       struct perf_sample sample;
+       union perf_event *event;
 
-               size_t size = event->header.size;
+       while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) {
+               perf_session__parse_sample(self, event, &sample);
 
-               /*
-                * Event straddles the mmap boundary -- header should always
-                * be inside due to u64 alignment of output.
-                */
-               if ((old & md->mask) + size != ((old + size) & md->mask)) {
-                       unsigned int offset = old;
-                       unsigned int len = min(sizeof(*event), size), cpy;
-                       void *dst = &event_copy;
-
-                       do {
-                               cpy = min(md->mask + 1 - (offset & md->mask), len);
-                               memcpy(dst, &data[offset & md->mask], cpy);
-                               offset += cpy;
-                               dst += cpy;
-                               len -= cpy;
-                       } while (len);
-
-                       event = &event_copy;
-               }
-
-               event__parse_sample(event, self, &sample);
                if (event->header.type == PERF_RECORD_SAMPLE)
-                       event__process_sample(event, &sample, self, evsel);
+                       perf_event__process_sample(event, &sample, self);
                else
-                       event__process(event, &sample, self);
-               old += size;
+                       perf_event__process(event, &sample, self);
        }
-
-       md->prev = old;
 }
 
-static struct pollfd *event_array;
-
 static void perf_session__mmap_read(struct perf_session *self)
 {
-       struct perf_evsel *counter;
-       int i, thread_index;
-
-       for (i = 0; i < cpus->nr; i++) {
-               list_for_each_entry(counter, &evsel_list, node) {
-                       for (thread_index = 0;
-                               thread_index < threads->nr;
-                               thread_index++) {
-                               perf_session__mmap_read_counter(self,
-                                       counter, i, thread_index);
-                       }
-               }
-       }
-}
+       int i;
 
-int nr_poll;
-int group_fd;
+       for (i = 0; i < top.evlist->cpus->nr; i++)
+               perf_session__mmap_read_cpu(self, i);
+}
 
-static void start_counter(int i, struct perf_evsel *evsel)
+static void start_counters(struct perf_evlist *evlist)
 {
-       struct xyarray *mmap_array = evsel->priv;
-       struct mmap_data *mm;
-       struct perf_event_attr *attr;
-       int cpu = -1;
-       int thread_index;
-
-       if (target_tid == -1)
-               cpu = cpus->map[i];
+       struct perf_evsel *counter;
 
-       attr = &evsel->attr;
+       list_for_each_entry(counter, &evlist->entries, node) {
+               struct perf_event_attr *attr = &counter->attr;
 
-       attr->sample_type       = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+               attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
 
-       if (freq) {
-               attr->sample_type       |= PERF_SAMPLE_PERIOD;
-               attr->freq              = 1;
-               attr->sample_freq       = freq;
-       }
+               if (top.freq) {
+                       attr->sample_type |= PERF_SAMPLE_PERIOD;
+                       attr->freq        = 1;
+                       attr->sample_freq = top.freq;
+               }
 
-       attr->inherit           = (cpu < 0) && inherit;
-       attr->mmap              = 1;
+               if (evlist->nr_entries > 1) {
+                       attr->sample_type |= PERF_SAMPLE_ID;
+                       attr->read_format |= PERF_FORMAT_ID;
+               }
 
-       for (thread_index = 0; thread_index < threads->nr; thread_index++) {
+               attr->mmap = 1;
 try_again:
-               FD(evsel, i, thread_index) = sys_perf_event_open(attr,
-                               threads->map[thread_index], cpu, group_fd, 0);
-
-               if (FD(evsel, i, thread_index) < 0) {
+               if (perf_evsel__open(counter, top.evlist->cpus,
+                                    top.evlist->threads, group, inherit) < 0) {
                        int err = errno;
 
                        if (err == EPERM || err == EACCES)
@@ -1253,8 +857,8 @@ try_again:
                         * based cpu-clock-tick sw counter, which
                         * is always available even if no PMU support:
                         */
-                       if (attr->type == PERF_TYPE_HARDWARE
-                                       && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
+                       if (attr->type == PERF_TYPE_HARDWARE &&
+                           attr->config == PERF_COUNT_HW_CPU_CYCLES) {
 
                                if (verbose)
                                        warning(" ... trying to fall back to cpu-clock-ticks\n");
@@ -1264,39 +868,22 @@ try_again:
                                goto try_again;
                        }
                        printf("\n");
-                       error("sys_perf_event_open() syscall returned with %d (%s).  /bin/dmesg may provide additional information.\n",
-                                       FD(evsel, i, thread_index), strerror(err));
+                       error("sys_perf_event_open() syscall returned with %d "
+                             "(%s).  /bin/dmesg may provide additional information.\n",
+                             err, strerror(err));
                        die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
                        exit(-1);
                }
-               assert(FD(evsel, i, thread_index) >= 0);
-               fcntl(FD(evsel, i, thread_index), F_SETFL, O_NONBLOCK);
-
-               /*
-                * First counter acts as the group leader:
-                */
-               if (group && group_fd == -1)
-                       group_fd = FD(evsel, i, thread_index);
-
-               event_array[nr_poll].fd = FD(evsel, i, thread_index);
-               event_array[nr_poll].events = POLLIN;
-               nr_poll++;
-
-               mm = xyarray__entry(mmap_array, i, thread_index);
-               mm->prev = 0;
-               mm->mask = mmap_pages*page_size - 1;
-               mm->base = mmap(NULL, (mmap_pages+1)*page_size,
-                               PROT_READ, MAP_SHARED, FD(evsel, i, thread_index), 0);
-               if (mm->base == MAP_FAILED)
-                       die("failed to mmap with %d (%s)\n", errno, strerror(errno));
        }
+
+       if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
+               die("failed to mmap with %d (%s)\n", errno, strerror(errno));
 }
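The context elided between the two hunks above is the fallback the comment describes: if opening the hardware cycles event fails, the attribute is presumably rewritten to the always-available software clock, after which the visible goto try_again retries the open. Schematically (an assumption based on the surrounding comment, not quoted from the patch):

    attr->type   = PERF_TYPE_SOFTWARE;
    attr->config = PERF_COUNT_SW_CPU_CLOCK;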
 
 static int __cmd_top(void)
 {
        pthread_t thread;
-       struct perf_evsel *counter;
-       int i, ret;
+       int ret __used;
        /*
         * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
         * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
@@ -1305,23 +892,23 @@ static int __cmd_top(void)
        if (session == NULL)
                return -ENOMEM;
 
-       if (target_tid != -1)
-               event__synthesize_thread_map(threads, event__process, session);
+       if (top.target_tid != -1)
+               perf_event__synthesize_thread_map(top.evlist->threads,
+                                                 perf_event__process, session);
        else
-               event__synthesize_threads(event__process, session);
+               perf_event__synthesize_threads(perf_event__process, session);
 
-       for (i = 0; i < cpus->nr; i++) {
-               group_fd = -1;
-               list_for_each_entry(counter, &evsel_list, node)
-                       start_counter(i, counter);
-       }
+       start_counters(top.evlist);
+       session->evlist = top.evlist;
+       perf_session__update_sample_type(session);
 
        /* Wait for a minimal set of events before starting the snapshot */
-       poll(&event_array[0], nr_poll, 100);
+       poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
 
        perf_session__mmap_read(session);
 
-       if (pthread_create(&thread, NULL, display_thread, session)) {
+       if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
+                                                            display_thread), session)) {
                printf("Could not create display thread.\n");
                exit(-1);
        }
@@ -1337,12 +924,12 @@ static int __cmd_top(void)
        }
 
        while (1) {
-               int hits = samples;
+               u64 hits = top.samples;
 
                perf_session__mmap_read(session);
 
-               if (hits == samples)
-                       ret = poll(event_array, nr_poll, 100);
+               if (hits == top.samples)
+                       ret = poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
        }
 
        return 0;
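The rewritten main loop keeps the original pump structure: drain every per-cpu ring buffer, and only block in poll() (for at most 100 ms) when a pass produced no new samples, so a heavier sampling load naturally means less sleeping. Its shape, schematically (illustrative):

    for (;;) {
            u64 before = top.samples;

            perf_session__mmap_read(session);   /* drain all buffers */
            if (before == top.samples)          /* nothing new: nap briefly */
                    poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
    }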
@@ -1354,31 +941,31 @@ static const char * const top_usage[] = {
 };
 
 static const struct option options[] = {
-       OPT_CALLBACK('e', "event", NULL, "event",
+       OPT_CALLBACK('e', "event", &top.evlist, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events),
        OPT_INTEGER('c', "count", &default_interval,
                    "event period to sample"),
-       OPT_INTEGER('p', "pid", &target_pid,
+       OPT_INTEGER('p', "pid", &top.target_pid,
                    "profile events on existing process id"),
-       OPT_INTEGER('t', "tid", &target_tid,
+       OPT_INTEGER('t', "tid", &top.target_tid,
                    "profile events on existing thread id"),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                            "system-wide collection from all CPUs"),
-       OPT_STRING('C', "cpu", &cpu_list, "cpu",
+       OPT_STRING('C', "cpu", &top.cpu_list, "cpu",
                    "list of cpus to monitor"),
        OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
                   "file", "vmlinux pathname"),
-       OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols,
+       OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
                    "hide kernel symbols"),
        OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
        OPT_INTEGER('r', "realtime", &realtime_prio,
                    "collect data with this RT SCHED_FIFO priority"),
-       OPT_INTEGER('d', "delay", &delay_secs,
+       OPT_INTEGER('d', "delay", &top.delay_secs,
                    "number of seconds to delay between refreshes"),
        OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
                            "dump the symbol table used for profiling"),
-       OPT_INTEGER('f', "count-filter", &count_filter,
+       OPT_INTEGER('f', "count-filter", &top.count_filter,
                    "only display functions with more events than this"),
        OPT_BOOLEAN('g', "group", &group,
                            "put the counters into a counter group"),
@@ -1386,14 +973,16 @@ static const struct option options[] = {
                    "child tasks inherit counters"),
        OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
                    "symbol to annotate"),
-       OPT_BOOLEAN('z', "zero", &zero,
+       OPT_BOOLEAN('z', "zero", &top.zero,
                    "zero history across updates"),
-       OPT_INTEGER('F', "freq", &freq,
+       OPT_INTEGER('F', "freq", &top.freq,
                    "profile at this frequency"),
-       OPT_INTEGER('E', "entries", &print_entries,
+       OPT_INTEGER('E', "entries", &top.print_entries,
                    "display this many functions"),
-       OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
+       OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
                    "hide user symbols"),
+       OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
+       OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show counter open errors, etc)"),
        OPT_END()
@@ -1404,64 +993,68 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
        struct perf_evsel *pos;
        int status = -ENOMEM;
 
+       top.evlist = perf_evlist__new(NULL, NULL);
+       if (top.evlist == NULL)
+               return -ENOMEM;
+
        page_size = sysconf(_SC_PAGE_SIZE);
 
        argc = parse_options(argc, argv, options, top_usage, 0);
        if (argc)
                usage_with_options(top_usage, options);
 
-       if (target_pid != -1)
-               target_tid = target_pid;
+       /*
+        * XXX For now, start disabled and only use the TUI if explicitly
+        * asked for. Change that once a handle_keys equivalent is written,
+        * live annotation is done, etc.
+        */
+       use_browser = 0;
 
-       threads = thread_map__new(target_pid, target_tid);
-       if (threads == NULL) {
-               pr_err("Problems finding threads of monitor\n");
-               usage_with_options(top_usage, options);
-       }
+       if (use_stdio)
+               use_browser = 0;
+       else if (use_tui)
+               use_browser = 1;
 
-       event_array = malloc((sizeof(struct pollfd) *
-                             MAX_NR_CPUS * MAX_COUNTERS * threads->nr));
-       if (!event_array)
-               return -ENOMEM;
+       setup_browser(false);
 
        /* CPU and PID are mutually exclusive */
-       if (target_tid > 0 && cpu_list) {
+       if (top.target_tid > 0 && top.cpu_list) {
                printf("WARNING: PID switch overriding CPU\n");
                sleep(1);
-               cpu_list = NULL;
+               top.cpu_list = NULL;
        }
 
-       if (!nr_counters && perf_evsel_list__create_default() < 0) {
+       if (top.target_pid != -1)
+               top.target_tid = top.target_pid;
+
+       if (perf_evlist__create_maps(top.evlist, top.target_pid,
+                                    top.target_tid, top.cpu_list) < 0)
+               usage_with_options(top_usage, options);
+
+       if (!top.evlist->nr_entries &&
+           perf_evlist__add_default(top.evlist) < 0) {
                pr_err("Not enough memory for event selector list\n");
                return -ENOMEM;
        }
 
-       if (delay_secs < 1)
-               delay_secs = 1;
+       if (top.delay_secs < 1)
+               top.delay_secs = 1;
 
        /*
         * User specified count overrides default frequency.
         */
        if (default_interval)
-               freq = 0;
-       else if (freq) {
-               default_interval = freq;
+               top.freq = 0;
+       else if (top.freq) {
+               default_interval = top.freq;
        } else {
                fprintf(stderr, "frequency and count are zero, aborting\n");
                exit(EXIT_FAILURE);
        }
 
-       if (target_tid != -1)
-               cpus = cpu_map__dummy_new();
-       else
-               cpus = cpu_map__new(cpu_list);
-
-       if (cpus == NULL)
-               usage_with_options(top_usage, options);
-
-       list_for_each_entry(pos, &evsel_list, node) {
-               if (perf_evsel__alloc_mmap_per_thread(pos, cpus->nr, threads->nr) < 0 ||
-                   perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
+       list_for_each_entry(pos, &top.evlist->entries, node) {
+               if (perf_evsel__alloc_fd(pos, top.evlist->cpus->nr,
+                                        top.evlist->threads->nr) < 0)
                        goto out_free_fd;
                /*
                 * Fill in the ones not specifically initialized via -c:
@@ -1472,26 +1065,28 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
                pos->attr.sample_period = default_interval;
        }
 
-       sym_evsel = list_entry(evsel_list.next, struct perf_evsel, node);
+       if (perf_evlist__alloc_pollfd(top.evlist) < 0 ||
+           perf_evlist__alloc_mmap(top.evlist) < 0)
+               goto out_free_fd;
+
+       top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
 
-       symbol_conf.priv_size = (sizeof(struct sym_entry) +
-                                (nr_counters + 1) * sizeof(unsigned long));
+       symbol_conf.priv_size = (sizeof(struct sym_entry) + sizeof(struct annotation) +
+                                (top.evlist->nr_entries + 1) * sizeof(unsigned long));
 
        symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
        if (symbol__init() < 0)
                return -1;
 
        get_term_dimensions(&winsize);
-       if (print_entries == 0) {
+       if (top.print_entries == 0) {
                update_print_entries(&winsize);
                signal(SIGWINCH, sig_winch_handler);
        }
 
        status = __cmd_top();
 out_free_fd:
-       list_for_each_entry(pos, &evsel_list, node)
-               perf_evsel__free_mmap(pos);
-       perf_evsel_list__delete();
+       perf_evlist__delete(top.evlist);
 
        return status;
 }
index 95aaf565c704fb6ea67cee78d49e177f0ba8f595..a5fc660c1f1286512b299342ad3841509a080194 100644 (file)
@@ -94,6 +94,32 @@ void get_term_dimensions(struct winsize *ws);
 #include "util/types.h"
 #include <stdbool.h>
 
+struct perf_mmap {
+       void                    *base;
+       int                     mask;
+       unsigned int            prev;
+};
+
+static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
+{
+       struct perf_event_mmap_page *pc = mm->base;
+       unsigned int head = pc->data_head;
+       rmb();
+       return head;
+}
+
+static inline void perf_mmap__write_tail(struct perf_mmap *md,
+                                        unsigned long tail)
+{
+       struct perf_event_mmap_page *pc = md->base;
+
+       /*
+        * ensure all reads are done before we write the tail out.
+        */
+       /* mb(); */
+       pc->data_tail = tail;
+}
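The two helpers above are the user-space half of the perf ring-buffer protocol: perf_mmap__read_head() (with its rmb()) snapshots how far the kernel has written, and perf_mmap__write_tail() publishes how far the reader has consumed, letting the kernel reuse that space. A minimal consumer built on them might look like the following sketch; process_record() and the one-page offset to the data area are illustrative assumptions, not part of this patch, and records that wrap at the buffer end are ignored:

	/* Sketch only: drain one perf_mmap ring. */
	extern void process_record(struct perf_event_header *event); /* hypothetical */

	static void perf_mmap__drain(struct perf_mmap *md, size_t page_size)
	{
		unsigned int head = perf_mmap__read_head(md);
		unsigned int old = md->prev;
		/* the data area is assumed to start one page after the header */
		unsigned char *data = (unsigned char *)md->base + page_size;

		while (old != head) {
			struct perf_event_header *event =
				(struct perf_event_header *)&data[old & md->mask];

			process_record(event);	/* consume one record */
			old += event->size;	/* assumes no wrap-around */
		}

		md->prev = old;
		perf_mmap__write_tail(md, old);	/* hand the space back */
	}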
+
 /*
  * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
  * counters in the current task.
diff --git a/tools/perf/python/twatch.py b/tools/perf/python/twatch.py
new file mode 100755 (executable)
index 0000000..df638c4
--- /dev/null
@@ -0,0 +1,41 @@
+#! /usr/bin/python
+# -*- python -*-
+# -*- coding: utf-8 -*-
+#   twatch - Experimental use of the perf python interface
+#   Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
+#
+#   This application is free software; you can redistribute it and/or
+#   modify it under the terms of the GNU General Public License
+#   as published by the Free Software Foundation; version 2.
+#
+#   This application is distributed in the hope that it will be useful,
+#   but WITHOUT ANY WARRANTY; without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#   General Public License for more details.
+
+import perf
+
+def main():
+       cpus = perf.cpu_map()
+       threads = perf.thread_map()
+       evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
+                          wakeup_events = 1, sample_period = 1,
+                          sample_id_all = 1,
+                          sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
+       evsel.open(cpus = cpus, threads = threads)
+       evlist = perf.evlist(cpus, threads)
+       evlist.add(evsel)
+       evlist.mmap()
+       while True:
+               evlist.poll(timeout = -1)
+               for cpu in cpus:
+                       event = evlist.read_on_cpu(cpu)
+                       if not event:
+                               continue
+                       print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
+                                                               event.sample_pid,
+                                                               event.sample_tid),
+                       print event
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
new file mode 100644 (file)
index 0000000..0d0830c
--- /dev/null
@@ -0,0 +1,605 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-annotate.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include "util.h"
+#include "build-id.h"
+#include "color.h"
+#include "cache.h"
+#include "symbol.h"
+#include "debug.h"
+#include "annotate.h"
+#include <pthread.h>
+
+int symbol__annotate_init(struct map *map __used, struct symbol *sym)
+{
+       struct annotation *notes = symbol__annotation(sym);
+       pthread_mutex_init(&notes->lock, NULL);
+       return 0;
+}
+
+int symbol__alloc_hist(struct symbol *sym, int nevents)
+{
+       struct annotation *notes = symbol__annotation(sym);
+       size_t sizeof_sym_hist = (sizeof(struct sym_hist) +
+                                 (sym->end - sym->start) * sizeof(u64));
+
+       notes->src = zalloc(sizeof(*notes->src) + nevents * sizeof_sym_hist);
+       if (notes->src == NULL)
+               return -1;
+       notes->src->sizeof_sym_hist = sizeof_sym_hist;
+       notes->src->nr_histograms   = nevents;
+       INIT_LIST_HEAD(&notes->src->source);
+       return 0;
+}
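To make the sizing above concrete: for a 64-byte symbol monitored with 2 events, sizeof_sym_hist is sizeof(struct sym_hist) + 64 * sizeof(u64) = 8 + 512 = 520 bytes, and the zalloc() grabs sizeof(*notes->src) + 2 * 520 bytes, so the two per-event histograms sit back to back after the annotated_source header, which is exactly how annotation__histogram() indexes them.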
+
+void symbol__annotate_zero_histograms(struct symbol *sym)
+{
+       struct annotation *notes = symbol__annotation(sym);
+
+       pthread_mutex_lock(&notes->lock);
+       if (notes->src != NULL)
+               memset(notes->src->histograms, 0,
+                      notes->src->nr_histograms * notes->src->sizeof_sym_hist);
+       pthread_mutex_unlock(&notes->lock);
+}
+
+int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+                            int evidx, u64 addr)
+{
+       unsigned offset;
+       struct annotation *notes;
+       struct sym_hist *h;
+
+       notes = symbol__annotation(sym);
+       if (notes->src == NULL)
+               return -ENOMEM;
+
+       pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
+
+       if (addr >= sym->end)
+               return 0;
+
+       offset = addr - sym->start;
+       h = annotation__histogram(notes, evidx);
+       h->sum++;
+       h->addr[offset]++;
+
+       pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
+                 ", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name,
+                 addr, addr - sym->start, evidx, h->addr[offset]);
+       return 0;
+}
+
+static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize)
+{
+       struct objdump_line *self = malloc(sizeof(*self) + privsize);
+
+       if (self != NULL) {
+               self->offset = offset;
+               self->line = line;
+       }
+
+       return self;
+}
+
+void objdump_line__free(struct objdump_line *self)
+{
+       free(self->line);
+       free(self);
+}
+
+static void objdump__add_line(struct list_head *head, struct objdump_line *line)
+{
+       list_add_tail(&line->node, head);
+}
+
+struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
+                                              struct objdump_line *pos)
+{
+       list_for_each_entry_continue(pos, head, node)
+               if (pos->offset >= 0)
+                       return pos;
+
+       return NULL;
+}
+
+static int objdump_line__print(struct objdump_line *oline, struct symbol *sym,
+                              int evidx, u64 len, int min_pcnt,
+                              int printed, int max_lines,
+                              struct objdump_line *queue)
+{
+       static const char *prev_line;
+       static const char *prev_color;
+
+       if (oline->offset != -1) {
+               const char *path = NULL;
+               unsigned int hits = 0;
+               double percent = 0.0;
+               const char *color;
+               struct annotation *notes = symbol__annotation(sym);
+               struct source_line *src_line = notes->src->lines;
+               struct sym_hist *h = annotation__histogram(notes, evidx);
+               s64 offset = oline->offset;
+               struct objdump_line *next;
+
+               next = objdump__get_next_ip_line(&notes->src->source, oline);
+
+               while (offset < (s64)len &&
+                      (next == NULL || offset < next->offset)) {
+                       if (src_line) {
+                               if (path == NULL)
+                                       path = src_line[offset].path;
+                               percent += src_line[offset].percent;
+                       } else
+                               hits += h->addr[offset];
+
+                       ++offset;
+               }
+
+               if (src_line == NULL && h->sum)
+                       percent = 100.0 * hits / h->sum;
+
+               if (percent < min_pcnt)
+                       return -1;
+
+               if (max_lines && printed >= max_lines)
+                       return 1;
+
+               if (queue != NULL) {
+                       list_for_each_entry_from(queue, &notes->src->source, node) {
+                               if (queue == oline)
+                                       break;
+                               objdump_line__print(queue, sym, evidx, len,
+                                                   0, 0, 1, NULL);
+                       }
+               }
+
+               color = get_percent_color(percent);
+
+               /*
+                * Also color the filename and line if needed, with
+                * the same color as the percentage. Don't print it
+                * twice for nearby colored addresses with the same filename:line
+                */
+               if (path) {
+                       if (!prev_line || strcmp(prev_line, path)
+                                      || color != prev_color) {
+                               color_fprintf(stdout, color, " %s", path);
+                               prev_line = path;
+                               prev_color = color;
+                       }
+               }
+
+               color_fprintf(stdout, color, " %7.2f", percent);
+               printf(" :      ");
+               color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", oline->line);
+       } else if (max_lines && printed >= max_lines)
+               return 1;
+       else {
+               if (queue)
+                       return -1;
+
+               if (!*oline->line)
+                       printf("         :\n");
+               else
+                       printf("         :      %s\n", oline->line);
+       }
+
+       return 0;
+}
+
+static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
+                                     FILE *file, size_t privsize)
+{
+       struct annotation *notes = symbol__annotation(sym);
+       struct objdump_line *objdump_line;
+       char *line = NULL, *tmp, *tmp2, *c;
+       size_t line_len;
+       s64 line_ip, offset = -1;
+
+       if (getline(&line, &line_len, file) < 0)
+               return -1;
+
+       if (!line)
+               return -1;
+
+       while (line_len != 0 && isspace(line[line_len - 1]))
+               line[--line_len] = '\0';
+
+       c = strchr(line, '\n');
+       if (c)
+               *c = 0;
+
+       line_ip = -1;
+
+       /*
+        * Strip leading spaces:
+        */
+       tmp = line;
+       while (*tmp) {
+               if (*tmp != ' ')
+                       break;
+               tmp++;
+       }
+
+       if (*tmp) {
+               /*
+                * Parse hex addresses followed by ':'
+                */
+               line_ip = strtoull(tmp, &tmp2, 16);
+               if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
+                       line_ip = -1;
+       }
+
+       if (line_ip != -1) {
+               u64 start = map__rip_2objdump(map, sym->start),
+                   end = map__rip_2objdump(map, sym->end);
+
+               offset = line_ip - start;
+               if (offset < 0 || (u64)line_ip > end)
+                       offset = -1;
+       }
+
+       objdump_line = objdump_line__new(offset, line, privsize);
+       if (objdump_line == NULL) {
+               free(line);
+               return -1;
+       }
+       objdump__add_line(&notes->src->source, objdump_line);
+
+       return 0;
+}
+
+int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
+{
+       struct dso *dso = map->dso;
+       char *filename = dso__build_id_filename(dso, NULL, 0);
+       bool free_filename = true;
+       char command[PATH_MAX * 2];
+       FILE *file;
+       int err = 0;
+       char symfs_filename[PATH_MAX];
+
+       if (filename) {
+               snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
+                        symbol_conf.symfs, filename);
+       }
+
+       if (filename == NULL) {
+               if (dso->has_build_id) {
+                       pr_err("Can't annotate %s: not enough memory\n",
+                              sym->name);
+                       return -ENOMEM;
+               }
+               goto fallback;
+       } else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
+                  strstr(command, "[kernel.kallsyms]") ||
+                  access(symfs_filename, R_OK)) {
+               free(filename);
+fallback:
+               /*
+                * If we don't have build-ids or the build-id file isn't in the
+                * cache, or is just a kallsyms file, well, let's hope that this
+                * DSO is the same as when 'perf record' ran.
+                */
+               filename = dso->long_name;
+               snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
+                        symbol_conf.symfs, filename);
+               free_filename = false;
+       }
+
+       if (dso->origin == DSO__ORIG_KERNEL) {
+               char bf[BUILD_ID_SIZE * 2 + 16] = " with build id ";
+               char *build_id_msg = NULL;
+
+               if (dso->annotate_warned)
+                       goto out_free_filename;
+
+               if (dso->has_build_id) {
+                       build_id__sprintf(dso->build_id,
+                                         sizeof(dso->build_id), bf + 15);
+                       build_id_msg = bf;
+               }
+               err = -ENOENT;
+               dso->annotate_warned = 1;
+               pr_err("Can't annotate %s: No vmlinux file%s was found in the "
+                      "path.\nPlease use 'perf buildid-cache -av vmlinux' or "
+                      "--vmlinux vmlinux.\n",
+                      sym->name, build_id_msg ?: "");
+               goto out_free_filename;
+       }
+
+       pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
+                filename, sym->name, map->unmap_ip(map, sym->start),
+                map->unmap_ip(map, sym->end));
+
+       pr_debug("annotating [%p] %30s : [%p] %30s\n",
+                dso, dso->long_name, sym, sym->name);
+
+       snprintf(command, sizeof(command),
+                "objdump --start-address=0x%016" PRIx64
+                " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand",
+                map__rip_2objdump(map, sym->start),
+                map__rip_2objdump(map, sym->end),
+                symfs_filename, filename);
+
+       pr_debug("Executing: %s\n", command);
+
+       file = popen(command, "r");
+       if (!file)
+               goto out_free_filename;
+
+       while (!feof(file))
+               if (symbol__parse_objdump_line(sym, map, file, privsize) < 0)
+                       break;
+
+       pclose(file);
+out_free_filename:
+       if (free_filename)
+               free(filename);
+       return err;
+}
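For a concrete picture of what that snprintf() produces: annotating a symbol spanning 0x400430..0x4004a0 in ./a.out (addresses and path made up here) would run something like

	objdump --start-address=0x0000000000400430 --stop-address=0x00000000004004a0 -dS -C ./a.out|grep -v ./a.out|expand

and each line of that output is then handed to symbol__parse_objdump_line() above to be split into an offset and disassembly text.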
+
+static void insert_source_line(struct rb_root *root, struct source_line *src_line)
+{
+       struct source_line *iter;
+       struct rb_node **p = &root->rb_node;
+       struct rb_node *parent = NULL;
+
+       while (*p != NULL) {
+               parent = *p;
+               iter = rb_entry(parent, struct source_line, node);
+
+               if (src_line->percent > iter->percent)
+                       p = &(*p)->rb_left;
+               else
+                       p = &(*p)->rb_right;
+       }
+
+       rb_link_node(&src_line->node, parent, p);
+       rb_insert_color(&src_line->node, root);
+}
+
+static void symbol__free_source_line(struct symbol *sym, int len)
+{
+       struct annotation *notes = symbol__annotation(sym);
+       struct source_line *src_line = notes->src->lines;
+       int i;
+
+       for (i = 0; i < len; i++)
+               free(src_line[i].path);
+
+       free(src_line);
+       notes->src->lines = NULL;
+}
+
+/* Get the filename:line for the colored entries */
+static int symbol__get_source_line(struct symbol *sym, struct map *map,
+                                  int evidx, struct rb_root *root, int len,
+                                  const char *filename)
+{
+       u64 start;
+       int i;
+       char cmd[PATH_MAX * 2];
+       struct source_line *src_line;
+       struct annotation *notes = symbol__annotation(sym);
+       struct sym_hist *h = annotation__histogram(notes, evidx);
+
+       if (!h->sum)
+               return 0;
+
+       src_line = notes->src->lines = calloc(len, sizeof(struct source_line));
+       if (!notes->src->lines)
+               return -1;
+
+       start = map->unmap_ip(map, sym->start);
+
+       for (i = 0; i < len; i++) {
+               char *path = NULL;
+               size_t line_len;
+               u64 offset;
+               FILE *fp;
+
+               src_line[i].percent = 100.0 * h->addr[i] / h->sum;
+               if (src_line[i].percent <= 0.5)
+                       continue;
+
+               offset = start + i;
+               sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset);
+               fp = popen(cmd, "r");
+               if (!fp)
+                       continue;
+
+               if (getline(&path, &line_len, fp) < 0 || !line_len)
+                       goto next;
+
+               src_line[i].path = malloc(line_len + 1);
+               if (!src_line[i].path)
+                       goto next;
+
+               strcpy(src_line[i].path, path);
+               insert_source_line(root, &src_line[i]);
+
+       next:
+               pclose(fp);
+       }
+
+       return 0;
+}
+
+static void print_summary(struct rb_root *root, const char *filename)
+{
+       struct source_line *src_line;
+       struct rb_node *node;
+
+       printf("\nSorted summary for file %s\n", filename);
+       printf("----------------------------------------------\n\n");
+
+       if (RB_EMPTY_ROOT(root)) {
+               printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
+               return;
+       }
+
+       node = rb_first(root);
+       while (node) {
+               double percent;
+               const char *color;
+               char *path;
+
+               src_line = rb_entry(node, struct source_line, node);
+               percent = src_line->percent;
+               color = get_percent_color(percent);
+               path = src_line->path;
+
+               color_fprintf(stdout, color, " %7.2f %s", percent, path);
+               node = rb_next(node);
+       }
+}
+
+static void symbol__annotate_hits(struct symbol *sym, int evidx)
+{
+       struct annotation *notes = symbol__annotation(sym);
+       struct sym_hist *h = annotation__histogram(notes, evidx);
+       u64 len = sym->end - sym->start, offset;
+
+       for (offset = 0; offset < len; ++offset)
+               if (h->addr[offset] != 0)
+                       printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
+                              sym->start + offset, h->addr[offset]);
+       printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
+}
+
+int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
+                           bool full_paths, int min_pcnt, int max_lines,
+                           int context)
+{
+       struct dso *dso = map->dso;
+       const char *filename = dso->long_name, *d_filename;
+       struct annotation *notes = symbol__annotation(sym);
+       struct objdump_line *pos, *queue = NULL;
+       int printed = 2, queue_len = 0;
+       int more = 0;
+       u64 len;
+
+       if (full_paths)
+               d_filename = filename;
+       else
+               d_filename = basename(filename);
+
+       len = sym->end - sym->start;
+
+       printf(" Percent |      Source code & Disassembly of %s\n", d_filename);
+       printf("------------------------------------------------\n");
+
+       if (verbose)
+               symbol__annotate_hits(sym, evidx);
+
+       list_for_each_entry(pos, &notes->src->source, node) {
+               if (context && queue == NULL) {
+                       queue = pos;
+                       queue_len = 0;
+               }
+
+               switch (objdump_line__print(pos, sym, evidx, len, min_pcnt,
+                                           printed, max_lines, queue)) {
+               case 0:
+                       ++printed;
+                       if (context) {
+                               printed += queue_len;
+                               queue = NULL;
+                               queue_len = 0;
+                       }
+                       break;
+               case 1:
+                       /* filtered by max_lines */
+                       ++more;
+                       break;
+               case -1:
+               default:
+                       /*
+                        * Filtered by min_pcnt, or a non-IP line when
+                        * context != 0
+                        */
+                       if (!context)
+                               break;
+                       if (queue_len == context)
+                               queue = list_entry(queue->node.next, typeof(*queue), node);
+                       else
+                               ++queue_len;
+                       break;
+               }
+       }
+
+       return more;
+}
+
+void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
+{
+       struct annotation *notes = symbol__annotation(sym);
+       struct sym_hist *h = annotation__histogram(notes, evidx);
+
+       memset(h, 0, notes->src->sizeof_sym_hist);
+}
+
+void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
+{
+       struct annotation *notes = symbol__annotation(sym);
+       struct sym_hist *h = annotation__histogram(notes, evidx);
+       struct objdump_line *pos;
+       int len = sym->end - sym->start;
+
+       h->sum = 0;
+
+       list_for_each_entry(pos, &notes->src->source, node) {
+               if (pos->offset != -1 && pos->offset < len) {
+                       h->addr[pos->offset] = h->addr[pos->offset] * 7 / 8;
+                       h->sum += h->addr[pos->offset];
+               }
+       }
+}
+
+void objdump_line_list__purge(struct list_head *head)
+{
+       struct objdump_line *pos, *n;
+
+       list_for_each_entry_safe(pos, n, head, node) {
+               list_del(&pos->node);
+               objdump_line__free(pos);
+       }
+}
+
+int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
+                        bool print_lines, bool full_paths, int min_pcnt,
+                        int max_lines)
+{
+       struct dso *dso = map->dso;
+       const char *filename = dso->long_name;
+       struct rb_root source_line = RB_ROOT;
+       u64 len;
+
+       if (symbol__annotate(sym, map, 0) < 0)
+               return -1;
+
+       len = sym->end - sym->start;
+
+       if (print_lines) {
+               symbol__get_source_line(sym, map, evidx, &source_line,
+                                       len, filename);
+               print_summary(&source_line, filename);
+       }
+
+       symbol__annotate_printf(sym, map, evidx, full_paths,
+                               min_pcnt, max_lines, 0);
+       if (print_lines)
+               symbol__free_source_line(sym, len);
+
+       objdump_line_list__purge(&symbol__annotation(sym)->src->source);
+
+       return 0;
+}
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
new file mode 100644 (file)
index 0000000..c2c2868
--- /dev/null
@@ -0,0 +1,103 @@
+#ifndef __PERF_ANNOTATE_H
+#define __PERF_ANNOTATE_H
+
+#include <stdbool.h>
+#include "types.h"
+#include "symbol.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+struct objdump_line {
+       struct list_head node;
+       s64              offset;
+       char             *line;
+};
+
+void objdump_line__free(struct objdump_line *self);
+struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
+                                              struct objdump_line *pos);
+
+struct sym_hist {
+       u64             sum;
+       u64             addr[0];
+};
+
+struct source_line {
+       struct rb_node  node;
+       double          percent;
+       char            *path;
+};
+
+/** struct annotated_source - symbols with hits have this attached, via struct sannotation
+ *
+ * @histograms: Array of addr hit histograms per event being monitored
+ * @lines: If 'print_lines' is specified, per source code line percentages
+ * @source: source parsed from objdump -dS
+ *
+ * @lines is allocated, its percentages calculated and all sorted by percentage
+ * when the annotation is about to be presented, so the percentages are for
+ * one of the entries in the @histograms array, i.e. for the event/counter being
+ * presented. It is deallocated right after symbol__{tui,tty,etc}_annotate
+ * returns.
+ */
+struct annotated_source {
+       struct list_head   source;
+       struct source_line *lines;
+       int                nr_histograms;
+       int                sizeof_sym_hist;
+       struct sym_hist    histograms[0];
+};
+
+struct annotation {
+       pthread_mutex_t         lock;
+       struct annotated_source *src;
+};
+
+struct sannotation {
+       struct annotation annotation;
+       struct symbol     symbol;
+};
+
+static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
+{
+       return (((void *)&notes->src->histograms) +
+               (notes->src->sizeof_sym_hist * idx));
+}
+
+static inline struct annotation *symbol__annotation(struct symbol *sym)
+{
+       struct sannotation *a = container_of(sym, struct sannotation, symbol);
+       return &a->annotation;
+}
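The container_of() above works because symbols are allocated with a struct sannotation placed immediately in front of them (that is what the symbol_conf.priv_size computation in builtin-top.c reserves). The same trick in isolation, with stand-in types rather than the real perf structs:

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct payload { int data; };		/* stands in for struct symbol */
	struct wrapper {			/* stands in for struct sannotation */
		long priv;			/* stands in for struct annotation */
		struct payload payload;
	};

	int main(void)
	{
		struct wrapper *w = calloc(1, sizeof(*w));
		struct payload *p = &w->payload;	/* callers only ever see this */
		/* walk back to the enclosing object, as symbol__annotation() does */
		struct wrapper *back = container_of(p, struct wrapper, payload);

		printf("round trip ok: %d\n", back == w);
		free(w);
		return 0;
	}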
+
+int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+                            int evidx, u64 addr);
+int symbol__alloc_hist(struct symbol *sym, int nevents);
+void symbol__annotate_zero_histograms(struct symbol *sym);
+
+int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
+int symbol__annotate_init(struct map *map __used, struct symbol *sym);
+int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
+                           bool full_paths, int min_pcnt, int max_lines,
+                           int context);
+void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
+void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
+void objdump_line_list__purge(struct list_head *head);
+
+int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
+                        bool print_lines, bool full_paths, int min_pcnt,
+                        int max_lines);
+
+#ifdef NO_NEWT_SUPPORT
+static inline int symbol__tui_annotate(struct symbol *sym __used,
+                                      struct map *map __used,
+                                      int evidx __used, int refresh __used)
+{
+       return 0;
+}
+#else
+int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
+                        int refresh);
+#endif
+
+#endif /* __PERF_ANNOTATE_H */
index deffb8c960716213124b7fd36f56edf41bf8207d..31f934af9861e69437c09b286735451a12bf78fd 100644 (file)
@@ -14,8 +14,8 @@
 #include <linux/kernel.h>
 #include "debug.h"
 
-static int build_id__mark_dso_hit(event_t *event,
-                                 struct sample_data *sample __used,
+static int build_id__mark_dso_hit(union perf_event *event,
+                                 struct perf_sample *sample __used,
                                  struct perf_session *session)
 {
        struct addr_location al;
@@ -37,13 +37,14 @@ static int build_id__mark_dso_hit(event_t *event,
        return 0;
 }
 
-static int event__exit_del_thread(event_t *self, struct sample_data *sample __used,
-                                 struct perf_session *session)
+static int perf_event__exit_del_thread(union perf_event *event,
+                                      struct perf_sample *sample __used,
+                                      struct perf_session *session)
 {
-       struct thread *thread = perf_session__findnew(session, self->fork.tid);
+       struct thread *thread = perf_session__findnew(session, event->fork.tid);
 
-       dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
-                   self->fork.ppid, self->fork.ptid);
+       dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
+                   event->fork.ppid, event->fork.ptid);
 
        if (thread) {
                rb_erase(&thread->rb_node, &session->threads);
@@ -56,9 +57,9 @@ static int event__exit_del_thread(event_t *self, struct sample_data *sample __us
 
 struct perf_event_ops build_id__mark_dso_hit_ops = {
        .sample = build_id__mark_dso_hit,
-       .mmap   = event__process_mmap,
-       .fork   = event__process_task,
-       .exit   = event__exit_del_thread,
+       .mmap   = perf_event__process_mmap,
+       .fork   = perf_event__process_task,
+       .exit   = perf_event__exit_del_thread,
 };
 
 char *dso__build_id_filename(struct dso *self, char *bf, size_t size)
index a7729797fd96254bc35326077337a71f919c19b5..fc5e5a09d5b94102b7adaed11518dd8be2c0eb84 100644 (file)
@@ -34,13 +34,14 @@ extern int pager_use_color;
 extern int use_browser;
 
 #ifdef NO_NEWT_SUPPORT
-static inline void setup_browser(void)
+static inline void setup_browser(bool fallback_to_pager)
 {
-       setup_pager();
+       if (fallback_to_pager)
+               setup_pager();
 }
 static inline void exit_browser(bool wait_for_ok __used) {}
 #else
-void setup_browser(void);
+void setup_browser(bool fallback_to_pager);
 void exit_browser(bool wait_for_ok);
 #endif
 
index e12d539417b2cc4644e2d5a919cfb8a23e8ae163..9f7106a8d9a48cb6f9600beac10bfa3a38bd9be5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010, Frederic Weisbecker <fweisbec@gmail.com>
+ * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
  *
  * Handle the callchains from the stream in an ad-hoc radix tree and then
  * sort them in an rbtree.
@@ -18,7 +18,8 @@
 #include "util.h"
 #include "callchain.h"
 
-bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event)
+bool ip_callchain__valid(struct ip_callchain *chain,
+                        const union perf_event *event)
 {
        unsigned int chain_size = event->header.size;
        chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
@@ -26,10 +27,10 @@ bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event)
 }
 
 #define chain_for_each_child(child, parent)    \
-       list_for_each_entry(child, &parent->children, brothers)
+       list_for_each_entry(child, &parent->children, siblings)
 
 #define chain_for_each_child_safe(child, next, parent) \
-       list_for_each_entry_safe(child, next, &parent->children, brothers)
+       list_for_each_entry_safe(child, next, &parent->children, siblings)
 
 static void
 rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
@@ -38,14 +39,14 @@ rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct callchain_node *rnode;
-       u64 chain_cumul = cumul_hits(chain);
+       u64 chain_cumul = callchain_cumul_hits(chain);
 
        while (*p) {
                u64 rnode_cumul;
 
                parent = *p;
                rnode = rb_entry(parent, struct callchain_node, rb_node);
-               rnode_cumul = cumul_hits(rnode);
+               rnode_cumul = callchain_cumul_hits(rnode);
 
                switch (mode) {
                case CHAIN_FLAT:
@@ -104,7 +105,7 @@ static void __sort_chain_graph_abs(struct callchain_node *node,
 
        chain_for_each_child(child, node) {
                __sort_chain_graph_abs(child, min_hit);
-               if (cumul_hits(child) >= min_hit)
+               if (callchain_cumul_hits(child) >= min_hit)
                        rb_insert_callchain(&node->rb_root, child,
                                            CHAIN_GRAPH_ABS);
        }
@@ -129,7 +130,7 @@ static void __sort_chain_graph_rel(struct callchain_node *node,
 
        chain_for_each_child(child, node) {
                __sort_chain_graph_rel(child, min_percent);
-               if (cumul_hits(child) >= min_hit)
+               if (callchain_cumul_hits(child) >= min_hit)
                        rb_insert_callchain(&node->rb_root, child,
                                            CHAIN_GRAPH_REL);
        }
@@ -143,7 +144,7 @@ sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
        rb_root->rb_node = chain_root->node.rb_root.rb_node;
 }
 
-int register_callchain_param(struct callchain_param *param)
+int callchain_register_param(struct callchain_param *param)
 {
        switch (param->mode) {
        case CHAIN_GRAPH_ABS:
@@ -189,32 +190,27 @@ create_child(struct callchain_node *parent, bool inherit_children)
                chain_for_each_child(next, new)
                        next->parent = new;
        }
-       list_add_tail(&new->brothers, &parent->children);
+       list_add_tail(&new->siblings, &parent->children);
 
        return new;
 }
 
 
-struct resolved_ip {
-       u64               ip;
-       struct map_symbol ms;
-};
-
-struct resolved_chain {
-       u64                     nr;
-       struct resolved_ip      ips[0];
-};
-
-
 /*
  * Fill the node with callchain values
  */
 static void
-fill_node(struct callchain_node *node, struct resolved_chain *chain, int start)
+fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
 {
-       unsigned int i;
+       struct callchain_cursor_node *cursor_node;
+
+       node->val_nr = cursor->nr - cursor->pos;
+       if (!node->val_nr)
+               pr_warning("Warning: empty node in callchain tree\n");
 
-       for (i = start; i < chain->nr; i++) {
+       cursor_node = callchain_cursor_current(cursor);
+
+       while (cursor_node) {
                struct callchain_list *call;
 
                call = zalloc(sizeof(*call));
@@ -222,23 +218,25 @@ fill_node(struct callchain_node *node, struct resolved_chain *chain, int start)
                        perror("not enough memory for the code path tree");
                        return;
                }
-               call->ip = chain->ips[i].ip;
-               call->ms = chain->ips[i].ms;
+               call->ip = cursor_node->ip;
+               call->ms.sym = cursor_node->sym;
+               call->ms.map = cursor_node->map;
                list_add_tail(&call->list, &node->val);
+
+               callchain_cursor_advance(cursor);
+               cursor_node = callchain_cursor_current(cursor);
        }
-       node->val_nr = chain->nr - start;
-       if (!node->val_nr)
-               pr_warning("Warning: empty node in callchain tree\n");
 }
 
 static void
-add_child(struct callchain_node *parent, struct resolved_chain *chain,
-         int start, u64 period)
+add_child(struct callchain_node *parent,
+         struct callchain_cursor *cursor,
+         u64 period)
 {
        struct callchain_node *new;
 
        new = create_child(parent, false);
-       fill_node(new, chain, start);
+       fill_node(new, cursor);
 
        new->children_hit = 0;
        new->hit = period;
@@ -250,9 +248,10 @@ add_child(struct callchain_node *parent, struct resolved_chain *chain,
  * Then create another child to host the given callchain of new branch
  */
 static void
-split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
-               struct callchain_list *to_split, int idx_parents, int idx_local,
-               u64 period)
+split_add_child(struct callchain_node *parent,
+               struct callchain_cursor *cursor,
+               struct callchain_list *to_split,
+               u64 idx_parents, u64 idx_local, u64 period)
 {
        struct callchain_node *new;
        struct list_head *old_tail;
@@ -272,14 +271,14 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
        /* split the hits */
        new->hit = parent->hit;
        new->children_hit = parent->children_hit;
-       parent->children_hit = cumul_hits(new);
+       parent->children_hit = callchain_cumul_hits(new);
        new->val_nr = parent->val_nr - idx_local;
        parent->val_nr = idx_local;
 
        /* create a new child for the new branch if any */
-       if (idx_total < chain->nr) {
+       if (idx_total < cursor->nr) {
                parent->hit = 0;
-               add_child(parent, chain, idx_total, period);
+               add_child(parent, cursor, period);
                parent->children_hit += period;
        } else {
                parent->hit = period;
@@ -287,36 +286,41 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
 }
 
 static int
-append_chain(struct callchain_node *root, struct resolved_chain *chain,
-            unsigned int start, u64 period);
+append_chain(struct callchain_node *root,
+            struct callchain_cursor *cursor,
+            u64 period);
 
 static void
-append_chain_children(struct callchain_node *root, struct resolved_chain *chain,
-                     unsigned int start, u64 period)
+append_chain_children(struct callchain_node *root,
+                     struct callchain_cursor *cursor,
+                     u64 period)
 {
        struct callchain_node *rnode;
 
        /* lookup in children */
        chain_for_each_child(rnode, root) {
-               unsigned int ret = append_chain(rnode, chain, start, period);
+               unsigned int ret = append_chain(rnode, cursor, period);
 
                if (!ret)
                        goto inc_children_hit;
        }
        /* nothing in children, add to the current node */
-       add_child(root, chain, start, period);
+       add_child(root, cursor, period);
 
 inc_children_hit:
        root->children_hit += period;
 }
 
 static int
-append_chain(struct callchain_node *root, struct resolved_chain *chain,
-            unsigned int start, u64 period)
+append_chain(struct callchain_node *root,
+            struct callchain_cursor *cursor,
+            u64 period)
 {
+       struct callchain_cursor_node *curr_snap = cursor->curr;
        struct callchain_list *cnode;
-       unsigned int i = start;
+       u64 start = cursor->pos;
        bool found = false;
+       u64 matches;
 
        /*
         * Lookup in the current node
@@ -324,141 +328,134 @@ append_chain(struct callchain_node *root, struct resolved_chain *chain,
         * anywhere inside a function.
         */
        list_for_each_entry(cnode, &root->val, list) {
+               struct callchain_cursor_node *node;
                struct symbol *sym;
 
-               if (i == chain->nr)
+               node = callchain_cursor_current(cursor);
+               if (!node)
                        break;
 
-               sym = chain->ips[i].ms.sym;
+               sym = node->sym;
 
                if (cnode->ms.sym && sym) {
                        if (cnode->ms.sym->start != sym->start)
                                break;
-               } else if (cnode->ip != chain->ips[i].ip)
+               } else if (cnode->ip != node->ip)
                        break;
 
                if (!found)
                        found = true;
-               i++;
+
+               callchain_cursor_advance(cursor);
        }
 
        /* no match, rely on the parent */
-       if (!found)
+       if (!found) {
+               cursor->curr = curr_snap;
+               cursor->pos = start;
                return -1;
+       }
+
+       matches = cursor->pos - start;
 
        /* we match only a part of the node. Split it and add the new chain */
-       if (i - start < root->val_nr) {
-               split_add_child(root, chain, cnode, start, i - start, period);
+       if (matches < root->val_nr) {
+               split_add_child(root, cursor, cnode, start, matches, period);
                return 0;
        }
 
        /* we match 100% of the path, increment the hit */
-       if (i - start == root->val_nr && i == chain->nr) {
+       if (matches == root->val_nr && cursor->pos == cursor->nr) {
                root->hit += period;
                return 0;
        }
 
        /* We match the node and still have a part remaining */
-       append_chain_children(root, chain, i, period);
+       append_chain_children(root, cursor, period);
 
        return 0;
 }
 
-static void filter_context(struct ip_callchain *old, struct resolved_chain *new,
-                          struct map_symbol *syms)
-{
-       int i, j = 0;
-
-       for (i = 0; i < (int)old->nr; i++) {
-               if (old->ips[i] >= PERF_CONTEXT_MAX)
-                       continue;
-
-               new->ips[j].ip = old->ips[i];
-               new->ips[j].ms = syms[i];
-               j++;
-       }
-
-       new->nr = j;
-}
-
-
-int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
-                    struct map_symbol *syms, u64 period)
+int callchain_append(struct callchain_root *root,
+                    struct callchain_cursor *cursor,
+                    u64 period)
 {
-       struct resolved_chain *filtered;
-
-       if (!chain->nr)
+       if (!cursor->nr)
                return 0;
 
-       filtered = zalloc(sizeof(*filtered) +
-                         chain->nr * sizeof(struct resolved_ip));
-       if (!filtered)
-               return -ENOMEM;
-
-       filter_context(chain, filtered, syms);
-
-       if (!filtered->nr)
-               goto end;
+       callchain_cursor_commit(cursor);
 
-       append_chain_children(&root->node, filtered, 0, period);
+       append_chain_children(&root->node, cursor, period);
 
-       if (filtered->nr > root->max_depth)
-               root->max_depth = filtered->nr;
-end:
-       free(filtered);
+       if (cursor->nr > root->max_depth)
+               root->max_depth = cursor->nr;
 
        return 0;
 }
 
 static int
-merge_chain_branch(struct callchain_node *dst, struct callchain_node *src,
-                  struct resolved_chain *chain)
+merge_chain_branch(struct callchain_cursor *cursor,
+                  struct callchain_node *dst, struct callchain_node *src)
 {
+       struct callchain_cursor_node **old_last = cursor->last;
        struct callchain_node *child, *next_child;
        struct callchain_list *list, *next_list;
-       int old_pos = chain->nr;
+       int old_pos = cursor->nr;
        int err = 0;
 
        list_for_each_entry_safe(list, next_list, &src->val, list) {
-               chain->ips[chain->nr].ip = list->ip;
-               chain->ips[chain->nr].ms = list->ms;
-               chain->nr++;
+               callchain_cursor_append(cursor, list->ip,
+                                       list->ms.map, list->ms.sym);
                list_del(&list->list);
                free(list);
        }
 
-       if (src->hit)
-               append_chain_children(dst, chain, 0, src->hit);
+       if (src->hit) {
+               callchain_cursor_commit(cursor);
+               append_chain_children(dst, cursor, src->hit);
+       }
 
        chain_for_each_child_safe(child, next_child, src) {
-               err = merge_chain_branch(dst, child, chain);
+               err = merge_chain_branch(cursor, dst, child);
                if (err)
                        break;
 
-               list_del(&child->brothers);
+               list_del(&child->siblings);
                free(child);
        }
 
-       chain->nr = old_pos;
+       cursor->nr = old_pos;
+       cursor->last = old_last;
 
        return err;
 }
 
-int callchain_merge(struct callchain_root *dst, struct callchain_root *src)
+int callchain_merge(struct callchain_cursor *cursor,
+                   struct callchain_root *dst, struct callchain_root *src)
+{
+       return merge_chain_branch(cursor, &dst->node, &src->node);
+}
+
+int callchain_cursor_append(struct callchain_cursor *cursor,
+                           u64 ip, struct map *map, struct symbol *sym)
 {
-       struct resolved_chain *chain;
-       int err;
+       struct callchain_cursor_node *node = *cursor->last;
 
-       chain = malloc(sizeof(*chain) +
-                      src->max_depth * sizeof(struct resolved_ip));
-       if (!chain)
-               return -ENOMEM;
+       if (!node) {
+               node = calloc(1, sizeof(*node));
+               if (!node)
+                       return -ENOMEM;
 
-       chain->nr = 0;
+               *cursor->last = node;
+       }
 
-       err = merge_chain_branch(&dst->node, &src->node, chain);
+       node->ip = ip;
+       node->map = map;
+       node->sym = sym;
 
-       free(chain);
+       cursor->nr++;
 
-       return err;
+       cursor->last = &node->next;
+
+       return 0;
 }
index c15fb8c24ad2b87388e97cd6346cfdebaac11dd5..1a79df9f739f8425ce2991dcc0cc7a2eeac982dc 100644 (file)
@@ -16,7 +16,7 @@ enum chain_mode {
 
 struct callchain_node {
        struct callchain_node   *parent;
-       struct list_head        brothers;
+       struct list_head        siblings;
        struct list_head        children;
        struct list_head        val;
        struct rb_node          rb_node; /* to sort nodes in an rbtree */
@@ -49,9 +49,30 @@ struct callchain_list {
        struct list_head        list;
 };
 
+/*
+ * A callchain cursor is a singly linked list that
+ * lets one feed a callchain progressively.
+ * It keeps persistent allocated entries to minimize
+ * allocations.
+ */
+struct callchain_cursor_node {
+       u64                             ip;
+       struct map                      *map;
+       struct symbol                   *sym;
+       struct callchain_cursor_node    *next;
+};
+
+struct callchain_cursor {
+       u64                             nr;
+       struct callchain_cursor_node    *first;
+       struct callchain_cursor_node    **last;
+       u64                             pos;
+       struct callchain_cursor_node    *curr;
+};
+
 static inline void callchain_init(struct callchain_root *root)
 {
-       INIT_LIST_HEAD(&root->node.brothers);
+       INIT_LIST_HEAD(&root->node.siblings);
        INIT_LIST_HEAD(&root->node.children);
        INIT_LIST_HEAD(&root->node.val);
 
@@ -61,15 +82,54 @@ static inline void callchain_init(struct callchain_root *root)
        root->max_depth = 0;
 }
 
-static inline u64 cumul_hits(struct callchain_node *node)
+static inline u64 callchain_cumul_hits(struct callchain_node *node)
 {
        return node->hit + node->children_hit;
 }
 
-int register_callchain_param(struct callchain_param *param);
-int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
-                    struct map_symbol *syms, u64 period);
-int callchain_merge(struct callchain_root *dst, struct callchain_root *src);
+int callchain_register_param(struct callchain_param *param);
+int callchain_append(struct callchain_root *root,
+                    struct callchain_cursor *cursor,
+                    u64 period);
+
+int callchain_merge(struct callchain_cursor *cursor,
+                   struct callchain_root *dst, struct callchain_root *src);
+
+bool ip_callchain__valid(struct ip_callchain *chain,
+                        const union perf_event *event);
+/*
+ * Initialize a cursor before adding entries inside, but keep
+ * the previously allocated entries as a cache.
+ */
+static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
+{
+       cursor->nr = 0;
+       cursor->last = &cursor->first;
+}
+
+int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
+                           struct map *map, struct symbol *sym);
 
-bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event);
+/* Close a cursor writing session. Initialize for the reader */
+static inline void callchain_cursor_commit(struct callchain_cursor *cursor)
+{
+       cursor->curr = cursor->first;
+       cursor->pos = 0;
+}
+
+/* Cursor reading iteration helpers */
+static inline struct callchain_cursor_node *
+callchain_cursor_current(struct callchain_cursor *cursor)
+{
+       if (cursor->pos == cursor->nr)
+               return NULL;
+
+       return cursor->curr;
+}
+
+static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
+{
+       cursor->curr = cursor->curr->next;
+       cursor->pos++;
+}
 #endif /* __PERF_CALLCHAIN_H */
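Taken together, the cursor helpers form a two-phase protocol: a writer resets the cursor, appends one node per resolved frame, and commits; a reader then walks it with current/advance. One full cycle, sketched with made-up frame addresses and assuming the usual perf includes (stdio.h, inttypes.h):

	/* Sketch: one write/commit/read cycle over a callchain cursor. */
	static void cursor_demo(struct callchain_cursor *cursor)
	{
		struct callchain_cursor_node *node;

		callchain_cursor_reset(cursor);	/* keeps cached nodes for reuse */
		callchain_cursor_append(cursor, 0xffffffff81000000ULL, NULL, NULL);
		callchain_cursor_append(cursor, 0x0000000000400500ULL, NULL, NULL);

		callchain_cursor_commit(cursor);	/* switch to reader mode */
		while ((node = callchain_cursor_current(cursor)) != NULL) {
			printf("ip: %#" PRIx64 "\n", node->ip);
			callchain_cursor_advance(cursor);
		}
	}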
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
new file mode 100644 (file)
index 0000000..9fea755
--- /dev/null
@@ -0,0 +1,178 @@
+#include "util.h"
+#include "../perf.h"
+#include "parse-options.h"
+#include "evsel.h"
+#include "cgroup.h"
+#include "debugfs.h" /* MAX_PATH, STR() */
+#include "evlist.h"
+
+int nr_cgroups;
+
+static int
+cgroupfs_find_mountpoint(char *buf, size_t maxlen)
+{
+       FILE *fp;
+       char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1];
+       char *token, *saved_ptr;
+       int found = 0;
+
+       fp = fopen("/proc/mounts", "r");
+       if (!fp)
+               return -1;
+
+       /*
+        * in order to handle split hierarchy, we need to scan /proc/mounts
+        * and inspect every cgroupfs mount point to find one that has
+        * the perf_event subsystem
+        */
+       while (fscanf(fp, "%*s %"STR(MAX_PATH)"s %"STR(MAX_PATH)"s %"
+                               STR(MAX_PATH)"s %*d %*d\n",
+                               mountpoint, type, tokens) == 3) {
+
+               if (!strcmp(type, "cgroup")) {
+
+                       token = strtok_r(tokens, ",", &saved_ptr);
+
+                       while (token != NULL) {
+                               if (!strcmp(token, "perf_event")) {
+                                       found = 1;
+                                       break;
+                               }
+                               token = strtok_r(NULL, ",", &saved_ptr);
+                       }
+               }
+               if (found)
+                       break;
+       }
+       fclose(fp);
+       if (!found)
+               return -1;
+
+       if (strlen(mountpoint) < maxlen) {
+               strcpy(buf, mountpoint);
+               return 0;
+       }
+       return -1;
+}
+
+static int open_cgroup(char *name)
+{
+       char path[MAX_PATH+1];
+       char mnt[MAX_PATH+1];
+       int fd;
+
+
+       if (cgroupfs_find_mountpoint(mnt, MAX_PATH+1))
+               return -1;
+
+       snprintf(path, MAX_PATH, "%s/%s", mnt, name);
+
+       fd = open(path, O_RDONLY);
+       if (fd == -1)
+               fprintf(stderr, "no access to cgroup %s\n", path);
+
+       return fd;
+}
+
+static int add_cgroup(struct perf_evlist *evlist, char *str)
+{
+       struct perf_evsel *counter;
+       struct cgroup_sel *cgrp = NULL;
+       int n;
+       /*
+        * check if cgrp is already defined; if so, reuse it
+        */
+       list_for_each_entry(counter, &evlist->entries, node) {
+               cgrp = counter->cgrp;
+               if (!cgrp)
+                       continue;
+               if (!strcmp(cgrp->name, str))
+                       break;
+
+               cgrp = NULL;
+       }
+
+       if (!cgrp) {
+               cgrp = zalloc(sizeof(*cgrp));
+               if (!cgrp)
+                       return -1;
+
+               cgrp->name = str;
+
+               cgrp->fd = open_cgroup(str);
+               if (cgrp->fd == -1) {
+                       free(cgrp);
+                       return -1;
+               }
+       }
+
+       /*
+        * find the corresponding event: when adding cgroup N,
+        * we need to find event N
+        */
+       n = 0;
+       list_for_each_entry(counter, &evlist->entries, node) {
+               if (n == nr_cgroups)
+                       goto found;
+               n++;
+       }
+       if (cgrp->refcnt == 0)
+               free(cgrp);
+
+       return -1;
+found:
+       cgrp->refcnt++;
+       counter->cgrp = cgrp;
+       return 0;
+}
+
+void close_cgroup(struct cgroup_sel *cgrp)
+{
+       if (!cgrp)
+               return;
+
+       /* XXX: not reentrant */
+       if (--cgrp->refcnt == 0) {
+               close(cgrp->fd);
+               free(cgrp->name);
+               free(cgrp);
+       }
+}
+
+int parse_cgroups(const struct option *opt __used, const char *str,
+                 int unset __used)
+{
+       struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
+       const char *p, *e, *eos = str + strlen(str);
+       char *s;
+       int ret;
+
+       if (list_empty(&evlist->entries)) {
+               fprintf(stderr, "must define events before cgroups\n");
+               return -1;
+       }
+
+       for (;;) {
+               p = strchr(str, ',');
+               e = p ? p : eos;
+
+               /* allow empty cgroups, i.e., skip */
+               if (e - str) {
+                       /* termination added */
+                       s = strndup(str, e - str);
+                       if (!s)
+                               return -1;
+                       ret = add_cgroup(evlist, s);
+                       if (ret) {
+                               free(s);
+                               return -1;
+                       }
+               }
+               /* nr_cgroups is increased even for empty cgroups */
+               nr_cgroups++;
+               if (!p)
+                       break;
+               str = p+1;
+       }
+       return 0;
+}
diff --git a/tools/perf/util/cgroup.h b/tools/perf/util/cgroup.h
new file mode 100644 (file)
index 0000000..89acd6d
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef __CGROUP_H__
+#define __CGROUP_H__
+
+struct option;
+
+struct cgroup_sel {
+       char *name;
+       int fd;
+       int refcnt;
+};
+
+
+extern int nr_cgroups; /* number of explicit cgroups defined */
+extern void close_cgroup(struct cgroup_sel *cgrp);
+extern int parse_cgroups(const struct option *opt, const char *str, int unset);
+
+#endif /* __CGROUP_H__ */
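[Editor's note: parse_cgroups() matches the parse-options callback signature, so a tool can hang cgroup selection off a regular command-line option. An illustrative sketch follows; the option letter, variable name, and help string are assumptions, not taken from this commit — only the callback wiring is.]

	/* Illustrative wiring of parse_cgroups() as an option callback. */
	static struct perf_evlist *evsel_list;

	static const struct option sketch_options[] = {
		OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
			     "monitor only in the cgroup(s) 'name'", parse_cgroups),
		OPT_END()
	};

Note that the callback expects opt->value to point at the tool's evlist pointer, and that events must already be on the list, since add_cgroup() walks evlist->entries to pair cgroup N with event N.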
index 3ccaa10433830503325bb7625527839f4586b93e..6893eec693abad5ad8e5af88f1c8ee107c63e713 100644 (file)
@@ -177,3 +177,8 @@ struct cpu_map *cpu_map__dummy_new(void)
 
        return cpus;
 }
+
+void cpu_map__delete(struct cpu_map *map)
+{
+       free(map);
+}
index f7a4f42f6307fb522299ea48d1126e6d08ffda71..072c0a374794eafcdbfd628ae80c9fe1b0e88240 100644 (file)
@@ -8,6 +8,6 @@ struct cpu_map {
 
 struct cpu_map *cpu_map__new(const char *cpu_list);
 struct cpu_map *cpu_map__dummy_new(void);
-void *cpu_map__delete(struct cpu_map *map);
+void cpu_map__delete(struct cpu_map *map);
 
 #endif /* __PERF_CPUMAP_H */
index 01bbe8ecec3f7eda9088e9e59ad78ea53b7b5ce6..d4536a9e0d8cc35a4d1cf49f677cf97932f99482 100644 (file)
@@ -57,7 +57,7 @@ void ui__warning(const char *format, ...)
 }
 #endif
 
-void trace_event(event_t *event)
+void trace_event(union perf_event *event)
 {
        unsigned char *raw_event = (void *)event;
        const char *color = PERF_COLOR_BLUE;
index ca35fd66b5dfc8c238f5a4be3cb28ce402cf0bf6..93516cf4682ca3127ca36e26049bdfb92681f59f 100644 (file)
@@ -9,7 +9,7 @@ extern int verbose;
 extern bool quiet, dump_trace;
 
 int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
-void trace_event(event_t *event);
+void trace_event(union perf_event *event);
 
 struct ui_progress;
 
index 50d0a931497abbdc0151ca4b0a3a040b88c1b036..2b15c362ef568e2ac8b11a581f36ee52f789c2d5 100644 (file)
@@ -6,8 +6,9 @@
 #include "string.h"
 #include "strlist.h"
 #include "thread.h"
+#include "thread_map.h"
 
-static const char *event__name[] = {
+static const char *perf_event__names[] = {
        [0]                      = "TOTAL",
        [PERF_RECORD_MMAP]       = "MMAP",
        [PERF_RECORD_LOST]       = "LOST",
@@ -25,16 +26,16 @@ static const char *event__name[] = {
        [PERF_RECORD_FINISHED_ROUND]     = "FINISHED_ROUND",
 };
 
-const char *event__get_event_name(unsigned int id)
+const char *perf_event__name(unsigned int id)
 {
-       if (id >= ARRAY_SIZE(event__name))
+       if (id >= ARRAY_SIZE(perf_event__names))
                return "INVALID";
-       if (!event__name[id])
+       if (!perf_event__names[id])
                return "UNKNOWN";
-       return event__name[id];
+       return perf_event__names[id];
 }
 
-static struct sample_data synth_sample = {
+static struct perf_sample synth_sample = {
        .pid       = -1,
        .tid       = -1,
        .time      = -1,
@@ -43,9 +44,9 @@ static struct sample_data synth_sample = {
        .period    = 1,
 };
 
-static pid_t event__synthesize_comm(event_t *event, pid_t pid, int full,
-                                   event__handler_t process,
-                                   struct perf_session *session)
+static pid_t perf_event__synthesize_comm(union perf_event *event, pid_t pid,
+                                        int full, perf_event__handler_t process,
+                                        struct perf_session *session)
 {
        char filename[PATH_MAX];
        char bf[BUFSIZ];
@@ -126,9 +127,10 @@ out:
        return tgid;
 }
 
-static int event__synthesize_mmap_events(event_t *event, pid_t pid, pid_t tgid,
-                                        event__handler_t process,
-                                        struct perf_session *session)
+static int perf_event__synthesize_mmap_events(union perf_event *event,
+                                             pid_t pid, pid_t tgid,
+                                             perf_event__handler_t process,
+                                             struct perf_session *session)
 {
        char filename[PATH_MAX];
        FILE *fp;
@@ -199,14 +201,14 @@ static int event__synthesize_mmap_events(event_t *event, pid_t pid, pid_t tgid,
        return 0;
 }
 
-int event__synthesize_modules(event__handler_t process,
-                             struct perf_session *session,
-                             struct machine *machine)
+int perf_event__synthesize_modules(perf_event__handler_t process,
+                                  struct perf_session *session,
+                                  struct machine *machine)
 {
        struct rb_node *nd;
        struct map_groups *kmaps = &machine->kmaps;
-       event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);
-
+       union perf_event *event = zalloc((sizeof(event->mmap) +
+                                         session->id_hdr_size));
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for kernel modules\n");
@@ -251,23 +253,24 @@ int event__synthesize_modules(event__handler_t process,
        return 0;
 }
 
-static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event,
-                                     pid_t pid, event__handler_t process,
+static int __event__synthesize_thread(union perf_event *comm_event,
+                                     union perf_event *mmap_event,
+                                     pid_t pid, perf_event__handler_t process,
                                      struct perf_session *session)
 {
-       pid_t tgid = event__synthesize_comm(comm_event, pid, 1, process,
+       pid_t tgid = perf_event__synthesize_comm(comm_event, pid, 1, process,
                                            session);
        if (tgid == -1)
                return -1;
-       return event__synthesize_mmap_events(mmap_event, pid, tgid,
+       return perf_event__synthesize_mmap_events(mmap_event, pid, tgid,
                                             process, session);
 }
 
-int event__synthesize_thread_map(struct thread_map *threads,
-                                event__handler_t process,
-                                struct perf_session *session)
+int perf_event__synthesize_thread_map(struct thread_map *threads,
+                                     perf_event__handler_t process,
+                                     struct perf_session *session)
 {
-       event_t *comm_event, *mmap_event;
+       union perf_event *comm_event, *mmap_event;
        int err = -1, thread;
 
        comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
@@ -294,12 +297,12 @@ out:
        return err;
 }
 
-int event__synthesize_threads(event__handler_t process,
-                             struct perf_session *session)
+int perf_event__synthesize_threads(perf_event__handler_t process,
+                                  struct perf_session *session)
 {
        DIR *proc;
        struct dirent dirent, *next;
-       event_t *comm_event, *mmap_event;
+       union perf_event *comm_event, *mmap_event;
        int err = -1;
 
        comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
@@ -357,10 +360,10 @@ static int find_symbol_cb(void *arg, const char *name, char type,
        return 1;
 }
 
-int event__synthesize_kernel_mmap(event__handler_t process,
-                                 struct perf_session *session,
-                                 struct machine *machine,
-                                 const char *symbol_name)
+int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
+                                      struct perf_session *session,
+                                      struct machine *machine,
+                                      const char *symbol_name)
 {
        size_t size;
        const char *filename, *mmap_name;
@@ -374,8 +377,8 @@ int event__synthesize_kernel_mmap(event__handler_t process,
         * kernels.
         */
        struct process_symbol_args args = { .name = symbol_name, };
-       event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);
-
+       union perf_event *event = zalloc((sizeof(event->mmap) +
+                                         session->id_hdr_size));
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for kernel modules\n");
@@ -421,42 +424,15 @@ int event__synthesize_kernel_mmap(event__handler_t process,
        return err;
 }
 
-static void thread__comm_adjust(struct thread *self, struct hists *hists)
-{
-       char *comm = self->comm;
-
-       if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
-           (!symbol_conf.comm_list ||
-            strlist__has_entry(symbol_conf.comm_list, comm))) {
-               u16 slen = strlen(comm);
-
-               if (hists__new_col_len(hists, HISTC_COMM, slen))
-                       hists__set_col_len(hists, HISTC_THREAD, slen + 6);
-       }
-}
-
-static int thread__set_comm_adjust(struct thread *self, const char *comm,
-                                  struct hists *hists)
+int perf_event__process_comm(union perf_event *event,
+                            struct perf_sample *sample __used,
+                            struct perf_session *session)
 {
-       int ret = thread__set_comm(self, comm);
-
-       if (ret)
-               return ret;
-
-       thread__comm_adjust(self, hists);
+       struct thread *thread = perf_session__findnew(session, event->comm.tid);
 
-       return 0;
-}
+       dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid);
 
-int event__process_comm(event_t *self, struct sample_data *sample __used,
-                       struct perf_session *session)
-{
-       struct thread *thread = perf_session__findnew(session, self->comm.tid);
-
-       dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid);
-
-       if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm,
-                                                     &session->hists)) {
+       if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
                dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
                return -1;
        }
@@ -464,19 +440,21 @@ int event__process_comm(event_t *self, struct sample_data *sample __used,
        return 0;
 }
 
-int event__process_lost(event_t *self, struct sample_data *sample __used,
-                       struct perf_session *session)
+int perf_event__process_lost(union perf_event *event,
+                            struct perf_sample *sample __used,
+                            struct perf_session *session)
 {
        dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
-                   self->lost.id, self->lost.lost);
-       session->hists.stats.total_lost += self->lost.lost;
+                   event->lost.id, event->lost.lost);
+       session->hists.stats.total_lost += event->lost.lost;
        return 0;
 }
 
-static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
+static void perf_event__set_kernel_mmap_len(union perf_event *event,
+                                           struct map **maps)
 {
-       maps[MAP__FUNCTION]->start = self->mmap.start;
-       maps[MAP__FUNCTION]->end   = self->mmap.start + self->mmap.len;
+       maps[MAP__FUNCTION]->start = event->mmap.start;
+       maps[MAP__FUNCTION]->end   = event->mmap.start + event->mmap.len;
        /*
         * Be a bit paranoid here, some perf.data file came with
         * a zero sized synthesized MMAP event for the kernel.
@@ -485,8 +463,8 @@ static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
                maps[MAP__FUNCTION]->end = ~0ULL;
 }
 
-static int event__process_kernel_mmap(event_t *self,
-                       struct perf_session *session)
+static int perf_event__process_kernel_mmap(union perf_event *event,
+                                          struct perf_session *session)
 {
        struct map *map;
        char kmmap_prefix[PATH_MAX];
@@ -494,9 +472,9 @@ static int event__process_kernel_mmap(event_t *self,
        enum dso_kernel_type kernel_type;
        bool is_kernel_mmap;
 
-       machine = perf_session__findnew_machine(session, self->mmap.pid);
+       machine = perf_session__findnew_machine(session, event->mmap.pid);
        if (!machine) {
-               pr_err("Can't find id %d's machine\n", self->mmap.pid);
+               pr_err("Can't find id %d's machine\n", event->mmap.pid);
                goto out_problem;
        }
 
@@ -506,17 +484,17 @@ static int event__process_kernel_mmap(event_t *self,
        else
                kernel_type = DSO_TYPE_GUEST_KERNEL;
 
-       is_kernel_mmap = memcmp(self->mmap.filename,
+       is_kernel_mmap = memcmp(event->mmap.filename,
                                kmmap_prefix,
                                strlen(kmmap_prefix)) == 0;
-       if (self->mmap.filename[0] == '/' ||
-           (!is_kernel_mmap && self->mmap.filename[0] == '[')) {
+       if (event->mmap.filename[0] == '/' ||
+           (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
 
                char short_module_name[1024];
                char *name, *dot;
 
-               if (self->mmap.filename[0] == '/') {
-                       name = strrchr(self->mmap.filename, '/');
+               if (event->mmap.filename[0] == '/') {
+                       name = strrchr(event->mmap.filename, '/');
                        if (name == NULL)
                                goto out_problem;
 
@@ -528,10 +506,10 @@ static int event__process_kernel_mmap(event_t *self,
                                        "[%.*s]", (int)(dot - name), name);
                        strxfrchar(short_module_name, '-', '_');
                } else
-                       strcpy(short_module_name, self->mmap.filename);
+                       strcpy(short_module_name, event->mmap.filename);
 
-               map = machine__new_module(machine, self->mmap.start,
-                                         self->mmap.filename);
+               map = machine__new_module(machine, event->mmap.start,
+                                         event->mmap.filename);
                if (map == NULL)
                        goto out_problem;
 
@@ -541,9 +519,9 @@ static int event__process_kernel_mmap(event_t *self,
 
                map->dso->short_name = name;
                map->dso->sname_alloc = 1;
-               map->end = map->start + self->mmap.len;
+               map->end = map->start + event->mmap.len;
        } else if (is_kernel_mmap) {
-               const char *symbol_name = (self->mmap.filename +
+               const char *symbol_name = (event->mmap.filename +
                                strlen(kmmap_prefix));
                /*
                 * Should be there already, from the build-id table in
@@ -558,10 +536,10 @@ static int event__process_kernel_mmap(event_t *self,
                if (__machine__create_kernel_maps(machine, kernel) < 0)
                        goto out_problem;
 
-               event_set_kernel_mmap_len(machine->vmlinux_maps, self);
+               perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps);
                perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
                                                         symbol_name,
-                                                        self->mmap.pgoff);
+                                                        event->mmap.pgoff);
                if (machine__is_default_guest(machine)) {
                        /*
                         * preload dso of guest kernel and modules
@@ -575,22 +553,23 @@ out_problem:
        return -1;
 }
 
-int event__process_mmap(event_t *self, struct sample_data *sample __used,
-                       struct perf_session *session)
+int perf_event__process_mmap(union perf_event *event,
+                            struct perf_sample *sample __used,
+                            struct perf_session *session)
 {
        struct machine *machine;
        struct thread *thread;
        struct map *map;
-       u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+       u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        int ret = 0;
 
        dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
-                       self->mmap.pid, self->mmap.tid, self->mmap.start,
-                       self->mmap.len, self->mmap.pgoff, self->mmap.filename);
+                       event->mmap.pid, event->mmap.tid, event->mmap.start,
+                       event->mmap.len, event->mmap.pgoff, event->mmap.filename);
 
        if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
            cpumode == PERF_RECORD_MISC_KERNEL) {
-               ret = event__process_kernel_mmap(self, session);
+               ret = perf_event__process_kernel_mmap(event, session);
                if (ret < 0)
                        goto out_problem;
                return 0;
@@ -599,12 +578,12 @@ int event__process_mmap(event_t *self, struct sample_data *sample __used,
        machine = perf_session__find_host_machine(session);
        if (machine == NULL)
                goto out_problem;
-       thread = perf_session__findnew(session, self->mmap.pid);
+       thread = perf_session__findnew(session, event->mmap.pid);
        if (thread == NULL)
                goto out_problem;
-       map = map__new(&machine->user_dsos, self->mmap.start,
-                       self->mmap.len, self->mmap.pgoff,
-                       self->mmap.pid, self->mmap.filename,
+       map = map__new(&machine->user_dsos, event->mmap.start,
+                       event->mmap.len, event->mmap.pgoff,
+                       event->mmap.pid, event->mmap.filename,
                        MAP__FUNCTION);
        if (map == NULL)
                goto out_problem;
@@ -617,16 +596,17 @@ out_problem:
        return 0;
 }
 
-int event__process_task(event_t *self, struct sample_data *sample __used,
-                       struct perf_session *session)
+int perf_event__process_task(union perf_event *event,
+                            struct perf_sample *sample __used,
+                            struct perf_session *session)
 {
-       struct thread *thread = perf_session__findnew(session, self->fork.tid);
-       struct thread *parent = perf_session__findnew(session, self->fork.ptid);
+       struct thread *thread = perf_session__findnew(session, event->fork.tid);
+       struct thread *parent = perf_session__findnew(session, event->fork.ptid);
 
-       dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
-                   self->fork.ppid, self->fork.ptid);
+       dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
+                   event->fork.ppid, event->fork.ptid);
 
-       if (self->header.type == PERF_RECORD_EXIT) {
+       if (event->header.type == PERF_RECORD_EXIT) {
                perf_session__remove_thread(session, thread);
                return 0;
        }
@@ -640,20 +620,22 @@ int event__process_task(event_t *self, struct sample_data *sample __used,
        return 0;
 }
 
-int event__process(event_t *event, struct sample_data *sample,
-                  struct perf_session *session)
+int perf_event__process(union perf_event *event, struct perf_sample *sample,
+                       struct perf_session *session)
 {
        switch (event->header.type) {
        case PERF_RECORD_COMM:
-               event__process_comm(event, sample, session);
+               perf_event__process_comm(event, sample, session);
                break;
        case PERF_RECORD_MMAP:
-               event__process_mmap(event, sample, session);
+               perf_event__process_mmap(event, sample, session);
                break;
        case PERF_RECORD_FORK:
        case PERF_RECORD_EXIT:
-               event__process_task(event, sample, session);
+               perf_event__process_task(event, sample, session);
                break;
+       case PERF_RECORD_LOST:
+               perf_event__process_lost(event, sample, session);
+               break;
        default:
                break;
        }
@@ -750,24 +732,14 @@ void thread__find_addr_location(struct thread *self,
                al->sym = NULL;
 }
 
-static void dso__calc_col_width(struct dso *self, struct hists *hists)
-{
-       if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
-           (!symbol_conf.dso_list ||
-            strlist__has_entry(symbol_conf.dso_list, self->name))) {
-               u16 slen = dso__name_len(self);
-               hists__new_col_len(hists, HISTC_DSO, slen);
-       }
-
-       self->slen_calculated = 1;
-}
-
-int event__preprocess_sample(const event_t *self, struct perf_session *session,
-                            struct addr_location *al, struct sample_data *data,
-                            symbol_filter_t filter)
+int perf_event__preprocess_sample(const union perf_event *event,
+                                 struct perf_session *session,
+                                 struct addr_location *al,
+                                 struct perf_sample *sample,
+                                 symbol_filter_t filter)
 {
-       u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-       struct thread *thread = perf_session__findnew(session, self->ip.pid);
+       u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+       struct thread *thread = perf_session__findnew(session, event->ip.pid);
 
        if (thread == NULL)
                return -1;
@@ -789,12 +761,12 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session,
                machine__create_kernel_maps(&session->host_machine);
 
        thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
-                             self->ip.pid, self->ip.ip, al);
+                             event->ip.pid, event->ip.ip, al);
        dump_printf(" ...... dso: %s\n",
                    al->map ? al->map->dso->long_name :
                        al->level == 'H' ? "[hypervisor]" : "<not found>");
        al->sym = NULL;
-       al->cpu = data->cpu;
+       al->cpu = sample->cpu;
 
        if (al->map) {
                if (symbol_conf.dso_list &&
@@ -805,23 +777,8 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session,
                        strlist__has_entry(symbol_conf.dso_list,
                                           al->map->dso->long_name)))))
                        goto out_filtered;
-               /*
-                * We have to do this here as we may have a dso with no symbol
-                * hit that has a name longer than the ones with symbols
-                * sampled.
-                */
-               if (!sort_dso.elide && !al->map->dso->slen_calculated)
-                       dso__calc_col_width(al->map->dso, &session->hists);
 
                al->sym = map__find_symbol(al->map, al->addr, filter);
-       } else {
-               const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
-
-               if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width &&
-                   !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
-                   !symbol_conf.dso_list)
-                       hists__set_col_len(&session->hists, HISTC_DSO,
-                                          unresolved_col_width);
        }
 
        if (symbol_conf.sym_list && al->sym &&
@@ -834,128 +791,3 @@ out_filtered:
        al->filtered = true;
        return 0;
 }
-
-static int event__parse_id_sample(const event_t *event,
-                                 struct perf_session *session,
-                                 struct sample_data *sample)
-{
-       const u64 *array;
-       u64 type;
-
-       sample->cpu = sample->pid = sample->tid = -1;
-       sample->stream_id = sample->id = sample->time = -1ULL;
-
-       if (!session->sample_id_all)
-               return 0;
-
-       array = event->sample.array;
-       array += ((event->header.size -
-                  sizeof(event->header)) / sizeof(u64)) - 1;
-       type = session->sample_type;
-
-       if (type & PERF_SAMPLE_CPU) {
-               u32 *p = (u32 *)array;
-               sample->cpu = *p;
-               array--;
-       }
-
-       if (type & PERF_SAMPLE_STREAM_ID) {
-               sample->stream_id = *array;
-               array--;
-       }
-
-       if (type & PERF_SAMPLE_ID) {
-               sample->id = *array;
-               array--;
-       }
-
-       if (type & PERF_SAMPLE_TIME) {
-               sample->time = *array;
-               array--;
-       }
-
-       if (type & PERF_SAMPLE_TID) {
-               u32 *p = (u32 *)array;
-               sample->pid = p[0];
-               sample->tid = p[1];
-       }
-
-       return 0;
-}
-
-int event__parse_sample(const event_t *event, struct perf_session *session,
-                       struct sample_data *data)
-{
-       const u64 *array;
-       u64 type;
-
-       if (event->header.type != PERF_RECORD_SAMPLE)
-               return event__parse_id_sample(event, session, data);
-
-       array = event->sample.array;
-       type = session->sample_type;
-
-       if (type & PERF_SAMPLE_IP) {
-               data->ip = event->ip.ip;
-               array++;
-       }
-
-       if (type & PERF_SAMPLE_TID) {
-               u32 *p = (u32 *)array;
-               data->pid = p[0];
-               data->tid = p[1];
-               array++;
-       }
-
-       if (type & PERF_SAMPLE_TIME) {
-               data->time = *array;
-               array++;
-       }
-
-       if (type & PERF_SAMPLE_ADDR) {
-               data->addr = *array;
-               array++;
-       }
-
-       data->id = -1ULL;
-       if (type & PERF_SAMPLE_ID) {
-               data->id = *array;
-               array++;
-       }
-
-       if (type & PERF_SAMPLE_STREAM_ID) {
-               data->stream_id = *array;
-               array++;
-       }
-
-       if (type & PERF_SAMPLE_CPU) {
-               u32 *p = (u32 *)array;
-               data->cpu = *p;
-               array++;
-       } else
-               data->cpu = -1;
-
-       if (type & PERF_SAMPLE_PERIOD) {
-               data->period = *array;
-               array++;
-       }
-
-       if (type & PERF_SAMPLE_READ) {
-               pr_debug("PERF_SAMPLE_READ is unsuported for now\n");
-               return -1;
-       }
-
-       if (type & PERF_SAMPLE_CALLCHAIN) {
-               data->callchain = (struct ip_callchain *)array;
-               array += 1 + data->callchain->nr;
-       }
-
-       if (type & PERF_SAMPLE_RAW) {
-               u32 *p = (u32 *)array;
-               data->raw_size = *p;
-               p++;
-               data->raw_data = p;
-       }
-
-       return 0;
-}
index cc7b52f9b49265cf4097a3154568fb850f55cc60..9c35170fb379599713a6458c07023ccba8da2b2b 100644 (file)
@@ -61,7 +61,7 @@ struct sample_event {
        u64 array[];
 };
 
-struct sample_data {
+struct perf_sample {
        u64 ip;
        u32 pid, tid;
        u64 time;
@@ -117,7 +117,7 @@ struct tracing_data_event {
        u32 size;
 };
 
-typedef union event_union {
+union perf_event {
        struct perf_event_header        header;
        struct ip_event                 ip;
        struct mmap_event               mmap;
@@ -130,50 +130,54 @@ typedef union event_union {
        struct event_type_event         event_type;
        struct tracing_data_event       tracing_data;
        struct build_id_event           build_id;
-} event_t;
+};
 
-void event__print_totals(void);
+void perf_event__print_totals(void);
 
 struct perf_session;
 struct thread_map;
 
-typedef int (*event__handler_synth_t)(event_t *event, 
+typedef int (*perf_event__handler_synth_t)(union perf_event *event, 
+                                          struct perf_session *session);
+typedef int (*perf_event__handler_t)(union perf_event *event,
+                                    struct perf_sample *sample,
                                      struct perf_session *session);
-typedef int (*event__handler_t)(event_t *event, struct sample_data *sample,
-                               struct perf_session *session);
-
-int event__synthesize_thread_map(struct thread_map *threads,
-                                event__handler_t process,
-                                struct perf_session *session);
-int event__synthesize_threads(event__handler_t process,
-                             struct perf_session *session);
-int event__synthesize_kernel_mmap(event__handler_t process,
-                               struct perf_session *session,
-                               struct machine *machine,
-                               const char *symbol_name);
-
-int event__synthesize_modules(event__handler_t process,
-                             struct perf_session *session,
-                             struct machine *machine);
-
-int event__process_comm(event_t *self, struct sample_data *sample,
-                       struct perf_session *session);
-int event__process_lost(event_t *self, struct sample_data *sample,
-                       struct perf_session *session);
-int event__process_mmap(event_t *self, struct sample_data *sample,
-                       struct perf_session *session);
-int event__process_task(event_t *self, struct sample_data *sample,
+
+int perf_event__synthesize_thread_map(struct thread_map *threads,
+                                     perf_event__handler_t process,
+                                     struct perf_session *session);
+int perf_event__synthesize_threads(perf_event__handler_t process,
+                                  struct perf_session *session);
+int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
+                                      struct perf_session *session,
+                                      struct machine *machine,
+                                      const char *symbol_name);
+
+int perf_event__synthesize_modules(perf_event__handler_t process,
+                                  struct perf_session *session,
+                                  struct machine *machine);
+
+int perf_event__process_comm(union perf_event *event, struct perf_sample *sample,
+                            struct perf_session *session);
+int perf_event__process_lost(union perf_event *event, struct perf_sample *sample,
+                            struct perf_session *session);
+int perf_event__process_mmap(union perf_event *event, struct perf_sample *sample,
+                            struct perf_session *session);
+int perf_event__process_task(union perf_event *event, struct perf_sample *sample,
+                            struct perf_session *session);
+int perf_event__process(union perf_event *event, struct perf_sample *sample,
                        struct perf_session *session);
-int event__process(event_t *event, struct sample_data *sample,
-                  struct perf_session *session);
 
 struct addr_location;
-int event__preprocess_sample(const event_t *self, struct perf_session *session,
-                            struct addr_location *al, struct sample_data *data,
-                            symbol_filter_t filter);
-int event__parse_sample(const event_t *event, struct perf_session *session,
-                       struct sample_data *sample);
+int perf_event__preprocess_sample(const union perf_event *self,
+                                 struct perf_session *session,
+                                 struct addr_location *al,
+                                 struct perf_sample *sample,
+                                 symbol_filter_t filter);
+
+const char *perf_event__name(unsigned int id);
 
-const char *event__get_event_name(unsigned int id);
+int perf_event__parse_sample(const union perf_event *event, u64 type,
+                            bool sample_id_all, struct perf_sample *sample);
 
 #endif /* __PERF_RECORD_H */
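[Editor's note: with the parser hoisted behind the new perf_event__parse_sample() signature, callers decode records from the sample type and the sample_id_all flag alone, without a perf_session. A minimal sketch follows; the helper name is illustrative, and both inputs are assumed to come from the perf.data header or the event attributes.]

	/* Sketch only: decode one mmapped record into a struct perf_sample. */
	static int sketch_decode(union perf_event *event, u64 type,
				 bool sample_id_all)
	{
		struct perf_sample sample;

		if (perf_event__parse_sample(event, type, sample_id_all, &sample) < 0)
			return -1;

		if (event->header.type == PERF_RECORD_SAMPLE)
			printf("%s: ip %#" PRIx64 ", pid %d\n",
			       perf_event__name(event->header.type),
			       sample.ip, sample.pid);
		return 0;
	}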
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
new file mode 100644 (file)
index 0000000..d852cef
--- /dev/null
@@ -0,0 +1,394 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+#include <poll.h>
+#include "cpumap.h"
+#include "thread_map.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "util.h"
+
+#include <sys/mman.h>
+
+#include <linux/bitops.h>
+#include <linux/hash.h>
+
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
+
+void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
+                      struct thread_map *threads)
+{
+       int i;
+
+       for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
+               INIT_HLIST_HEAD(&evlist->heads[i]);
+       INIT_LIST_HEAD(&evlist->entries);
+       perf_evlist__set_maps(evlist, cpus, threads);
+}
+
+struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
+                                    struct thread_map *threads)
+{
+       struct perf_evlist *evlist = zalloc(sizeof(*evlist));
+
+       if (evlist != NULL)
+               perf_evlist__init(evlist, cpus, threads);
+
+       return evlist;
+}
+
+static void perf_evlist__purge(struct perf_evlist *evlist)
+{
+       struct perf_evsel *pos, *n;
+
+       list_for_each_entry_safe(pos, n, &evlist->entries, node) {
+               list_del_init(&pos->node);
+               perf_evsel__delete(pos);
+       }
+
+       evlist->nr_entries = 0;
+}
+
+void perf_evlist__exit(struct perf_evlist *evlist)
+{
+       free(evlist->mmap);
+       free(evlist->pollfd);
+       evlist->mmap = NULL;
+       evlist->pollfd = NULL;
+}
+
+void perf_evlist__delete(struct perf_evlist *evlist)
+{
+       perf_evlist__purge(evlist);
+       perf_evlist__exit(evlist);
+       free(evlist);
+}
+
+void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
+{
+       list_add_tail(&entry->node, &evlist->entries);
+       ++evlist->nr_entries;
+}
+
+int perf_evlist__add_default(struct perf_evlist *evlist)
+{
+       struct perf_event_attr attr = {
+               .type = PERF_TYPE_HARDWARE,
+               .config = PERF_COUNT_HW_CPU_CYCLES,
+       };
+       struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
+
+       if (evsel == NULL)
+               return -ENOMEM;
+
+       perf_evlist__add(evlist, evsel);
+       return 0;
+}
+
+int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+{
+       int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
+       evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
+       return evlist->pollfd != NULL ? 0 : -ENOMEM;
+}
+
+void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
+{
+       fcntl(fd, F_SETFL, O_NONBLOCK);
+       evlist->pollfd[evlist->nr_fds].fd = fd;
+       evlist->pollfd[evlist->nr_fds].events = POLLIN;
+       evlist->nr_fds++;
+}
+
+static void perf_evlist__id_hash(struct perf_evlist *evlist,
+                                struct perf_evsel *evsel,
+                                int cpu, int thread, u64 id)
+{
+       int hash;
+       struct perf_sample_id *sid = SID(evsel, cpu, thread);
+
+       sid->id = id;
+       sid->evsel = evsel;
+       hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
+       hlist_add_head(&sid->node, &evlist->heads[hash]);
+}
+
+void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
+                        int cpu, int thread, u64 id)
+{
+       perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
+       evsel->id[evsel->ids++] = id;
+}
+
+static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+                                 struct perf_evsel *evsel,
+                                 int cpu, int thread, int fd)
+{
+       u64 read_data[4] = { 0, };
+       int id_idx = 1; /* The first entry is the counter value */
+
+       if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
+           read(fd, &read_data, sizeof(read_data)) == -1)
+               return -1;
+
+       if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+               ++id_idx;
+       if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+               ++id_idx;
+
+       perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
+       return 0;
+}
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+{
+       struct hlist_head *head;
+       struct hlist_node *pos;
+       struct perf_sample_id *sid;
+       int hash;
+
+       if (evlist->nr_entries == 1)
+               return list_entry(evlist->entries.next, struct perf_evsel, node);
+
+       hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
+       head = &evlist->heads[hash];
+
+       hlist_for_each_entry(sid, pos, head, node)
+               if (sid->id == id)
+                       return sid->evsel;
+       return NULL;
+}
+
+union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
+{
+       /* XXX Move this to perf.c, making it generally available */
+       unsigned int page_size = sysconf(_SC_PAGE_SIZE);
+       struct perf_mmap *md = &evlist->mmap[cpu];
+       unsigned int head = perf_mmap__read_head(md);
+       unsigned int old = md->prev;
+       unsigned char *data = md->base + page_size;
+       union perf_event *event = NULL;
+
+       if (evlist->overwrite) {
+               /*
+                * If we're further behind than half the buffer, there's a chance
+                * the writer will bite our tail and mess up the samples under us.
+                *
+                * If we somehow ended up ahead of the head, we got messed up.
+                *
+                * In either case, truncate and restart at head.
+                */
+               int diff = head - old;
+               if (diff > md->mask / 2 || diff < 0) {
+                       fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
+
+                       /*
+                        * head points to a known good entry, start there.
+                        */
+                       old = head;
+               }
+       }
+
+       if (old != head) {
+               size_t size;
+
+               event = (union perf_event *)&data[old & md->mask];
+               size = event->header.size;
+
+               /*
+                * Event straddles the mmap boundary -- header should always
+                * be inside due to u64 alignment of output.
+                */
+               if ((old & md->mask) + size != ((old + size) & md->mask)) {
+                       unsigned int offset = old;
+                       unsigned int len = min(sizeof(*event), size), cpy;
+                       void *dst = &evlist->event_copy;
+
+                       do {
+                               cpy = min(md->mask + 1 - (offset & md->mask), len);
+                               memcpy(dst, &data[offset & md->mask], cpy);
+                               offset += cpy;
+                               dst += cpy;
+                               len -= cpy;
+                       } while (len);
+
+                       event = &evlist->event_copy;
+               }
+
+               old += size;
+       }
+
+       md->prev = old;
+
+       if (!evlist->overwrite)
+               perf_mmap__write_tail(md, old);
+
+       return event;
+}
+
+void perf_evlist__munmap(struct perf_evlist *evlist)
+{
+       int cpu;
+
+       for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+               if (evlist->mmap[cpu].base != NULL) {
+                       munmap(evlist->mmap[cpu].base, evlist->mmap_len);
+                       evlist->mmap[cpu].base = NULL;
+               }
+       }
+}
+
+int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
+{
+       evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap));
+       return evlist->mmap != NULL ? 0 : -ENOMEM;
+}
+
+static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
+                              int mask, int fd)
+{
+       evlist->mmap[cpu].prev = 0;
+       evlist->mmap[cpu].mask = mask;
+       evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
+                                     MAP_SHARED, fd, 0);
+       if (evlist->mmap[cpu].base == MAP_FAILED)
+               return -1;
+
+       perf_evlist__add_pollfd(evlist, fd);
+       return 0;
+}
+
+/** perf_evlist__mmap - Create per cpu maps to receive events
+ *
+ * @evlist - list of events
+ * @pages - map length in pages
+ * @overwrite - overwrite older events?
+ *
+ * If overwrite is false, the user needs to signal event consumption using:
+ *
+ *     struct perf_mmap *m = &evlist->mmap[cpu];
+ *     unsigned int head = perf_mmap__read_head(m);
+ *
+ *     perf_mmap__write_tail(m, head)
+ *
+ * Using perf_evlist__read_on_cpu does this automatically.
+ */
+int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
+{
+       unsigned int page_size = sysconf(_SC_PAGE_SIZE);
+       int mask = pages * page_size - 1, cpu;
+       struct perf_evsel *first_evsel, *evsel;
+       const struct cpu_map *cpus = evlist->cpus;
+       const struct thread_map *threads = evlist->threads;
+       int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
+
+       if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
+               return -ENOMEM;
+
+       if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
+               return -ENOMEM;
+
+       evlist->overwrite = overwrite;
+       evlist->mmap_len = (pages + 1) * page_size;
+       first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+       list_for_each_entry(evsel, &evlist->entries, node) {
+               if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+                   evsel->sample_id == NULL &&
+                   perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
+                       return -ENOMEM;
+
+               for (cpu = 0; cpu < cpus->nr; cpu++) {
+                       for (thread = 0; thread < threads->nr; thread++) {
+                               int fd = FD(evsel, cpu, thread);
+
+                               if (evsel->idx || thread) {
+                                       if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
+                                                 FD(first_evsel, cpu, 0)) != 0)
+                                               goto out_unmap;
+                               } else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
+                                       goto out_unmap;
+
+                               if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+                                   perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
+                                       goto out_unmap;
+                       }
+               }
+       }
+
+       return 0;
+
+out_unmap:
+       for (cpu = 0; cpu < cpus->nr; cpu++) {
+               if (evlist->mmap[cpu].base != NULL) {
+                       munmap(evlist->mmap[cpu].base, evlist->mmap_len);
+                       evlist->mmap[cpu].base = NULL;
+               }
+       }
+       return -1;
+}
+
+int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
+                            pid_t target_tid, const char *cpu_list)
+{
+       evlist->threads = thread_map__new(target_pid, target_tid);
+
+       if (evlist->threads == NULL)
+               return -1;
+
+       if (target_tid != -1)
+               evlist->cpus = cpu_map__dummy_new();
+       else
+               evlist->cpus = cpu_map__new(cpu_list);
+
+       if (evlist->cpus == NULL)
+               goto out_delete_threads;
+
+       return 0;
+
+out_delete_threads:
+       thread_map__delete(evlist->threads);
+       return -1;
+}
+
+void perf_evlist__delete_maps(struct perf_evlist *evlist)
+{
+       cpu_map__delete(evlist->cpus);
+       thread_map__delete(evlist->threads);
+       evlist->cpus    = NULL;
+       evlist->threads = NULL;
+}
+
+int perf_evlist__set_filters(struct perf_evlist *evlist)
+{
+       const struct thread_map *threads = evlist->threads;
+       const struct cpu_map *cpus = evlist->cpus;
+       struct perf_evsel *evsel;
+       char *filter;
+       int thread;
+       int cpu;
+       int err;
+       int fd;
+
+       list_for_each_entry(evsel, &evlist->entries, node) {
+               filter = evsel->filter;
+               if (!filter)
+                       continue;
+               for (cpu = 0; cpu < cpus->nr; cpu++) {
+                       for (thread = 0; thread < threads->nr; thread++) {
+                               fd = FD(evsel, cpu, thread);
+                               err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
+                               if (err)
+                                       return err;
+                       }
+               }
+       }
+
+       return 0;
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
new file mode 100644 (file)
index 0000000..8b1cb7a
--- /dev/null
@@ -0,0 +1,68 @@
+#ifndef __PERF_EVLIST_H
+#define __PERF_EVLIST_H 1
+
+#include <linux/list.h>
+#include "../perf.h"
+#include "event.h"
+
+struct pollfd;
+struct thread_map;
+struct cpu_map;
+
+#define PERF_EVLIST__HLIST_BITS 8
+#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
+
+struct perf_evlist {
+       struct list_head entries;
+       struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
+       int              nr_entries;
+       int              nr_fds;
+       int              mmap_len;
+       bool             overwrite;
+       union perf_event event_copy;
+       struct perf_mmap *mmap;
+       struct pollfd    *pollfd;
+       struct thread_map *threads;
+       struct cpu_map    *cpus;
+};
+
+struct perf_evsel;
+
+struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
+                                    struct thread_map *threads);
+void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
+                      struct thread_map *threads);
+void perf_evlist__exit(struct perf_evlist *evlist);
+void perf_evlist__delete(struct perf_evlist *evlist);
+
+void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
+int perf_evlist__add_default(struct perf_evlist *evlist);
+
+void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
+                        int cpu, int thread, u64 id);
+
+int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
+void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
+
+union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu);
+
+int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
+int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
+void perf_evlist__munmap(struct perf_evlist *evlist);
+
+static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
+                                        struct cpu_map *cpus,
+                                        struct thread_map *threads)
+{
+       evlist->cpus    = cpus;
+       evlist->threads = threads;
+}
+
+int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
+                            pid_t target_tid, const char *cpu_list);
+void perf_evlist__delete_maps(struct perf_evlist *evlist);
+int perf_evlist__set_filters(struct perf_evlist *evlist);
+
+#endif /* __PERF_EVLIST_H */
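[Editor's note: the header pairs each constructor with a matching teardown helper; maps installed by perf_evlist__create_maps() stay owned by the evlist until perf_evlist__delete_maps(), since perf_evlist__delete() does not free them. A hedged setup/teardown sketch for monitoring one existing thread follows; the helper names are illustrative.]

	/* Sketch only: evlist lifecycle for a single target tid. */
	static struct perf_evlist *sketch_setup(pid_t tid)
	{
		struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

		if (evlist == NULL)
			return NULL;

		if (perf_evlist__add_default(evlist) ||
		    perf_evlist__create_maps(evlist, -1, tid, NULL) < 0) {
			perf_evlist__delete(evlist);
			return NULL;
		}
		return evlist;
	}

	static void sketch_teardown(struct perf_evlist *evlist)
	{
		perf_evlist__delete_maps(evlist);	/* before delete: delete does not free them */
		perf_evlist__delete(evlist);
	}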
index d8575d31ee6cb68c3c85eb8bbd980f1833866cdc..662596afd7f1d49fccaeb8c95ee7c8c64748b8c9 100644 (file)
@@ -1,20 +1,34 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
 #include "evsel.h"
-#include "../perf.h"
+#include "evlist.h"
 #include "util.h"
 #include "cpumap.h"
-#include "thread.h"
+#include "thread_map.h"
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 
+void perf_evsel__init(struct perf_evsel *evsel,
+                     struct perf_event_attr *attr, int idx)
+{
+       evsel->idx         = idx;
+       evsel->attr        = *attr;
+       INIT_LIST_HEAD(&evsel->node);
+}
+
 struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
 {
        struct perf_evsel *evsel = zalloc(sizeof(*evsel));
 
-       if (evsel != NULL) {
-               evsel->idx         = idx;
-               evsel->attr        = *attr;
-               INIT_LIST_HEAD(&evsel->node);
-       }
+       if (evsel != NULL)
+               perf_evsel__init(evsel, attr, idx);
 
        return evsel;
 }
@@ -25,6 +39,22 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
        return evsel->fd != NULL ? 0 : -ENOMEM;
 }
 
+int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+       evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
+       if (evsel->sample_id == NULL)
+               return -ENOMEM;
+
+       evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
+       if (evsel->id == NULL) {
+               xyarray__delete(evsel->sample_id);
+               evsel->sample_id = NULL;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
 int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
 {
        evsel->counts = zalloc((sizeof(*evsel->counts) +
@@ -38,6 +68,14 @@ void perf_evsel__free_fd(struct perf_evsel *evsel)
        evsel->fd = NULL;
 }
 
+void perf_evsel__free_id(struct perf_evsel *evsel)
+{
+       xyarray__delete(evsel->sample_id);
+       evsel->sample_id = NULL;
+       free(evsel->id);
+       evsel->id = NULL;
+}
+
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
        int cpu, thread;
@@ -49,10 +87,19 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
                }
 }
 
-void perf_evsel__delete(struct perf_evsel *evsel)
+void perf_evsel__exit(struct perf_evsel *evsel)
 {
        assert(list_empty(&evsel->node));
        xyarray__delete(evsel->fd);
+       xyarray__delete(evsel->sample_id);
+       free(evsel->id);
+}
+
+void perf_evsel__delete(struct perf_evsel *evsel)
+{
+       perf_evsel__exit(evsel);
+       close_cgroup(evsel->cgrp);
+       free(evsel->name);
        free(evsel);
 }
 
@@ -128,21 +175,51 @@ int __perf_evsel__read(struct perf_evsel *evsel,
 }
 
 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-                             struct thread_map *threads)
+                             struct thread_map *threads, bool group, bool inherit)
 {
        int cpu, thread;
+       unsigned long flags = 0;
+       int pid = -1;
 
        if (evsel->fd == NULL &&
            perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
                return -1;
 
+       if (evsel->cgrp) {
+               flags = PERF_FLAG_PID_CGROUP;
+               pid = evsel->cgrp->fd;
+       }
+
        for (cpu = 0; cpu < cpus->nr; cpu++) {
+               int group_fd = -1;
+               /*
+                * Don't allow mmap() of inherited per-task counters. This
+                * would create a performance issue due to all children writing
+                * to the same buffer.
+                *
+                * FIXME:
+                * The proper fix is not to pass 'inherit' to perf_evsel__open*,
+                * but a 'flags' parameter, with 'group' folded there as well,
+                * then introduce a PERF_O_{MMAP,GROUP,INHERIT} enum, and if
+                * O_MMAP is set, emit a warning if cpu < 0 and O_INHERIT is
+                * set. Let's go for the minimal fix first, though.
+                */
+               evsel->attr.inherit = (cpus->map[cpu] >= 0) && inherit;
+
                for (thread = 0; thread < threads->nr; thread++) {
+
+                       if (!evsel->cgrp)
+                               pid = threads->map[thread];
+
                        FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
-                                                                    threads->map[thread],
-                                                                    cpus->map[cpu], -1, 0);
+                                                                    pid,
+                                                                    cpus->map[cpu],
+                                                                    group_fd, flags);
                        if (FD(evsel, cpu, thread) < 0)
                                goto out_close;
+
+                       if (group && group_fd == -1)
+                               group_fd = FD(evsel, cpu, thread);
                }
        }
 
@@ -175,10 +252,9 @@ static struct {
        .threads = { -1, },
 };
 
-int perf_evsel__open(struct perf_evsel *evsel,
-                    struct cpu_map *cpus, struct thread_map *threads)
+int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+                    struct thread_map *threads, bool group, bool inherit)
 {
-
        if (cpus == NULL) {
                /* Work around old compiler warnings about strict aliasing */
                cpus = &empty_cpu_map.map;
@@ -187,15 +263,135 @@ int perf_evsel__open(struct perf_evsel *evsel,
        if (threads == NULL)
                threads = &empty_thread_map.map;
 
-       return __perf_evsel__open(evsel, cpus, threads);
+       return __perf_evsel__open(evsel, cpus, threads, group, inherit);
 }
 
-int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
+                            struct cpu_map *cpus, bool group, bool inherit)
 {
-       return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
+       return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
+}
+
+int perf_evsel__open_per_thread(struct perf_evsel *evsel,
+                               struct thread_map *threads, bool group, bool inherit)
+{
+       return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
+}
+
+static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
+                                      struct perf_sample *sample)
+{
+       const u64 *array = event->sample.array;
+
+       array += ((event->header.size -
+                  sizeof(event->header)) / sizeof(u64)) - 1;
+
+       if (type & PERF_SAMPLE_CPU) {
+               u32 *p = (u32 *)array;
+               sample->cpu = *p;
+               array--;
+       }
+
+       if (type & PERF_SAMPLE_STREAM_ID) {
+               sample->stream_id = *array;
+               array--;
+       }
+
+       if (type & PERF_SAMPLE_ID) {
+               sample->id = *array;
+               array--;
+       }
+
+       if (type & PERF_SAMPLE_TIME) {
+               sample->time = *array;
+               array--;
+       }
+
+       if (type & PERF_SAMPLE_TID) {
+               u32 *p = (u32 *)array;
+               sample->pid = p[0];
+               sample->tid = p[1];
+       }
+
+       return 0;
 }
 
-int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
+int perf_event__parse_sample(const union perf_event *event, u64 type,
+                            bool sample_id_all, struct perf_sample *data)
 {
-       return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
+       const u64 *array;
+
+       data->cpu = data->pid = data->tid = -1;
+       data->stream_id = data->id = data->time = -1ULL;
+
+       if (event->header.type != PERF_RECORD_SAMPLE) {
+               if (!sample_id_all)
+                       return 0;
+               return perf_event__parse_id_sample(event, type, data);
+       }
+
+       array = event->sample.array;
+
+       if (type & PERF_SAMPLE_IP) {
+               data->ip = event->ip.ip;
+               array++;
+       }
+
+       if (type & PERF_SAMPLE_TID) {
+               u32 *p = (u32 *)array;
+               data->pid = p[0];
+               data->tid = p[1];
+               array++;
+       }
+
+       if (type & PERF_SAMPLE_TIME) {
+               data->time = *array;
+               array++;
+       }
+
+       if (type & PERF_SAMPLE_ADDR) {
+               data->addr = *array;
+               array++;
+       }
+
+       data->id = -1ULL;
+       if (type & PERF_SAMPLE_ID) {
+               data->id = *array;
+               array++;
+       }
+
+       if (type & PERF_SAMPLE_STREAM_ID) {
+               data->stream_id = *array;
+               array++;
+       }
+
+       if (type & PERF_SAMPLE_CPU) {
+               u32 *p = (u32 *)array;
+               data->cpu = *p;
+               array++;
+       }
+
+       if (type & PERF_SAMPLE_PERIOD) {
+               data->period = *array;
+               array++;
+       }
+
+       if (type & PERF_SAMPLE_READ) {
+               fprintf(stderr, "PERF_SAMPLE_READ is unsuported for now\n");
+               return -1;
+       }
+
+       if (type & PERF_SAMPLE_CALLCHAIN) {
+               data->callchain = (struct ip_callchain *)array;
+               array += 1 + data->callchain->nr;
+       }
+
+       if (type & PERF_SAMPLE_RAW) {
+               u32 *p = (u32 *)array;
+               data->raw_size = *p;
+               p++;
+               data->raw_data = p;
+       }
+
+       return 0;
 }
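The two parsers above walk the same sample_type-ordered u64 array from opposite ends: a PERF_RECORD_SAMPLE body is read front to back, while on every other record type the optional sample_id_all block is appended as a trailer and has to be read back to front. An illustrative layout (not from the patch) for type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID:

    PERF_RECORD_SAMPLE:   { header }{ pid,tid }{ time }{ id } ...
                          parsed forward from array[0]

    other record types:   { header }{ record payload }{ pid,tid }{ time }{ id }
                          parsed backward from the last u64 (here: id)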
index b2d755fe88a50614da63ac4c6e674a6045ab6188..6710ab538342fe1d5970c812423908ddec3ee5a7 100644
@@ -6,6 +6,8 @@
 #include "../../../include/linux/perf_event.h"
 #include "types.h"
 #include "xyarray.h"
+#include "cgroup.h"
+#include "hist.h"
  
 struct perf_counts_values {
        union {
@@ -24,31 +26,66 @@ struct perf_counts {
        struct perf_counts_values cpu[];
 };
 
+struct perf_evsel;
+
+/*
+ * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
+ * more than one entry in the evlist.
+ */
+struct perf_sample_id {
+       struct hlist_node       node;
+       u64                     id;
+       struct perf_evsel       *evsel;
+};
+
+/** struct perf_evsel - event selector
+ *
+ * @name - Can be set to retain the original event name passed by the user,
+ *         so that when showing results in tools such as 'perf stat', we
+ *         show the name used, not some alias.
+ */
 struct perf_evsel {
        struct list_head        node;
        struct perf_event_attr  attr;
        char                    *filter;
        struct xyarray          *fd;
+       struct xyarray          *sample_id;
+       u64                     *id;
        struct perf_counts      *counts;
        int                     idx;
-       void                    *priv;
+       int                     ids;
+       struct hists            hists;
+       char                    *name;
+       union {
+               void            *priv;
+               off_t           id_offset;
+       };
+       struct cgroup_sel       *cgrp;
 };
 
 struct cpu_map;
 struct thread_map;
+struct perf_evlist;
 
 struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
+void perf_evsel__init(struct perf_evsel *evsel,
+                     struct perf_event_attr *attr, int idx);
+void perf_evsel__exit(struct perf_evsel *evsel);
 void perf_evsel__delete(struct perf_evsel *evsel);
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
+int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
 void perf_evsel__free_fd(struct perf_evsel *evsel);
+void perf_evsel__free_id(struct perf_evsel *evsel);
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 
-int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus);
-int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads);
-int perf_evsel__open(struct perf_evsel *evsel, 
-                    struct cpu_map *cpus, struct thread_map *threads);
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
+                            struct cpu_map *cpus, bool group, bool inherit);
+int perf_evsel__open_per_thread(struct perf_evsel *evsel,
+                               struct thread_map *threads, bool group, bool inherit);
+int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+                    struct thread_map *threads, bool group, bool inherit);
 
 #define perf_evsel__match(evsel, t, c)         \
        (evsel->attr.type == PERF_TYPE_##t &&   \
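A sketch of how the perf_sample_id nodes are meant to be used to map an id from a sample back to its evsel — assuming the evlist keeps a small hlist hash of these nodes (as the evlist code added elsewhere in this series does); the helper name and the 8-bit table width here are illustrative:

    /* Look an id up in a 2^8-bucket hlist table owned by the evlist. */
    static struct perf_evsel *evlist_id2evsel(struct hlist_head *heads, u64 id)
    {
            struct perf_sample_id *sid;
            struct hlist_node *pos;
            struct hlist_head *head = &heads[hash_64(id, 8)];

            hlist_for_each_entry(sid, pos, head, node)
                    if (sid->id == id)
                            return sid->evsel;

            return NULL;
    }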
index 67eeff571568d5ead2088bdcaee30097dc8d26d8..7adf4ad15d8fcfd4c90d1efbe72f2594e47a3575 100644
@@ -11,31 +11,12 @@ static const char *argv0_path;
 
 const char *system_path(const char *path)
 {
-#ifdef RUNTIME_PREFIX
-       static const char *prefix;
-#else
        static const char *prefix = PREFIX;
-#endif
        struct strbuf d = STRBUF_INIT;
 
        if (is_absolute_path(path))
                return path;
 
-#ifdef RUNTIME_PREFIX
-       assert(argv0_path);
-       assert(is_absolute_path(argv0_path));
-
-       if (!prefix &&
-           !(prefix = strip_path_suffix(argv0_path, PERF_EXEC_PATH)) &&
-           !(prefix = strip_path_suffix(argv0_path, BINDIR)) &&
-           !(prefix = strip_path_suffix(argv0_path, "perf"))) {
-               prefix = PREFIX;
-               fprintf(stderr, "RUNTIME_PREFIX requested, "
-                               "but prefix computation failed.  "
-                               "Using static fallback '%s'.\n", prefix);
-       }
-#endif
-
        strbuf_addf(&d, "%s/%s", prefix, path);
        path = strbuf_detach(&d, NULL);
        return path;
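With the RUNTIME_PREFIX branch gone, system_path() always joins relative paths against the build-time PREFIX. An illustrative call:

    const char *p = system_path("libexec/perf-core");
    /* -> "<PREFIX>/libexec/perf-core", detached from the strbuf;
     * an absolute argument is returned unchanged. */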
index f6a929e74981e48844b0af2d21599ff5bb725ac3..e5230c0ef95b91e5aeaff97c6efd59ad271d06d0 100644
@@ -8,6 +8,8 @@
 #include <linux/list.h>
 #include <linux/kernel.h>
 
+#include "evlist.h"
+#include "evsel.h"
 #include "util.h"
 #include "header.h"
 #include "../perf.h"
 
 static bool no_buildid_cache = false;
 
-/*
- * Create new perf.data header attribute:
- */
-struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr)
-{
-       struct perf_header_attr *self = malloc(sizeof(*self));
-
-       if (self != NULL) {
-               self->attr = *attr;
-               self->ids  = 0;
-               self->size = 1;
-               self->id   = malloc(sizeof(u64));
-               if (self->id == NULL) {
-                       free(self);
-                       self = NULL;
-               }
-       }
-
-       return self;
-}
-
-void perf_header_attr__delete(struct perf_header_attr *self)
-{
-       free(self->id);
-       free(self);
-}
-
-int perf_header_attr__add_id(struct perf_header_attr *self, u64 id)
-{
-       int pos = self->ids;
-
-       self->ids++;
-       if (self->ids > self->size) {
-               int nsize = self->size * 2;
-               u64 *nid = realloc(self->id, nsize * sizeof(u64));
-
-               if (nid == NULL)
-                       return -1;
-
-               self->size = nsize;
-               self->id = nid;
-       }
-       self->id[pos] = id;
-       return 0;
-}
-
-int perf_header__init(struct perf_header *self)
-{
-       self->size = 1;
-       self->attr = malloc(sizeof(void *));
-       return self->attr == NULL ? -ENOMEM : 0;
-}
-
-void perf_header__exit(struct perf_header *self)
-{
-       int i;
-       for (i = 0; i < self->attrs; ++i)
-                perf_header_attr__delete(self->attr[i]);
-       free(self->attr);
-}
-
-int perf_header__add_attr(struct perf_header *self,
-                         struct perf_header_attr *attr)
-{
-       if (self->frozen)
-               return -1;
-
-       if (self->attrs == self->size) {
-               int nsize = self->size * 2;
-               struct perf_header_attr **nattr;
-
-               nattr = realloc(self->attr, nsize * sizeof(void *));
-               if (nattr == NULL)
-                       return -1;
-
-               self->size = nsize;
-               self->attr = nattr;
-       }
-
-       self->attr[self->attrs++] = attr;
-       return 0;
-}
-
 static int event_count;
 static struct perf_trace_event_type *events;
 
@@ -147,19 +66,19 @@ struct perf_file_attr {
        struct perf_file_section        ids;
 };
 
-void perf_header__set_feat(struct perf_header *self, int feat)
+void perf_header__set_feat(struct perf_header *header, int feat)
 {
-       set_bit(feat, self->adds_features);
+       set_bit(feat, header->adds_features);
 }
 
-void perf_header__clear_feat(struct perf_header *self, int feat)
+void perf_header__clear_feat(struct perf_header *header, int feat)
 {
-       clear_bit(feat, self->adds_features);
+       clear_bit(feat, header->adds_features);
 }
 
-bool perf_header__has_feat(const struct perf_header *self, int feat)
+bool perf_header__has_feat(const struct perf_header *header, int feat)
 {
-       return test_bit(feat, self->adds_features);
+       return test_bit(feat, header->adds_features);
 }
 
 static int do_write(int fd, const void *buf, size_t size)
@@ -228,22 +147,22 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
        return 0;
 }
 
-static int machine__write_buildid_table(struct machine *self, int fd)
+static int machine__write_buildid_table(struct machine *machine, int fd)
 {
        int err;
        u16 kmisc = PERF_RECORD_MISC_KERNEL,
            umisc = PERF_RECORD_MISC_USER;
 
-       if (!machine__is_host(self)) {
+       if (!machine__is_host(machine)) {
                kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
                umisc = PERF_RECORD_MISC_GUEST_USER;
        }
 
-       err = __dsos__write_buildid_table(&self->kernel_dsos, self->pid,
+       err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
                                          kmisc, fd);
        if (err == 0)
-               err = __dsos__write_buildid_table(&self->user_dsos,
-                                                 self->pid, umisc, fd);
+               err = __dsos__write_buildid_table(&machine->user_dsos,
+                                                 machine->pid, umisc, fd);
        return err;
 }
 
@@ -270,11 +189,15 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
                          const char *name, bool is_kallsyms)
 {
        const size_t size = PATH_MAX;
-       char *realname = realpath(name, NULL),
-            *filename = malloc(size),
+       char *realname, *filename = malloc(size),
             *linkname = malloc(size), *targetname;
        int len, err = -1;
 
+       if (is_kallsyms)
+               realname = (char *)name;
+       else
+               realname = realpath(name, NULL);
+
        if (realname == NULL || filename == NULL || linkname == NULL)
                goto out_free;
 
@@ -306,7 +229,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
        if (symlink(targetname, linkname) == 0)
                err = 0;
 out_free:
-       free(realname);
+       if (!is_kallsyms)
+               free(realname);
        free(filename);
        free(linkname);
        return err;
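The asymmetry above exists because a kallsyms name is borrowed while every other name is resolved through realpath(3), which mallocs. The ownership rule, condensed:

    char *realname = is_kallsyms ? (char *)name          /* borrowed, don't free */
                                 : realpath(name, NULL); /* malloc'd by libc */
    /* ... */
    if (!is_kallsyms)
            free(realname);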
@@ -361,12 +285,12 @@ out_free:
        return err;
 }
 
-static int dso__cache_build_id(struct dso *self, const char *debugdir)
+static int dso__cache_build_id(struct dso *dso, const char *debugdir)
 {
-       bool is_kallsyms = self->kernel && self->long_name[0] != '/';
+       bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
 
-       return build_id_cache__add_b(self->build_id, sizeof(self->build_id),
-                                    self->long_name, debugdir, is_kallsyms);
+       return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
+                                    dso->long_name, debugdir, is_kallsyms);
 }
 
 static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
@@ -381,14 +305,14 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
        return err;
 }
 
-static int machine__cache_build_ids(struct machine *self, const char *debugdir)
+static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
 {
-       int ret = __dsos__cache_build_ids(&self->kernel_dsos, debugdir);
-       ret |= __dsos__cache_build_ids(&self->user_dsos, debugdir);
+       int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
+       ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
        return ret;
 }
 
-static int perf_session__cache_build_ids(struct perf_session *self)
+static int perf_session__cache_build_ids(struct perf_session *session)
 {
        struct rb_node *nd;
        int ret;
@@ -399,28 +323,28 @@ static int perf_session__cache_build_ids(struct perf_session *self)
        if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
                return -1;
 
-       ret = machine__cache_build_ids(&self->host_machine, debugdir);
+       ret = machine__cache_build_ids(&session->host_machine, debugdir);
 
-       for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                ret |= machine__cache_build_ids(pos, debugdir);
        }
        return ret ? -1 : 0;
 }
 
-static bool machine__read_build_ids(struct machine *self, bool with_hits)
+static bool machine__read_build_ids(struct machine *machine, bool with_hits)
 {
-       bool ret = __dsos__read_build_ids(&self->kernel_dsos, with_hits);
-       ret |= __dsos__read_build_ids(&self->user_dsos, with_hits);
+       bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
+       ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
        return ret;
 }
 
-static bool perf_session__read_build_ids(struct perf_session *self, bool with_hits)
+static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
 {
        struct rb_node *nd;
-       bool ret = machine__read_build_ids(&self->host_machine, with_hits);
+       bool ret = machine__read_build_ids(&session->host_machine, with_hits);
 
-       for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                ret |= machine__read_build_ids(pos, with_hits);
        }
@@ -428,7 +352,8 @@ static bool perf_session__read_build_ids(struct perf_session *self, bool with_hi
        return ret;
 }
 
-static int perf_header__adds_write(struct perf_header *self, int fd)
+static int perf_header__adds_write(struct perf_header *header,
+                                  struct perf_evlist *evlist, int fd)
 {
        int nr_sections;
        struct perf_session *session;
@@ -437,13 +362,13 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
        u64 sec_start;
        int idx = 0, err;
 
-       session = container_of(self, struct perf_session, header);
+       session = container_of(header, struct perf_session, header);
 
-       if (perf_header__has_feat(self, HEADER_BUILD_ID &&
-           !perf_session__read_build_ids(session, true)))
-               perf_header__clear_feat(self, HEADER_BUILD_ID);
+       if (perf_header__has_feat(header, HEADER_BUILD_ID) &&
+           !perf_session__read_build_ids(session, true))
+               perf_header__clear_feat(header, HEADER_BUILD_ID);
 
-       nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
+       nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
        if (!nr_sections)
                return 0;
 
@@ -453,28 +378,28 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
 
        sec_size = sizeof(*feat_sec) * nr_sections;
 
-       sec_start = self->data_offset + self->data_size;
+       sec_start = header->data_offset + header->data_size;
        lseek(fd, sec_start + sec_size, SEEK_SET);
 
-       if (perf_header__has_feat(self, HEADER_TRACE_INFO)) {
+       if (perf_header__has_feat(header, HEADER_TRACE_INFO)) {
                struct perf_file_section *trace_sec;
 
                trace_sec = &feat_sec[idx++];
 
                /* Write trace info */
                trace_sec->offset = lseek(fd, 0, SEEK_CUR);
-               read_tracing_data(fd, &evsel_list);
+               read_tracing_data(fd, &evlist->entries);
                trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
        }
 
-       if (perf_header__has_feat(self, HEADER_BUILD_ID)) {
+       if (perf_header__has_feat(header, HEADER_BUILD_ID)) {
                struct perf_file_section *buildid_sec;
 
                buildid_sec = &feat_sec[idx++];
 
                /* Write build-ids */
                buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
-               err = dsos__write_buildid_table(self, fd);
+               err = dsos__write_buildid_table(header, fd);
                if (err < 0) {
                        pr_debug("failed to write buildid table\n");
                        goto out_free;
@@ -513,32 +438,41 @@ int perf_header__write_pipe(int fd)
        return 0;
 }
 
-int perf_header__write(struct perf_header *self, int fd, bool at_exit)
+int perf_session__write_header(struct perf_session *session,
+                              struct perf_evlist *evlist,
+                              int fd, bool at_exit)
 {
        struct perf_file_header f_header;
        struct perf_file_attr   f_attr;
-       struct perf_header_attr *attr;
-       int i, err;
+       struct perf_header *header = &session->header;
+       struct perf_evsel *attr, *pair = NULL;
+       int err;
 
        lseek(fd, sizeof(f_header), SEEK_SET);
 
-       for (i = 0; i < self->attrs; i++) {
-               attr = self->attr[i];
+       if (session->evlist != evlist)
+               pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);
 
+       list_for_each_entry(attr, &evlist->entries, node) {
                attr->id_offset = lseek(fd, 0, SEEK_CUR);
                err = do_write(fd, attr->id, attr->ids * sizeof(u64));
                if (err < 0) {
+out_err_write:
                        pr_debug("failed to write perf header\n");
                        return err;
                }
+               if (session->evlist != evlist) {
+                       err = do_write(fd, pair->id, pair->ids * sizeof(u64));
+                       if (err < 0)
+                               goto out_err_write;
+                       attr->ids += pair->ids;
+                       pair = list_entry(pair->node.next, struct perf_evsel, node);
+               }
        }
 
+       header->attr_offset = lseek(fd, 0, SEEK_CUR);
 
-       self->attr_offset = lseek(fd, 0, SEEK_CUR);
-
-       for (i = 0; i < self->attrs; i++) {
-               attr = self->attr[i];
-
+       list_for_each_entry(attr, &evlist->entries, node) {
                f_attr = (struct perf_file_attr){
                        .attr = attr->attr,
                        .ids  = {
@@ -553,20 +487,20 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
                }
        }
 
-       self->event_offset = lseek(fd, 0, SEEK_CUR);
-       self->event_size = event_count * sizeof(struct perf_trace_event_type);
+       header->event_offset = lseek(fd, 0, SEEK_CUR);
+       header->event_size = event_count * sizeof(struct perf_trace_event_type);
        if (events) {
-               err = do_write(fd, events, self->event_size);
+               err = do_write(fd, events, header->event_size);
                if (err < 0) {
                        pr_debug("failed to write perf header events\n");
                        return err;
                }
        }
 
-       self->data_offset = lseek(fd, 0, SEEK_CUR);
+       header->data_offset = lseek(fd, 0, SEEK_CUR);
 
        if (at_exit) {
-               err = perf_header__adds_write(self, fd);
+               err = perf_header__adds_write(header, evlist, fd);
                if (err < 0)
                        return err;
        }
@@ -576,20 +510,20 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
                .size      = sizeof(f_header),
                .attr_size = sizeof(f_attr),
                .attrs = {
-                       .offset = self->attr_offset,
-                       .size   = self->attrs * sizeof(f_attr),
+                       .offset = header->attr_offset,
+                       .size   = evlist->nr_entries * sizeof(f_attr),
                },
                .data = {
-                       .offset = self->data_offset,
-                       .size   = self->data_size,
+                       .offset = header->data_offset,
+                       .size   = header->data_size,
                },
                .event_types = {
-                       .offset = self->event_offset,
-                       .size   = self->event_size,
+                       .offset = header->event_offset,
+                       .size   = header->event_size,
                },
        };
 
-       memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features));
+       memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
 
        lseek(fd, 0, SEEK_SET);
        err = do_write(fd, &f_header, sizeof(f_header));
@@ -597,26 +531,26 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
                pr_debug("failed to write perf header\n");
                return err;
        }
-       lseek(fd, self->data_offset + self->data_size, SEEK_SET);
+       lseek(fd, header->data_offset + header->data_size, SEEK_SET);
 
-       self->frozen = 1;
+       header->frozen = 1;
        return 0;
 }
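For orientation, the on-disk perf.data layout that perf_session__write_header() produces, reconstructed from the writes above (section order only; sizes vary):

    +--------------------+ offset 0 (f_header, written last via the final lseek)
    | perf_file_header   |
    +--------------------+ attr->id_offset (one run of u64 ids per evsel)
    | sample ids         |
    +--------------------+ header->attr_offset
    | perf_file_attr[]   | evlist->nr_entries entries
    +--------------------+ header->event_offset
    | trace event types  |
    +--------------------+ header->data_offset
    | event data         |
    +--------------------+ feature sections (appended at exit)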
 
-static int perf_header__getbuffer64(struct perf_header *self,
+static int perf_header__getbuffer64(struct perf_header *header,
                                    int fd, void *buf, size_t size)
 {
        if (readn(fd, buf, size) <= 0)
                return -1;
 
-       if (self->needs_swap)
+       if (header->needs_swap)
                mem_bswap_64(buf, size);
 
        return 0;
 }
 
-int perf_header__process_sections(struct perf_header *self, int fd,
-                                 int (*process)(struct perf_file_section *self,
+int perf_header__process_sections(struct perf_header *header, int fd,
+                                 int (*process)(struct perf_file_section *section,
                                                 struct perf_header *ph,
                                                 int feat, int fd))
 {
@@ -626,7 +560,7 @@ int perf_header__process_sections(struct perf_header *self, int fd,
        int idx = 0;
        int err = -1, feat = 1;
 
-       nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
+       nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
        if (!nr_sections)
                return 0;
 
@@ -636,17 +570,17 @@ int perf_header__process_sections(struct perf_header *self, int fd,
 
        sec_size = sizeof(*feat_sec) * nr_sections;
 
-       lseek(fd, self->data_offset + self->data_size, SEEK_SET);
+       lseek(fd, header->data_offset + header->data_size, SEEK_SET);
 
-       if (perf_header__getbuffer64(self, fd, feat_sec, sec_size))
+       if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
                goto out_free;
 
        err = 0;
        while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
-               if (perf_header__has_feat(self, feat)) {
+               if (perf_header__has_feat(header, feat)) {
                        struct perf_file_section *sec = &feat_sec[idx++];
 
-                       err = process(sec, self, feat, fd);
+                       err = process(sec, header, feat, fd);
                        if (err < 0)
                                break;
                }
@@ -657,35 +591,35 @@ out_free:
        return err;
 }
 
-int perf_file_header__read(struct perf_file_header *self,
+int perf_file_header__read(struct perf_file_header *header,
                           struct perf_header *ph, int fd)
 {
        lseek(fd, 0, SEEK_SET);
 
-       if (readn(fd, self, sizeof(*self)) <= 0 ||
-           memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
+       if (readn(fd, header, sizeof(*header)) <= 0 ||
+           memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
                return -1;
 
-       if (self->attr_size != sizeof(struct perf_file_attr)) {
-               u64 attr_size = bswap_64(self->attr_size);
+       if (header->attr_size != sizeof(struct perf_file_attr)) {
+               u64 attr_size = bswap_64(header->attr_size);
 
                if (attr_size != sizeof(struct perf_file_attr))
                        return -1;
 
-               mem_bswap_64(self, offsetof(struct perf_file_header,
+               mem_bswap_64(header, offsetof(struct perf_file_header,
                                            adds_features));
                ph->needs_swap = true;
        }
 
-       if (self->size != sizeof(*self)) {
+       if (header->size != sizeof(*header)) {
                /* Support the previous format */
-               if (self->size == offsetof(typeof(*self), adds_features))
-                       bitmap_zero(self->adds_features, HEADER_FEAT_BITS);
+               if (header->size == offsetof(typeof(*header), adds_features))
+                       bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
                else
                        return -1;
        }
 
-       memcpy(&ph->adds_features, &self->adds_features,
+       memcpy(&ph->adds_features, &header->adds_features,
               sizeof(ph->adds_features));
        /*
         * FIXME: hack that assumes that if we need swap the perf.data file
@@ -699,10 +633,10 @@ int perf_file_header__read(struct perf_file_header *self,
                perf_header__set_feat(ph, HEADER_BUILD_ID);
        }
 
-       ph->event_offset = self->event_types.offset;
-       ph->event_size   = self->event_types.size;
-       ph->data_offset  = self->data.offset;
-       ph->data_size    = self->data.size;
+       ph->event_offset = header->event_types.offset;
+       ph->event_size   = header->event_types.size;
+       ph->data_offset  = header->data.offset;
+       ph->data_size    = header->data.size;
        return 0;
 }
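The attr_size comparison above doubles as the endianness probe: if the stored value matches sizeof(struct perf_file_attr) only after bswap_64(), the file was written on a machine of the opposite byte order, so needs_swap is set and the fixed part of the header is swapped in place. For instance, were the expected size 0x70, a cross-endian file would present it as 0x7000000000000000 (value illustrative).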
 
@@ -761,11 +695,10 @@ out:
        return err;
 }
 
-static int perf_header__read_build_ids(struct perf_header *self,
-                       int input, u64 offset, u64 size)
+static int perf_header__read_build_ids(struct perf_header *header,
+                                      int input, u64 offset, u64 size)
 {
-       struct perf_session *session = container_of(self,
-                       struct perf_session, header);
+       struct perf_session *session = container_of(header, struct perf_session, header);
        struct build_id_event bev;
        char filename[PATH_MAX];
        u64 limit = offset + size;
@@ -777,7 +710,7 @@ static int perf_header__read_build_ids(struct perf_header *self,
                if (read(input, &bev, sizeof(bev)) != sizeof(bev))
                        goto out;
 
-               if (self->needs_swap)
+               if (header->needs_swap)
                        perf_event_header__bswap(&bev.header);
 
                len = bev.header.size - sizeof(bev);
@@ -793,13 +726,13 @@ out:
        return err;
 }
 
-static int perf_file_section__process(struct perf_file_section *self,
+static int perf_file_section__process(struct perf_file_section *section,
                                      struct perf_header *ph,
                                      int feat, int fd)
 {
-       if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) {
+       if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
                pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
-                         "%d, continuing...\n", self->offset, feat);
+                         "%d, continuing...\n", section->offset, feat);
                return 0;
        }
 
@@ -809,7 +742,7 @@ static int perf_file_section__process(struct perf_file_section *self,
                break;
 
        case HEADER_BUILD_ID:
-               if (perf_header__read_build_ids(ph, fd, self->offset, self->size))
+               if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
                        pr_debug("Failed to read buildids, continuing...\n");
                break;
        default:
@@ -819,21 +752,21 @@ static int perf_file_section__process(struct perf_file_section *self,
        return 0;
 }
 
-static int perf_file_header__read_pipe(struct perf_pipe_file_header *self,
+static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
                                       struct perf_header *ph, int fd,
                                       bool repipe)
 {
-       if (readn(fd, self, sizeof(*self)) <= 0 ||
-           memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
+       if (readn(fd, header, sizeof(*header)) <= 0 ||
+           memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
                return -1;
 
-       if (repipe && do_write(STDOUT_FILENO, self, sizeof(*self)) < 0)
+       if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
                return -1;
 
-       if (self->size != sizeof(*self)) {
-               u64 size = bswap_64(self->size);
+       if (header->size != sizeof(*header)) {
+               u64 size = bswap_64(header->size);
 
-               if (size != sizeof(*self))
+               if (size != sizeof(*header))
                        return -1;
 
                ph->needs_swap = true;
@@ -844,10 +777,10 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *self,
 
 static int perf_header__read_pipe(struct perf_session *session, int fd)
 {
-       struct perf_header *self = &session->header;
+       struct perf_header *header = &session->header;
        struct perf_pipe_file_header f_header;
 
-       if (perf_file_header__read_pipe(&f_header, self, fd,
+       if (perf_file_header__read_pipe(&f_header, header, fd,
                                        session->repipe) < 0) {
                pr_debug("incompatible file format\n");
                return -EINVAL;
@@ -858,18 +791,22 @@ static int perf_header__read_pipe(struct perf_session *session, int fd)
        return 0;
 }
 
-int perf_header__read(struct perf_session *session, int fd)
+int perf_session__read_header(struct perf_session *session, int fd)
 {
-       struct perf_header *self = &session->header;
+       struct perf_header *header = &session->header;
        struct perf_file_header f_header;
        struct perf_file_attr   f_attr;
        u64                     f_id;
        int nr_attrs, nr_ids, i, j;
 
+       session->evlist = perf_evlist__new(NULL, NULL);
+       if (session->evlist == NULL)
+               return -ENOMEM;
+
        if (session->fd_pipe)
                return perf_header__read_pipe(session, fd);
 
-       if (perf_file_header__read(&f_header, self, fd) < 0) {
+       if (perf_file_header__read(&f_header, header, fd) < 0) {
                pr_debug("incompatible file format\n");
                return -EINVAL;
        }
@@ -878,33 +815,39 @@ int perf_header__read(struct perf_session *session, int fd)
        lseek(fd, f_header.attrs.offset, SEEK_SET);
 
        for (i = 0; i < nr_attrs; i++) {
-               struct perf_header_attr *attr;
+               struct perf_evsel *evsel;
                off_t tmp;
 
-               if (perf_header__getbuffer64(self, fd, &f_attr, sizeof(f_attr)))
+               if (perf_header__getbuffer64(header, fd, &f_attr, sizeof(f_attr)))
                        goto out_errno;
 
                tmp = lseek(fd, 0, SEEK_CUR);
+               evsel = perf_evsel__new(&f_attr.attr, i);
 
-               attr = perf_header_attr__new(&f_attr.attr);
-               if (attr == NULL)
-                        return -ENOMEM;
+               if (evsel == NULL)
+                       goto out_delete_evlist;
+               /*
+                * Add it first so that if perf_evsel__alloc_id() fails, the
+                * entry still gets purged at perf_evlist__delete().
+                */
+               perf_evlist__add(session->evlist, evsel);
 
                nr_ids = f_attr.ids.size / sizeof(u64);
+               /*
+                * We don't have the cpu and thread maps on the header, so
+                * for allocating the perf_sample_id table we fake 1 cpu and
+                * nr_ids threads.
+                */
+               if (perf_evsel__alloc_id(evsel, 1, nr_ids))
+                       goto out_delete_evlist;
+
                lseek(fd, f_attr.ids.offset, SEEK_SET);
 
                for (j = 0; j < nr_ids; j++) {
-                       if (perf_header__getbuffer64(self, fd, &f_id, sizeof(f_id)))
+                       if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
                                goto out_errno;
 
-                       if (perf_header_attr__add_id(attr, f_id) < 0) {
-                               perf_header_attr__delete(attr);
-                               return -ENOMEM;
-                       }
-               }
-               if (perf_header__add_attr(self, attr) < 0) {
-                       perf_header_attr__delete(attr);
-                       return -ENOMEM;
+                       perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
                }
 
                lseek(fd, tmp, SEEK_SET);
@@ -915,93 +858,63 @@ int perf_header__read(struct perf_session *session, int fd)
                events = malloc(f_header.event_types.size);
                if (events == NULL)
                        return -ENOMEM;
-               if (perf_header__getbuffer64(self, fd, events,
+               if (perf_header__getbuffer64(header, fd, events,
                                             f_header.event_types.size))
                        goto out_errno;
                event_count =  f_header.event_types.size / sizeof(struct perf_trace_event_type);
        }
 
-       perf_header__process_sections(self, fd, perf_file_section__process);
+       perf_header__process_sections(header, fd, perf_file_section__process);
 
-       lseek(fd, self->data_offset, SEEK_SET);
+       lseek(fd, header->data_offset, SEEK_SET);
 
-       self->frozen = 1;
+       header->frozen = 1;
        return 0;
 out_errno:
        return -errno;
+
+out_delete_evlist:
+       perf_evlist__delete(session->evlist);
+       session->evlist = NULL;
+       return -ENOMEM;
 }
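A hedged usage sketch of the renamed entry point, assuming a perf_session already set up over a perf.data fd (includes and error handling trimmed):

    struct perf_evsel *evsel;

    if (perf_session__read_header(session, fd) < 0)
            return -1;

    /* The attrs now live on session->evlist instead of header->attr[]. */
    list_for_each_entry(evsel, &session->evlist->entries, node)
            printf("event %d: sample_type %#llx\n", evsel->idx,
                   (unsigned long long)evsel->attr.sample_type);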
 
-u64 perf_header__sample_type(struct perf_header *header)
+u64 perf_evlist__sample_type(struct perf_evlist *evlist)
 {
+       struct perf_evsel *pos;
        u64 type = 0;
-       int i;
-
-       for (i = 0; i < header->attrs; i++) {
-               struct perf_header_attr *attr = header->attr[i];
 
+       list_for_each_entry(pos, &evlist->entries, node) {
                if (!type)
-                       type = attr->attr.sample_type;
-               else if (type != attr->attr.sample_type)
+                       type = pos->attr.sample_type;
+               else if (type != pos->attr.sample_type)
                        die("non matching sample_type");
        }
 
        return type;
 }
 
-bool perf_header__sample_id_all(const struct perf_header *header)
+bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
 {
        bool value = false, first = true;
-       int i;
-
-       for (i = 0; i < header->attrs; i++) {
-               struct perf_header_attr *attr = header->attr[i];
+       struct perf_evsel *pos;
 
+       list_for_each_entry(pos, &evlist->entries, node) {
                if (first) {
-                       value = attr->attr.sample_id_all;
+                       value = pos->attr.sample_id_all;
                        first = false;
-               } else if (value != attr->attr.sample_id_all)
+               } else if (value != pos->attr.sample_id_all)
                        die("non matching sample_id_all");
        }
 
        return value;
 }
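These two helpers replace their perf_header__* counterparts and pair naturally with the new parser in evsel.c; a sketch:

    u64 type    = perf_evlist__sample_type(session->evlist);
    bool id_all = perf_evlist__sample_id_all(session->evlist);
    struct perf_sample sample;

    if (perf_event__parse_sample(event, type, id_all, &sample) < 0)
            return -1;      /* e.g. PERF_SAMPLE_READ, not handled yet */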
 
-struct perf_event_attr *
-perf_header__find_attr(u64 id, struct perf_header *header)
-{
-       int i;
-
-       /*
-        * We set id to -1 if the data file doesn't contain sample
-        * ids. This can happen when the data file contains one type
-        * of event and in that case, the header can still store the
-        * event attribute information. Check for this and avoid
-        * walking through the entire list of ids which may be large.
-        */
-       if (id == -1ULL) {
-               if (header->attrs > 0)
-                       return &header->attr[0]->attr;
-               return NULL;
-       }
-
-       for (i = 0; i < header->attrs; i++) {
-               struct perf_header_attr *attr = header->attr[i];
-               int j;
-
-               for (j = 0; j < attr->ids; j++) {
-                       if (attr->id[j] == id)
-                               return &attr->attr;
-               }
-       }
-
-       return NULL;
-}
-
-int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
-                          event__handler_t process,
-                          struct perf_session *session)
+int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
+                               perf_event__handler_t process,
+                               struct perf_session *session)
 {
-       event_t *ev;
+       union perf_event *ev;
        size_t size;
        int err;
 
@@ -1028,17 +941,15 @@ int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
        return err;
 }
 
-int event__synthesize_attrs(struct perf_header *self, event__handler_t process,
-                           struct perf_session *session)
+int perf_session__synthesize_attrs(struct perf_session *session,
+                                  perf_event__handler_t process)
 {
-       struct perf_header_attr *attr;
-       int i, err = 0;
-
-       for (i = 0; i < self->attrs; i++) {
-               attr = self->attr[i];
+       struct perf_evsel *attr;
+       int err = 0;
 
-               err = event__synthesize_attr(&attr->attr, attr->ids, attr->id,
-                                            process, session);
+       list_for_each_entry(attr, &session->evlist->entries, node) {
+               err = perf_event__synthesize_attr(&attr->attr, attr->ids,
+                                                 attr->id, process, session);
                if (err) {
                        pr_debug("failed to create perf header attribute\n");
                        return err;
@@ -1048,29 +959,39 @@ int event__synthesize_attrs(struct perf_header *self, event__handler_t process,
        return err;
 }
 
-int event__process_attr(event_t *self, struct perf_session *session)
+int perf_event__process_attr(union perf_event *event,
+                            struct perf_session *session)
 {
-       struct perf_header_attr *attr;
        unsigned int i, ids, n_ids;
+       struct perf_evsel *evsel;
+
+       if (session->evlist == NULL) {
+               session->evlist = perf_evlist__new(NULL, NULL);
+               if (session->evlist == NULL)
+                       return -ENOMEM;
+       }
 
-       attr = perf_header_attr__new(&self->attr.attr);
-       if (attr == NULL)
+       evsel = perf_evsel__new(&event->attr.attr,
+                               session->evlist->nr_entries);
+       if (evsel == NULL)
                return -ENOMEM;
 
-       ids = self->header.size;
-       ids -= (void *)&self->attr.id - (void *)self;
+       perf_evlist__add(session->evlist, evsel);
+
+       ids = event->header.size;
+       ids -= (void *)&event->attr.id - (void *)event;
        n_ids = ids / sizeof(u64);
+       /*
+        * We don't have the cpu and thread maps on the header, so
+        * for allocating the perf_sample_id table we fake 1 cpu and
+        * n_ids threads.
+        */
+       if (perf_evsel__alloc_id(evsel, 1, n_ids))
+               return -ENOMEM;
 
        for (i = 0; i < n_ids; i++) {
-               if (perf_header_attr__add_id(attr, self->attr.id[i]) < 0) {
-                       perf_header_attr__delete(attr);
-                       return -ENOMEM;
-               }
-       }
-
-       if (perf_header__add_attr(&session->header, attr) < 0) {
-               perf_header_attr__delete(attr);
-               return -ENOMEM;
+               perf_evlist__id_add(session->evlist, evsel, 0, i,
+                                   event->attr.id[i]);
        }
 
        perf_session__update_sample_type(session);
@@ -1078,11 +999,11 @@ int event__process_attr(event_t *self, struct perf_session *session)
        return 0;
 }
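Here 'ids' is the byte count of the trailing u64 ids: header.size minus the offset of attr.id within the record. If, say, the fixed part of the record were 120 bytes and header.size were 152, then n_ids = (152 - 120) / 8 = 4 (sizes illustrative).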
 
-int event__synthesize_event_type(u64 event_id, char *name,
-                                event__handler_t process,
-                                struct perf_session *session)
+int perf_event__synthesize_event_type(u64 event_id, char *name,
+                                     perf_event__handler_t process,
+                                     struct perf_session *session)
 {
-       event_t ev;
+       union perf_event ev;
        size_t size = 0;
        int err = 0;
 
@@ -1103,8 +1024,8 @@ int event__synthesize_event_type(u64 event_id, char *name,
        return err;
 }
 
-int event__synthesize_event_types(event__handler_t process,
-                                 struct perf_session *session)
+int perf_event__synthesize_event_types(perf_event__handler_t process,
+                                      struct perf_session *session)
 {
        struct perf_trace_event_type *type;
        int i, err = 0;
@@ -1112,8 +1033,9 @@ int event__synthesize_event_types(event__handler_t process,
        for (i = 0; i < event_count; i++) {
                type = &events[i];
 
-               err = event__synthesize_event_type(type->event_id, type->name,
-                                                  process, session);
+               err = perf_event__synthesize_event_type(type->event_id,
+                                                       type->name, process,
+                                                       session);
                if (err) {
                        pr_debug("failed to create perf header event type\n");
                        return err;
@@ -1123,28 +1045,28 @@ int event__synthesize_event_types(event__handler_t process,
        return err;
 }
 
-int event__process_event_type(event_t *self,
-                             struct perf_session *session __unused)
+int perf_event__process_event_type(union perf_event *event,
+                                  struct perf_session *session __unused)
 {
-       if (perf_header__push_event(self->event_type.event_type.event_id,
-                                   self->event_type.event_type.name) < 0)
+       if (perf_header__push_event(event->event_type.event_type.event_id,
+                                   event->event_type.event_type.name) < 0)
                return -ENOMEM;
 
        return 0;
 }
 
-int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
-                                  event__handler_t process,
+int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
+                                        perf_event__handler_t process,
                                   struct perf_session *session __unused)
 {
-       event_t ev;
+       union perf_event ev;
        ssize_t size = 0, aligned_size = 0, padding;
-       int err = 0;
+       int err __used = 0;
 
        memset(&ev, 0, sizeof(ev));
 
        ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
-       size = read_tracing_data_size(fd, pattrs);
+       size = read_tracing_data_size(fd, &evlist->entries);
        if (size <= 0)
                return size;
        aligned_size = ALIGN(size, sizeof(u64));
@@ -1154,16 +1076,16 @@ int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
 
        process(&ev, NULL, session);
 
-       err = read_tracing_data(fd, pattrs);
+       err = read_tracing_data(fd, &evlist->entries);
        write_padded(fd, NULL, 0, padding);
 
        return aligned_size;
 }
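ALIGN() rounds the tracing-data size up to the next u64 boundary and the shortfall is emitted as padding, e.g. size = 4100 gives aligned_size = 4104 and padding = 4, keeping the record stream 8-byte aligned.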
 
-int event__process_tracing_data(event_t *self,
-                               struct perf_session *session)
+int perf_event__process_tracing_data(union perf_event *event,
+                                    struct perf_session *session)
 {
-       ssize_t size_read, padding, size = self->tracing_data.size;
+       ssize_t size_read, padding, size = event->tracing_data.size;
        off_t offset = lseek(session->fd, 0, SEEK_CUR);
        char buf[BUFSIZ];
 
@@ -1189,12 +1111,12 @@ int event__process_tracing_data(event_t *self,
        return size_read + padding;
 }
 
-int event__synthesize_build_id(struct dso *pos, u16 misc,
-                              event__handler_t process,
-                              struct machine *machine,
-                              struct perf_session *session)
+int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
+                                   perf_event__handler_t process,
+                                   struct machine *machine,
+                                   struct perf_session *session)
 {
-       event_t ev;
+       union perf_event ev;
        size_t len;
        int err = 0;
 
@@ -1217,11 +1139,11 @@ int event__synthesize_build_id(struct dso *pos, u16 misc,
        return err;
 }
 
-int event__process_build_id(event_t *self,
-                           struct perf_session *session)
+int perf_event__process_build_id(union perf_event *event,
+                                struct perf_session *session)
 {
-       __event_process_build_id(&self->build_id,
-                                self->build_id.filename,
+       __event_process_build_id(&event->build_id,
+                                event->build_id.filename,
                                 session);
        return 0;
 }
index 33f16be7b72fdad5a3757acbfbceb52b3966c593..456661d7f10e7a2594699ae005e451c25914b21c 100644
@@ -9,13 +9,6 @@
 
 #include <linux/bitmap.h>
 
-struct perf_header_attr {
-       struct perf_event_attr attr;
-       int ids, size;
-       u64 *id;
-       off_t id_offset;
-};
-
 enum {
        HEADER_TRACE_INFO = 1,
        HEADER_BUILD_ID,
@@ -46,14 +39,12 @@ struct perf_pipe_file_header {
 
 struct perf_header;
 
-int perf_file_header__read(struct perf_file_header *self,
+int perf_file_header__read(struct perf_file_header *header,
                           struct perf_header *ph, int fd);
 
 struct perf_header {
        int                     frozen;
-       int                     attrs, size;
        bool                    needs_swap;
-       struct perf_header_attr **attr;
        s64                     attr_offset;
        u64                     data_offset;
        u64                     data_size;
@@ -62,34 +53,25 @@ struct perf_header {
        DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
 };
 
-int perf_header__init(struct perf_header *self);
-void perf_header__exit(struct perf_header *self);
+struct perf_evlist;
 
-int perf_header__read(struct perf_session *session, int fd);
-int perf_header__write(struct perf_header *self, int fd, bool at_exit);
+int perf_session__read_header(struct perf_session *session, int fd);
+int perf_session__write_header(struct perf_session *session,
+                              struct perf_evlist *evlist,
+                              int fd, bool at_exit);
 int perf_header__write_pipe(int fd);
 
-int perf_header__add_attr(struct perf_header *self,
-                         struct perf_header_attr *attr);
-
 int perf_header__push_event(u64 id, const char *name);
 char *perf_header__find_event(u64 id);
 
-struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr);
-void perf_header_attr__delete(struct perf_header_attr *self);
+u64 perf_evlist__sample_type(struct perf_evlist *evlist);
+bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);
+void perf_header__set_feat(struct perf_header *header, int feat);
+void perf_header__clear_feat(struct perf_header *header, int feat);
+bool perf_header__has_feat(const struct perf_header *header, int feat);
 
-int perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
-
-u64 perf_header__sample_type(struct perf_header *header);
-bool perf_header__sample_id_all(const struct perf_header *header);
-struct perf_event_attr *
-perf_header__find_attr(u64 id, struct perf_header *header);
-void perf_header__set_feat(struct perf_header *self, int feat);
-void perf_header__clear_feat(struct perf_header *self, int feat);
-bool perf_header__has_feat(const struct perf_header *self, int feat);
-
-int perf_header__process_sections(struct perf_header *self, int fd,
-                                 int (*process)(struct perf_file_section *self,
+int perf_header__process_sections(struct perf_header *header, int fd,
+                                 int (*process)(struct perf_file_section *section,
                                                 struct perf_header *ph,
                                                 int feat, int fd));
 
@@ -97,32 +79,31 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
                          const char *name, bool is_kallsyms);
 int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
 
-int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
-                          event__handler_t process,
-                          struct perf_session *session);
-int event__synthesize_attrs(struct perf_header *self,
-                           event__handler_t process,
-                           struct perf_session *session);
-int event__process_attr(event_t *self, struct perf_session *session);
-
-int event__synthesize_event_type(u64 event_id, char *name,
-                                event__handler_t process,
-                                struct perf_session *session);
-int event__synthesize_event_types(event__handler_t process,
-                                 struct perf_session *session);
-int event__process_event_type(event_t *self,
-                             struct perf_session *session);
-
-int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
-                                  event__handler_t process,
-                                  struct perf_session *session);
-int event__process_tracing_data(event_t *self,
+int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
+                               perf_event__handler_t process,
                                struct perf_session *session);
+int perf_session__synthesize_attrs(struct perf_session *session,
+                                  perf_event__handler_t process);
+int perf_event__process_attr(union perf_event *event, struct perf_session *session);
+
+int perf_event__synthesize_event_type(u64 event_id, char *name,
+                                     perf_event__handler_t process,
+                                     struct perf_session *session);
+int perf_event__synthesize_event_types(perf_event__handler_t process,
+                                      struct perf_session *session);
+int perf_event__process_event_type(union perf_event *event,
+                                  struct perf_session *session);
 
-int event__synthesize_build_id(struct dso *pos, u16 misc,
-                              event__handler_t process,
-                              struct machine *machine,
-                              struct perf_session *session);
-int event__process_build_id(event_t *self, struct perf_session *session);
-
+int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
+                                       perf_event__handler_t process,
+                                       struct perf_session *session);
+int perf_event__process_tracing_data(union perf_event *event,
+                                    struct perf_session *session);
+
+int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
+                                   perf_event__handler_t process,
+                                   struct machine *machine,
+                                   struct perf_session *session);
+int perf_event__process_build_id(union perf_event *event,
+                                struct perf_session *session);
 #endif /* __PERF_HEADER_H */
index 32f4f1f2f6e4410c5e94d25fd8a7113d2676f493..627a02e03c57ab381d97cf2be9756997851d752d 100644
@@ -1,3 +1,4 @@
+#include "annotate.h"
 #include "util.h"
 #include "build-id.h"
 #include "hist.h"
@@ -49,6 +50,15 @@ static void hists__calc_col_len(struct hists *self, struct hist_entry *h)
 
        if (h->ms.sym)
                hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen);
+       else {
+               const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
+
+               if (hists__col_len(self, HISTC_DSO) < unresolved_col_width &&
+                   !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
+                   !symbol_conf.dso_list)
+                       hists__set_col_len(self, HISTC_DSO,
+                                          unresolved_col_width);
+       }
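BITS_PER_LONG / 4 is the number of hex digits in a full pointer-width address (16 on 64-bit builds), so widening the column to that keeps a raw, unresolved address from being truncated.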
 
        len = thread__comm_len(h->thread);
        if (hists__new_col_len(self, HISTC_COMM, len))
@@ -211,7 +221,9 @@ void hist_entry__free(struct hist_entry *he)
  * collapse the histogram
  */
 
-static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
+static bool hists__collapse_insert_entry(struct hists *self,
+                                        struct rb_root *root,
+                                        struct hist_entry *he)
 {
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
@@ -226,8 +238,11 @@ static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
 
                if (!cmp) {
                        iter->period += he->period;
-                       if (symbol_conf.use_callchain)
-                               callchain_merge(iter->callchain, he->callchain);
+                       if (symbol_conf.use_callchain) {
+                               callchain_cursor_reset(&self->callchain_cursor);
+                               callchain_merge(&self->callchain_cursor, iter->callchain,
+                                               he->callchain);
+                       }
                        hist_entry__free(he);
                        return false;
                }
@@ -262,7 +277,7 @@ void hists__collapse_resort(struct hists *self)
                next = rb_next(&n->rb_node);
 
                rb_erase(&n->rb_node, &self->entries);
-               if (collapse__insert_entry(&tmp, n))
+               if (hists__collapse_insert_entry(self, &tmp, n))
                        hists__inc_nr_entries(self, n);
        }
 
@@ -425,7 +440,7 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
                u64 cumul;
 
                child = rb_entry(node, struct callchain_node, rb_node);
-               cumul = cumul_hits(child);
+               cumul = callchain_cumul_hits(child);
                remaining -= cumul;
 
                /*
@@ -585,6 +600,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
 {
        struct sort_entry *se;
        u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
+       u64 nr_events;
        const char *sep = symbol_conf.field_sep;
        int ret;
 
@@ -593,6 +609,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
 
        if (pair_hists) {
                period = self->pair ? self->pair->period : 0;
+               nr_events = self->pair ? self->pair->nr_events : 0;
                total = pair_hists->stats.total_period;
                period_sys = self->pair ? self->pair->period_sys : 0;
                period_us = self->pair ? self->pair->period_us : 0;
@@ -600,6 +617,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
                period_guest_us = self->pair ? self->pair->period_guest_us : 0;
        } else {
                period = self->period;
+               nr_events = self->nr_events;
                total = session_total;
                period_sys = self->period_sys;
                period_us = self->period_us;
@@ -640,9 +658,9 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
 
        if (symbol_conf.show_nr_samples) {
                if (sep)
-                       ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
+                       ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
                else
-                       ret += snprintf(s + ret, size - ret, "%11" PRIu64, period);
+                       ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
        }
 
        if (pair_hists) {
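
The --show-nr-samples column now prints nr_events (the raw sample
count) rather than period (the summed sampling periods); the two differ
whenever the sampling period is greater than 1. A sketch of the
distinction, with hypothetical accumulation:

    /* per incoming sample (sketch): */
    he->nr_events += 1;              /* how many samples hit this entry */
    he->period    += sample->period; /* how much weight they carried    */
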
@@ -944,225 +962,14 @@ void hists__filter_by_thread(struct hists *self, const struct thread *thread)
        }
 }
 
-static int symbol__alloc_hist(struct symbol *self)
-{
-       struct sym_priv *priv = symbol__priv(self);
-       const int size = (sizeof(*priv->hist) +
-                         (self->end - self->start) * sizeof(u64));
-
-       priv->hist = zalloc(size);
-       return priv->hist == NULL ? -1 : 0;
-}
-
-int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
-{
-       unsigned int sym_size, offset;
-       struct symbol *sym = self->ms.sym;
-       struct sym_priv *priv;
-       struct sym_hist *h;
-
-       if (!sym || !self->ms.map)
-               return 0;
-
-       priv = symbol__priv(sym);
-       if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
-               return -ENOMEM;
-
-       sym_size = sym->end - sym->start;
-       offset = ip - sym->start;
-
-       pr_debug3("%s: ip=%#" PRIx64 "\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
-
-       if (offset >= sym_size)
-               return 0;
-
-       h = priv->hist;
-       h->sum++;
-       h->ip[offset]++;
-
-       pr_debug3("%#" PRIx64 " %s: period++ [ip: %#" PRIx64 ", %#" PRIx64
-                 "] => %" PRIu64 "\n", self->ms.sym->start, self->ms.sym->name,
-                 ip, ip - self->ms.sym->start, h->ip[offset]);
-       return 0;
-}
-
-static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize)
-{
-       struct objdump_line *self = malloc(sizeof(*self) + privsize);
-
-       if (self != NULL) {
-               self->offset = offset;
-               self->line = line;
-       }
-
-       return self;
-}
-
-void objdump_line__free(struct objdump_line *self)
-{
-       free(self->line);
-       free(self);
-}
-
-static void objdump__add_line(struct list_head *head, struct objdump_line *line)
-{
-       list_add_tail(&line->node, head);
-}
-
-struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
-                                              struct objdump_line *pos)
-{
-       list_for_each_entry_continue(pos, head, node)
-               if (pos->offset >= 0)
-                       return pos;
-
-       return NULL;
-}
-
-static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
-                                         struct list_head *head, size_t privsize)
+int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
 {
-       struct symbol *sym = self->ms.sym;
-       struct objdump_line *objdump_line;
-       char *line = NULL, *tmp, *tmp2, *c;
-       size_t line_len;
-       s64 line_ip, offset = -1;
-
-       if (getline(&line, &line_len, file) < 0)
-               return -1;
-
-       if (!line)
-               return -1;
-
-       while (line_len != 0 && isspace(line[line_len - 1]))
-               line[--line_len] = '\0';
-
-       c = strchr(line, '\n');
-       if (c)
-               *c = 0;
-
-       line_ip = -1;
-
-       /*
-        * Strip leading spaces:
-        */
-       tmp = line;
-       while (*tmp) {
-               if (*tmp != ' ')
-                       break;
-               tmp++;
-       }
-
-       if (*tmp) {
-               /*
-                * Parse hexa addresses followed by ':'
-                */
-               line_ip = strtoull(tmp, &tmp2, 16);
-               if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
-                       line_ip = -1;
-       }
-
-       if (line_ip != -1) {
-               u64 start = map__rip_2objdump(self->ms.map, sym->start),
-                   end = map__rip_2objdump(self->ms.map, sym->end);
-
-               offset = line_ip - start;
-               if (offset < 0 || (u64)line_ip > end)
-                       offset = -1;
-       }
-
-       objdump_line = objdump_line__new(offset, line, privsize);
-       if (objdump_line == NULL) {
-               free(line);
-               return -1;
-       }
-       objdump__add_line(head, objdump_line);
-
-       return 0;
+       return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
 }
 
-int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
-                        size_t privsize)
+int hist_entry__annotate(struct hist_entry *he, size_t privsize)
 {
-       struct symbol *sym = self->ms.sym;
-       struct map *map = self->ms.map;
-       struct dso *dso = map->dso;
-       char *filename = dso__build_id_filename(dso, NULL, 0);
-       bool free_filename = true;
-       char command[PATH_MAX * 2];
-       FILE *file;
-       int err = 0;
-       u64 len;
-       char symfs_filename[PATH_MAX];
-
-       if (filename) {
-               snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
-                        symbol_conf.symfs, filename);
-       }
-
-       if (filename == NULL) {
-               if (dso->has_build_id) {
-                       pr_err("Can't annotate %s: not enough memory\n",
-                              sym->name);
-                       return -ENOMEM;
-               }
-               goto fallback;
-       } else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
-                  strstr(command, "[kernel.kallsyms]") ||
-                  access(symfs_filename, R_OK)) {
-               free(filename);
-fallback:
-               /*
-                * If we don't have build-ids or the build-id file isn't in the
-                * cache, or is just a kallsyms file, well, lets hope that this
-                * DSO is the same as when 'perf record' ran.
-                */
-               filename = dso->long_name;
-               snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
-                        symbol_conf.symfs, filename);
-               free_filename = false;
-       }
-
-       if (dso->origin == DSO__ORIG_KERNEL) {
-               if (dso->annotate_warned)
-                       goto out_free_filename;
-               err = -ENOENT;
-               dso->annotate_warned = 1;
-               pr_err("Can't annotate %s: No vmlinux file was found in the "
-                      "path\n", sym->name);
-               goto out_free_filename;
-       }
-
-       pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
-                filename, sym->name, map->unmap_ip(map, sym->start),
-                map->unmap_ip(map, sym->end));
-
-       len = sym->end - sym->start;
-
-       pr_debug("annotating [%p] %30s : [%p] %30s\n",
-                dso, dso->long_name, sym, sym->name);
-
-       snprintf(command, sizeof(command),
-                "objdump --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand",
-                map__rip_2objdump(map, sym->start),
-                map__rip_2objdump(map, sym->end),
-                symfs_filename, filename);
-
-       pr_debug("Executing: %s\n", command);
-
-       file = popen(command, "r");
-       if (!file)
-               goto out_free_filename;
-
-       while (!feof(file))
-               if (hist_entry__parse_objdump_line(self, file, head, privsize) < 0)
-                       break;
-
-       pclose(file);
-out_free_filename:
-       if (free_filename)
-               free(filename);
-       return err;
+       return symbol__annotate(he->ms.sym, he->ms.map, privsize);
 }
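
Both helpers now delegate to the new annotate layer (note the
"annotate.h" include at the top of this file). A caller-side sketch,
with illustrative evidx/address values:

    /* equivalent after this change (0 and al.addr are illustrative): */
    hist_entry__inc_addr_samples(he, 0, al.addr);
    /* == symbol__inc_addr_samples(he->ms.sym, he->ms.map, 0, al.addr) */
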
 
 void hists__inc_nr_events(struct hists *self, u32 type)
@@ -1177,8 +984,12 @@ size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
        size_t ret = 0;
 
        for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
-               const char *name = event__get_event_name(i);
+               const char *name;
+
+               if (self->stats.nr_events[i] == 0)
+                       continue;
 
+               name = perf_event__name(i);
                if (!strcmp(name, "UNKNOWN"))
                        continue;
 
index ee789856a8c94644e189f0dc8a7be7933469a6cb..cb6858a2f9a35df35f6dff6943360f2e76499c5d 100644 (file)
@@ -9,33 +9,6 @@ extern struct callchain_param callchain_param;
 struct hist_entry;
 struct addr_location;
 struct symbol;
-struct rb_root;
-
-struct objdump_line {
-       struct list_head node;
-       s64              offset;
-       char             *line;
-};
-
-void objdump_line__free(struct objdump_line *self);
-struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
-                                              struct objdump_line *pos);
-
-struct sym_hist {
-       u64             sum;
-       u64             ip[0];
-};
-
-struct sym_ext {
-       struct rb_node  node;
-       double          percent;
-       char            *path;
-};
-
-struct sym_priv {
-       struct sym_hist *hist;
-       struct sym_ext  *ext;
-};
 
 /*
  * The kernel collects the number of events it couldn't send in a stretch and
@@ -69,14 +42,13 @@ enum hist_column {
 };
 
 struct hists {
-       struct rb_node          rb_node;
        struct rb_root          entries;
        u64                     nr_entries;
        struct events_stats     stats;
-       u64                     config;
        u64                     event_stream;
-       u32                     type;
        u16                     col_len[HISTC_NR_COLS];
+       /* Best would be to reuse the session callchain cursor */
+       struct callchain_cursor callchain_cursor;
 };
 
 struct hist_entry *__hists__add_entry(struct hists *self,
@@ -102,9 +74,8 @@ size_t hists__fprintf_nr_events(struct hists *self, FILE *fp);
 size_t hists__fprintf(struct hists *self, struct hists *pair,
                      bool show_displacement, FILE *fp);
 
-int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip);
-int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
-                        size_t privsize);
+int hist_entry__inc_addr_samples(struct hist_entry *self, int evidx, u64 addr);
+int hist_entry__annotate(struct hist_entry *self, size_t privsize);
 
 void hists__filter_by_dso(struct hists *self, const struct dso *dso);
 void hists__filter_by_thread(struct hists *self, const struct thread *thread);
@@ -113,21 +84,18 @@ u16 hists__col_len(struct hists *self, enum hist_column col);
 void hists__set_col_len(struct hists *self, enum hist_column col, u16 len);
 bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len);
 
-#ifdef NO_NEWT_SUPPORT
-static inline int hists__browse(struct hists *self __used,
-                               const char *helpline __used,
-                               const char *ev_name __used)
-{
-       return 0;
-}
+struct perf_evlist;
 
-static inline int hists__tui_browse_tree(struct rb_root *self __used,
-                                        const char *help __used)
+#ifdef NO_NEWT_SUPPORT
+static inline
+int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used,
+                                 const char *help __used)
 {
        return 0;
 }
 
-static inline int hist_entry__tui_annotate(struct hist_entry *self __used)
+static inline int hist_entry__tui_annotate(struct hist_entry *self __used,
+                                          int evidx __used)
 {
        return 0;
 }
@@ -135,14 +103,12 @@ static inline int hist_entry__tui_annotate(struct hist_entry *self __used)
 #define KEY_RIGHT -2
 #else
 #include <newt.h>
-int hists__browse(struct hists *self, const char *helpline,
-                 const char *ev_name);
-int hist_entry__tui_annotate(struct hist_entry *self);
+int hist_entry__tui_annotate(struct hist_entry *self, int evidx);
 
 #define KEY_LEFT NEWT_KEY_LEFT
 #define KEY_RIGHT NEWT_KEY_RIGHT
 
-int hists__tui_browse_tree(struct rb_root *self, const char *help);
+int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help);
 #endif
 
 unsigned int hists__sort_list_width(struct hists *self);
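
The NO_NEWT_SUPPORT block above uses a common compile-time stubbing
pattern: when the TUI backend is built out, callers link against inline
no-ops instead of sprinkling #ifdefs. A generic sketch of the pattern
(names hypothetical):

    #ifdef NO_FANCY_UI
    static inline int ui__browse(void *obj __used) { return 0; }
    #else
    int ui__browse(void *obj);
    #endif
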
index f5ca26e53fbbad820623472a052fa848a9311c0b..356c7e467b83fd59af5cd191a577197ee88349b8 100644 (file)
@@ -1,3 +1,4 @@
+#include <linux/kernel.h>
 #include "../../../../include/linux/list.h"
 
 #ifndef PERF_LIST_H
index 135f69baf966d8a2a2525c6f9478958cf2932bf2..54a7e2634d582812546d1ed1c85a0e80313019cd 100644 (file)
@@ -1,6 +1,7 @@
 #include "../../../include/linux/hw_breakpoint.h"
 #include "util.h"
 #include "../perf.h"
+#include "evlist.h"
 #include "evsel.h"
 #include "parse-options.h"
 #include "parse-events.h"
 #include "header.h"
 #include "debugfs.h"
 
-int                            nr_counters;
-
-LIST_HEAD(evsel_list);
-
 struct event_symbol {
        u8              type;
        u64             config;
@@ -271,6 +268,9 @@ const char *event_name(struct perf_evsel *evsel)
        u64 config = evsel->attr.config;
        int type = evsel->attr.type;
 
+       if (evsel->name)
+               return evsel->name;
+
        return __event_name(type, config);
 }
 
@@ -449,8 +449,8 @@ parse_single_tracepoint_event(char *sys_name,
 /* sys + ':' + event + ':' + flags*/
 #define MAX_EVOPT_LEN  (MAX_EVENT_LENGTH * 2 + 2 + 128)
 static enum event_result
-parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp,
-                               char *flags)
+parse_multiple_tracepoint_event(const struct option *opt, char *sys_name,
+                               const char *evt_exp, char *flags)
 {
        char evt_path[MAXPATHLEN];
        struct dirent *evt_ent;
@@ -483,15 +483,16 @@ parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp,
                if (len < 0)
                        return EVT_FAILED;
 
-               if (parse_events(NULL, event_opt, 0))
+               if (parse_events(opt, event_opt, 0))
                        return EVT_FAILED;
        }
 
        return EVT_HANDLED_ALL;
 }
 
-static enum event_result parse_tracepoint_event(const char **strp,
-                                   struct perf_event_attr *attr)
+static enum event_result
+parse_tracepoint_event(const struct option *opt, const char **strp,
+                      struct perf_event_attr *attr)
 {
        const char *evt_name;
        char *flags = NULL, *comma_loc;
@@ -530,7 +531,7 @@ static enum event_result parse_tracepoint_event(const char **strp,
                return EVT_FAILED;
        if (strpbrk(evt_name, "*?")) {
                *strp += strlen(sys_name) + evt_length + 1; /* 1 == the ':' */
-               return parse_multiple_tracepoint_event(sys_name, evt_name,
+               return parse_multiple_tracepoint_event(opt, sys_name, evt_name,
                                                       flags);
        } else {
                return parse_single_tracepoint_event(sys_name, evt_name,
@@ -740,11 +741,12 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr)
  * Symbolic names are (almost) exactly matched.
  */
 static enum event_result
-parse_event_symbols(const char **str, struct perf_event_attr *attr)
+parse_event_symbols(const struct option *opt, const char **str,
+                   struct perf_event_attr *attr)
 {
        enum event_result ret;
 
-       ret = parse_tracepoint_event(str, attr);
+       ret = parse_tracepoint_event(opt, str, attr);
        if (ret != EVT_FAILED)
                goto modifier;
 
@@ -778,14 +780,17 @@ modifier:
        return ret;
 }
 
-int parse_events(const struct option *opt __used, const char *str, int unset __used)
+int parse_events(const struct option *opt, const char *str, int unset __used)
 {
+       struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
        struct perf_event_attr attr;
        enum event_result ret;
+       const char *ostr;
 
        for (;;) {
+               ostr = str;
                memset(&attr, 0, sizeof(attr));
-               ret = parse_event_symbols(&str, &attr);
+               ret = parse_event_symbols(opt, &str, &attr);
                if (ret == EVT_FAILED)
                        return -1;
 
@@ -794,12 +799,15 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
 
                if (ret != EVT_HANDLED_ALL) {
                        struct perf_evsel *evsel;
-                       evsel = perf_evsel__new(&attr,
-                                               nr_counters);
+                       evsel = perf_evsel__new(&attr, evlist->nr_entries);
                        if (evsel == NULL)
                                return -1;
-                       list_add_tail(&evsel->node, &evsel_list);
-                       ++nr_counters;
+                       perf_evlist__add(evlist, evsel);
+
+                       evsel->name = calloc(str - ostr + 1, 1);
+                       if (!evsel->name)
+                               return -1;
+                       strncpy(evsel->name, ostr, str - ostr);
                }
 
                if (*str == 0)
@@ -813,13 +821,14 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
        return 0;
 }
 
-int parse_filter(const struct option *opt __used, const char *str,
+int parse_filter(const struct option *opt, const char *str,
                 int unset __used)
 {
+       struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
        struct perf_evsel *last = NULL;
 
-       if (!list_empty(&evsel_list))
-               last = list_entry(evsel_list.prev, struct perf_evsel, node);
+       if (evlist->nr_entries > 0)
+               last = list_entry(evlist->entries.prev, struct perf_evsel, node);
 
        if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
                fprintf(stderr,
@@ -849,7 +858,7 @@ static const char * const event_type_descriptors[] = {
  * Print the events from <debugfs_mount_point>/tracing/events
  */
 
-static void print_tracepoint_events(void)
+void print_tracepoint_events(const char *subsys_glob, const char *event_glob)
 {
        DIR *sys_dir, *evt_dir;
        struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
@@ -864,6 +873,9 @@ static void print_tracepoint_events(void)
                return;
 
        for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+               if (subsys_glob != NULL && 
+                   !strglobmatch(sys_dirent.d_name, subsys_glob))
+                       continue;
 
                snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
                         sys_dirent.d_name);
@@ -872,6 +884,10 @@ static void print_tracepoint_events(void)
                        continue;
 
                for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+                       if (event_glob != NULL && 
+                           !strglobmatch(evt_dirent.d_name, event_glob))
+                               continue;
+
                        snprintf(evt_path, MAXPATHLEN, "%s:%s",
                                 sys_dirent.d_name, evt_dirent.d_name);
                        printf("  %-42s [%s]\n", evt_path,
@@ -923,13 +939,61 @@ int is_valid_tracepoint(const char *event_string)
        return 0;
 }
 
+void print_events_type(u8 type)
+{
+       struct event_symbol *syms = event_symbols;
+       unsigned int i;
+       char name[64];
+
+       for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
+               if (type != syms->type)
+                       continue;
+
+               if (strlen(syms->alias))
+                       snprintf(name, sizeof(name),  "%s OR %s",
+                                syms->symbol, syms->alias);
+               else
+                       snprintf(name, sizeof(name), "%s", syms->symbol);
+
+               printf("  %-42s [%s]\n", name,
+                       event_type_descriptors[type]);
+       }
+}
+
+int print_hwcache_events(const char *event_glob)
+{
+       unsigned int type, op, i, printed = 0;
+
+       for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
+               for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
+                       /* skip invalid cache type */
+                       if (!is_cache_op_valid(type, op))
+                               continue;
+
+                       for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
+                               char *name = event_cache_name(type, op, i);
+
+                               if (event_glob != NULL && 
+                                   !strglobmatch(name, event_glob))
+                                       continue;
+
+                               printf("  %-42s [%s]\n", name,
+                                       event_type_descriptors[PERF_TYPE_HW_CACHE]);
+                               ++printed;
+                       }
+               }
+       }
+
+       return printed;
+}
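
strglobmatch() above is perf's shell-style glob matcher. A rough
stand-in sketched with POSIX fnmatch(3), assuming plain '*'/'?'
semantics; note how a NULL glob matches everything, mirroring the
"event_glob != NULL" guards in the listing code:

    #include <fnmatch.h>

    static int glob_ok(const char *name, const char *glob)
    {
            return glob == NULL || fnmatch(glob, name, 0) == 0;
    }
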
+
 /*
  * Print the help text for the event symbols:
  */
-void print_events(void)
+void print_events(const char *event_glob)
 {
        struct event_symbol *syms = event_symbols;
-       unsigned int i, type, op, prev_type = -1;
+       unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0;
        char name[40];
 
        printf("\n");
@@ -938,8 +1002,16 @@ void print_events(void)
        for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
                type = syms->type;
 
-               if (type != prev_type)
+               if (type != prev_type && printed) {
                        printf("\n");
+                       printed = 0;
+                       ntypes_printed++;
+               }
+
+               if (event_glob != NULL && 
+                   !(strglobmatch(syms->symbol, event_glob) ||
+                     (syms->alias && strglobmatch(syms->alias, event_glob))))
+                       continue;
 
                if (strlen(syms->alias))
                        sprintf(name, "%s OR %s", syms->symbol, syms->alias);
@@ -949,22 +1021,17 @@ void print_events(void)
                        event_type_descriptors[type]);
 
                prev_type = type;
+               ++printed;
        }
 
-       printf("\n");
-       for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
-               for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
-                       /* skip invalid cache type */
-                       if (!is_cache_op_valid(type, op))
-                               continue;
-
-                       for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
-                               printf("  %-42s [%s]\n",
-                                       event_cache_name(type, op, i),
-                                       event_type_descriptors[PERF_TYPE_HW_CACHE]);
-                       }
-               }
+       if (ntypes_printed) {
+               printed = 0;
+               printf("\n");
        }
+       print_hwcache_events(event_glob);
+
+       if (event_glob != NULL)
+               return;
 
        printf("\n");
        printf("  %-42s [%s]\n",
@@ -977,37 +1044,7 @@ void print_events(void)
                        event_type_descriptors[PERF_TYPE_BREAKPOINT]);
        printf("\n");
 
-       print_tracepoint_events();
+       print_tracepoint_events(NULL, NULL);
 
        exit(129);
 }
-
-int perf_evsel_list__create_default(void)
-{
-       struct perf_evsel *evsel;
-       struct perf_event_attr attr;
-
-       memset(&attr, 0, sizeof(attr));
-       attr.type = PERF_TYPE_HARDWARE;
-       attr.config = PERF_COUNT_HW_CPU_CYCLES;
-
-       evsel = perf_evsel__new(&attr, 0);
-
-       if (evsel == NULL)
-               return -ENOMEM;
-
-       list_add(&evsel->node, &evsel_list);
-       ++nr_counters;
-       return 0;
-}
-
-void perf_evsel_list__delete(void)
-{
-       struct perf_evsel *pos, *n;
-
-       list_for_each_entry_safe(pos, n, &evsel_list, node) {
-               list_del_init(&pos->node);
-               perf_evsel__delete(pos);
-       }
-       nr_counters = 0;
-}
index 458e3ecf17af9a829dd0cd82be8044221988af88..212f88e07a9cf33da85fa5157524dec04dc766b0 100644 (file)
@@ -9,11 +9,6 @@
 struct list_head;
 struct perf_evsel;
 
-extern struct list_head evsel_list;
-
-int perf_evsel_list__create_default(void);
-void perf_evsel_list__delete(void);
-
 struct option;
 
 struct tracepoint_path {
@@ -25,8 +20,6 @@ struct tracepoint_path {
 extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
 extern bool have_tracepoints(struct list_head *evlist);
 
-extern int                     nr_counters;
-
 const char *event_name(struct perf_evsel *event);
 extern const char *__event_name(int type, u64 config);
 
@@ -35,7 +28,10 @@ extern int parse_filter(const struct option *opt, const char *str, int unset);
 
 #define EVENTS_HELP_MAX (128*1024)
 
-extern void print_events(void);
+void print_events(const char *event_glob);
+void print_events_type(u8 type);
+void print_tracepoint_events(const char *subsys_glob, const char *event_glob);
+int print_hwcache_events(const char *event_glob);
 extern int is_valid_tracepoint(const char *event_string);
 
 extern char debugfs_path[];
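
A hypothetical caller of the new glob-filtered listing API (the globs
are illustrative):

    print_tracepoint_events("sched", "*"); /* every sched tracepoint */
    print_hwcache_events("L1*");           /* L1 hw-cache events     */
    /* print_events(NULL) keeps the old list-everything behaviour,
     * and still exit()s afterwards */
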
index 6e29d9c9dcccb50f63adac097bc512b56fb3e28c..5ddee66020a70c8b74f4b53caf3aa002f56f3d75 100644 (file)
@@ -31,6 +31,7 @@
 #include <string.h>
 #include <stdarg.h>
 #include <limits.h>
+#include <elf.h>
 
 #undef _GNU_SOURCE
 #include "util.h"
@@ -111,7 +112,25 @@ static struct symbol *__find_kernel_function_by_name(const char *name,
                                                     NULL);
 }
 
-const char *kernel_get_module_path(const char *module)
+static struct map *kernel_get_module_map(const char *module)
+{
+       struct rb_node *nd;
+       struct map_groups *grp = &machine.kmaps;
+
+       if (!module)
+               module = "kernel";
+
+       for (nd = rb_first(&grp->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) {
+               struct map *pos = rb_entry(nd, struct map, rb_node);
+               if (strncmp(pos->dso->short_name + 1, module,
+                           pos->dso->short_name_len - 2) == 0) {
+                       return pos;
+               }
+       }
+       return NULL;
+}
+
+static struct dso *kernel_get_module_dso(const char *module)
 {
        struct dso *dso;
        struct map *map;
@@ -141,7 +160,13 @@ const char *kernel_get_module_path(const char *module)
                }
        }
 found:
-       return dso->long_name;
+       return dso;
+}
+
+const char *kernel_get_module_path(const char *module)
+{
+       struct dso *dso = kernel_get_module_dso(module);
+       return (dso) ? dso->long_name : NULL;
 }
 
 #ifdef DWARF_SUPPORT
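
kernel_get_module_map() matches a module name against a dso short name
by skipping the surrounding brackets. A worked instance, assuming a
short_name of "[ext4]":

    /* short_name = "[ext4]", short_name_len = 6, module = "ext4":
     * strncmp(short_name + 1, "ext4", 6 - 2) compares "ext4" -> match */
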
@@ -384,7 +409,7 @@ int show_line_range(struct line_range *lr, const char *module)
        setup_pager();
 
        if (lr->function)
-               fprintf(stdout, "<%s:%d>\n", lr->function,
+               fprintf(stdout, "<%s@%s:%d>\n", lr->function, lr->path,
                        lr->start - lr->offset);
        else
                fprintf(stdout, "<%s:%d>\n", lr->path, lr->start);
@@ -426,12 +451,14 @@ end:
 }
 
 static int show_available_vars_at(int fd, struct perf_probe_event *pev,
-                                 int max_vls, bool externs)
+                                 int max_vls, struct strfilter *_filter,
+                                 bool externs)
 {
        char *buf;
-       int ret, i;
+       int ret, i, nvars;
        struct str_node *node;
        struct variable_list *vls = NULL, *vl;
+       const char *var;
 
        buf = synthesize_perf_probe_point(&pev->point);
        if (!buf)
@@ -439,36 +466,45 @@ static int show_available_vars_at(int fd, struct perf_probe_event *pev,
        pr_debug("Searching variables at %s\n", buf);
 
        ret = find_available_vars_at(fd, pev, &vls, max_vls, externs);
-       if (ret > 0) {
-               /* Some variables were found */
-               fprintf(stdout, "Available variables at %s\n", buf);
-               for (i = 0; i < ret; i++) {
-                       vl = &vls[i];
-                       /*
-                        * A probe point might be converted to
-                        * several trace points.
-                        */
-                       fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol,
-                               vl->point.offset);
-                       free(vl->point.symbol);
-                       if (vl->vars) {
-                               strlist__for_each(node, vl->vars)
+       if (ret <= 0) {
+               pr_err("Failed to find variables at %s (%d)\n", buf, ret);
+               goto end;
+       }
+       /* Some variables were found */
+       fprintf(stdout, "Available variables at %s\n", buf);
+       for (i = 0; i < ret; i++) {
+               vl = &vls[i];
+               /*
+                * A probe point might be converted to
+                * several trace points.
+                */
+               fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol,
+                       vl->point.offset);
+               free(vl->point.symbol);
+               nvars = 0;
+               if (vl->vars) {
+                       strlist__for_each(node, vl->vars) {
+                               var = strchr(node->s, '\t') + 1;
+                               if (strfilter__compare(_filter, var)) {
                                        fprintf(stdout, "\t\t%s\n", node->s);
-                               strlist__delete(vl->vars);
-                       } else
-                               fprintf(stdout, "(No variables)\n");
+                                       nvars++;
+                               }
+                       }
+                       strlist__delete(vl->vars);
                }
-               free(vls);
-       } else
-               pr_err("Failed to find variables at %s (%d)\n", buf, ret);
-
+               if (nvars == 0)
+                       fprintf(stdout, "\t\t(No matched variables)\n");
+       }
+       free(vls);
+end:
        free(buf);
        return ret;
 }
 
 /* Show available variables on given probe point */
 int show_available_vars(struct perf_probe_event *pevs, int npevs,
-                       int max_vls, const char *module, bool externs)
+                       int max_vls, const char *module,
+                       struct strfilter *_filter, bool externs)
 {
        int i, fd, ret = 0;
 
@@ -485,7 +521,8 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
        setup_pager();
 
        for (i = 0; i < npevs && ret >= 0; i++)
-               ret = show_available_vars_at(fd, &pevs[i], max_vls, externs);
+               ret = show_available_vars_at(fd, &pevs[i], max_vls, _filter,
+                                            externs);
 
        close(fd);
        return ret;
@@ -531,7 +568,9 @@ int show_line_range(struct line_range *lr __unused, const char *module __unused)
 
 int show_available_vars(struct perf_probe_event *pevs __unused,
                        int npevs __unused, int max_vls __unused,
-                       const char *module __unused, bool externs __unused)
+                       const char *module __unused,
+                       struct strfilter *filter __unused,
+                       bool externs __unused)
 {
        pr_warning("Debuginfo-analysis is not supported.\n");
        return -ENOSYS;
@@ -556,11 +595,11 @@ static int parse_line_num(char **ptr, int *val, const char *what)
  * The line range syntax is described by:
  *
  *         SRC[:SLN[+NUM|-ELN]]
- *         FNC[:SLN[+NUM|-ELN]]
+ *         FNC[@SRC][:SLN[+NUM|-ELN]]
  */
 int parse_line_range_desc(const char *arg, struct line_range *lr)
 {
-       char *range, *name = strdup(arg);
+       char *range, *file, *name = strdup(arg);
        int err;
 
        if (!name)
@@ -610,7 +649,16 @@ int parse_line_range_desc(const char *arg, struct line_range *lr)
                }
        }
 
-       if (strchr(name, '.'))
+       file = strchr(name, '@');
+       if (file) {
+               *file = '\0';
+               lr->file = strdup(++file);
+               if (lr->file == NULL) {
+                       err = -ENOMEM;
+                       goto err;
+               }
+               lr->function = name;
+       } else if (strchr(name, '.'))
                lr->file = name;
        else
                lr->function = name;
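
A sketch of the extended grammar in use (field semantics approximate;
assumes the struct line_range layout from probe-event.h):

    struct line_range lr;

    memset(&lr, 0, sizeof(lr));
    parse_line_range_desc("schedule@kernel/sched.c:10+5", &lr);
    /* -> lr.function = "schedule", lr.file = "kernel/sched.c",
     *    a 5-line range starting at line 10 */
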
@@ -1784,9 +1832,12 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
        }
 
        /* Loop 2: add all events */
-       for (i = 0; i < npevs && ret >= 0; i++)
+       for (i = 0; i < npevs; i++) {
                ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs,
                                                pkgs[i].ntevs, force_add);
+               if (ret < 0)
+                       break;
+       }
 end:
        /* Loop 3: cleanup and free trace events  */
        for (i = 0; i < npevs; i++) {
@@ -1912,4 +1963,46 @@ int del_perf_probe_events(struct strlist *dellist)
 
        return ret;
 }
+/* TODO: don't use a global variable for filter ... */
+static struct strfilter *available_func_filter;
+
+/*
+ * If a symbol corresponds to a function with global binding and
+ * matches the filter, return 0; for all others, return 1.
+ */
+static int filter_available_functions(struct map *map __unused,
+                                     struct symbol *sym)
+{
+       if (sym->binding == STB_GLOBAL &&
+           strfilter__compare(available_func_filter, sym->name))
+               return 0;
+       return 1;
+}
+
+int show_available_funcs(const char *module, struct strfilter *_filter)
+{
+       struct map *map;
+       int ret;
+
+       setup_pager();
+
+       ret = init_vmlinux();
+       if (ret < 0)
+               return ret;
+
+       map = kernel_get_module_map(module);
+       if (!map) {
+               pr_err("Failed to find %s map.\n", (module) ? : "kernel");
+               return -EINVAL;
+       }
+       available_func_filter = _filter;
+       if (map__load(map, filter_available_functions)) {
+               pr_err("Failed to load map.\n");
+               return -EINVAL;
+       }
+       if (!dso__sorted_by_name(map->dso, map->type))
+               dso__sort_by_name(map->dso, map->type);
+
+       dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
+       return 0;
+}
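
A hypothetical caller, assuming strfilter__new() from util/strfilter.h:

    const char *err = NULL;
    struct strfilter *filter = strfilter__new("vfs_*", &err);

    if (filter)
            show_available_funcs(NULL, filter); /* NULL module -> kernel */
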
index 5accbedfea372b6761727c0fc1c824e237f12c72..3434fc9d79d5a11c1b67a594f06feda2d8100ea0 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <stdbool.h>
 #include "strlist.h"
+#include "strfilter.h"
 
 extern bool probe_event_dry_run;
 
@@ -126,7 +127,8 @@ extern int show_perf_probe_events(void);
 extern int show_line_range(struct line_range *lr, const char *module);
 extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
                               int max_probe_points, const char *module,
-                              bool externs);
+                              struct strfilter *filter, bool externs);
+extern int show_available_funcs(const char *module, struct strfilter *filter);
 
 
 /* Maximum index number of event-name postfix */
index ab83b6ac5d657c80af1e67790c1ace15d7592578..194f9e2a328575ed8fc9bf406bd3f82a667f6c0a 100644 (file)
@@ -33,6 +33,7 @@
 #include <ctype.h>
 #include <dwarf-regs.h>
 
+#include <linux/bitops.h>
 #include "event.h"
 #include "debug.h"
 #include "util.h"
@@ -280,6 +281,19 @@ static bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
        return name ? (strcmp(tname, name) == 0) : false;
 }
 
+/* Get callsite line number of inline-function instance */
+static int die_get_call_lineno(Dwarf_Die *in_die)
+{
+       Dwarf_Attribute attr;
+       Dwarf_Word ret;
+
+       if (!dwarf_attr(in_die, DW_AT_call_line, &attr))
+               return -ENOENT;
+
+       dwarf_formudata(&attr, &ret);
+       return (int)ret;
+}
+
 /* Get type die */
 static Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
 {
@@ -320,13 +334,23 @@ static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
        return vr_die;
 }
 
-static bool die_is_signed_type(Dwarf_Die *tp_die)
+static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name,
+                             Dwarf_Word *result)
 {
        Dwarf_Attribute attr;
+
+       if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
+           dwarf_formudata(&attr, result) != 0)
+               return -ENOENT;
+
+       return 0;
+}
+
+static bool die_is_signed_type(Dwarf_Die *tp_die)
+{
        Dwarf_Word ret;
 
-       if (dwarf_attr(tp_die, DW_AT_encoding, &attr) == NULL ||
-           dwarf_formudata(&attr, &ret) != 0)
+       if (die_get_attr_udata(tp_die, DW_AT_encoding, &ret))
                return false;
 
        return (ret == DW_ATE_signed_char || ret == DW_ATE_signed ||
@@ -335,11 +359,29 @@ static bool die_is_signed_type(Dwarf_Die *tp_die)
 
 static int die_get_byte_size(Dwarf_Die *tp_die)
 {
-       Dwarf_Attribute attr;
        Dwarf_Word ret;
 
-       if (dwarf_attr(tp_die, DW_AT_byte_size, &attr) == NULL ||
-           dwarf_formudata(&attr, &ret) != 0)
+       if (die_get_attr_udata(tp_die, DW_AT_byte_size, &ret))
+               return 0;
+
+       return (int)ret;
+}
+
+static int die_get_bit_size(Dwarf_Die *tp_die)
+{
+       Dwarf_Word ret;
+
+       if (die_get_attr_udata(tp_die, DW_AT_bit_size, &ret))
+               return 0;
+
+       return (int)ret;
+}
+
+static int die_get_bit_offset(Dwarf_Die *tp_die)
+{
+       Dwarf_Word ret;
+
+       if (die_get_attr_udata(tp_die, DW_AT_bit_offset, &ret))
                return 0;
 
        return (int)ret;
@@ -458,6 +500,151 @@ static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
        return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
 }
 
+/* Walker on lines (Note: line numbers will not be sorted) */
+typedef int (* line_walk_handler_t) (const char *fname, int lineno,
+                                    Dwarf_Addr addr, void *data);
+
+struct __line_walk_param {
+       const char *fname;
+       line_walk_handler_t handler;
+       void *data;
+       int retval;
+};
+
+static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
+{
+       struct __line_walk_param *lw = data;
+       Dwarf_Addr addr;
+       int lineno;
+
+       if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
+               lineno = die_get_call_lineno(in_die);
+               if (lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) {
+                       lw->retval = lw->handler(lw->fname, lineno, addr,
+                                                lw->data);
+                       if (lw->retval != 0)
+                               return DIE_FIND_CB_FOUND;
+               }
+       }
+       return DIE_FIND_CB_SIBLING;
+}
+
+/* Walk on lines of blocks included in given DIE */
+static int __die_walk_funclines(Dwarf_Die *sp_die,
+                               line_walk_handler_t handler, void *data)
+{
+       struct __line_walk_param lw = {
+               .handler = handler,
+               .data = data,
+               .retval = 0,
+       };
+       Dwarf_Die die_mem;
+       Dwarf_Addr addr;
+       int lineno;
+
+       /* Handle function declaration line */
+       lw.fname = dwarf_decl_file(sp_die);
+       if (lw.fname && dwarf_decl_line(sp_die, &lineno) == 0 &&
+           dwarf_entrypc(sp_die, &addr) == 0) {
+               lw.retval = handler(lw.fname, lineno, addr, data);
+               if (lw.retval != 0)
+                       goto done;
+       }
+       die_find_child(sp_die, __die_walk_funclines_cb, &lw, &die_mem);
+done:
+       return lw.retval;
+}
+
+static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
+{
+       struct __line_walk_param *lw = data;
+
+       lw->retval = __die_walk_funclines(sp_die, lw->handler, lw->data);
+       if (lw->retval != 0)
+               return DWARF_CB_ABORT;
+
+       return DWARF_CB_OK;
+}
+
+/*
+ * Walk on lines inside the given PDIE. If the PDIE is a subprogram, walk
+ * only on the lines inside the subprogram; otherwise PDIE must be a CU DIE.
+ */
+static int die_walk_lines(Dwarf_Die *pdie, line_walk_handler_t handler,
+                         void *data)
+{
+       Dwarf_Lines *lines;
+       Dwarf_Line *line;
+       Dwarf_Addr addr;
+       const char *fname;
+       int lineno, ret = 0;
+       Dwarf_Die die_mem, *cu_die;
+       size_t nlines, i;
+
+       /* Get the CU die */
+       if (dwarf_tag(pdie) == DW_TAG_subprogram)
+               cu_die = dwarf_diecu(pdie, &die_mem, NULL, NULL);
+       else
+               cu_die = pdie;
+       if (!cu_die) {
+               pr_debug2("Failed to get CU from subprogram\n");
+               return -EINVAL;
+       }
+
+       /* Get lines list in the CU */
+       if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0) {
+               pr_debug2("Failed to get source lines on this CU.\n");
+               return -ENOENT;
+       }
+       pr_debug2("Get %zd lines from this CU\n", nlines);
+
+       /* Walk the lines list */
+       for (i = 0; i < nlines; i++) {
+               line = dwarf_onesrcline(lines, i);
+               if (line == NULL ||
+                   dwarf_lineno(line, &lineno) != 0 ||
+                   dwarf_lineaddr(line, &addr) != 0) {
+                       pr_debug2("Failed to get line info. "
+                                 "Possible error in debuginfo.\n");
+                       continue;
+               }
+               /* Filter lines based on address */
+               if (pdie != cu_die)
+                       /*
+                        * Address filtering:
+                        * the line must be inside the given function and
+                        * not inside any inlined block within it.
+                        */
+                       if (!dwarf_haspc(pdie, addr) ||
+                           die_find_inlinefunc(pdie, addr, &die_mem))
+                               continue;
+               /* Get source line */
+               fname = dwarf_linesrc(line, NULL, NULL);
+
+               ret = handler(fname, lineno, addr, data);
+               if (ret != 0)
+                       return ret;
+       }
+
+       /*
+        * DWARF line records don't include function declarations and
+        * inlined subroutines. We have to check the functions list or
+        * the given function.
+        */
+       if (pdie != cu_die)
+               ret = __die_walk_funclines(pdie, handler, data);
+       else {
+               struct __line_walk_param param = {
+                       .handler = handler,
+                       .data = data,
+                       .retval = 0,
+               };
+               dwarf_getfuncs(cu_die, __die_walk_culines_cb, &param, 0);
+               ret = param.retval;
+       }
+
+       return ret;
+}
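
A minimal line_walk_handler_t sketch (hypothetical handler): count
every line the walker visits; a non-zero return stops the walk:

    static int count_lines_cb(const char *fname __used, int lineno __used,
                              Dwarf_Addr addr __used, void *data)
    {
            ++*(int *)data;
            return 0;       /* 0 == keep walking */
    }

    /* usage: int n = 0; die_walk_lines(sp_die, count_lines_cb, &n); */
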
+
 struct __find_variable_param {
        const char *name;
        Dwarf_Addr addr;
@@ -669,6 +856,8 @@ static_var:
        return 0;
 }
 
+#define BYTES_TO_BITS(nb)      ((nb) * BITS_PER_LONG / sizeof(long))
+
 static int convert_variable_type(Dwarf_Die *vr_die,
                                 struct probe_trace_arg *tvar,
                                 const char *cast)
@@ -685,6 +874,14 @@ static int convert_variable_type(Dwarf_Die *vr_die,
                return (tvar->type == NULL) ? -ENOMEM : 0;
        }
 
+       if (die_get_bit_size(vr_die) != 0) {
+               /* This is a bitfield */
+               ret = snprintf(buf, 16, "b%d@%d/%zd", die_get_bit_size(vr_die),
+                               die_get_bit_offset(vr_die),
+                               BYTES_TO_BITS(die_get_byte_size(vr_die)));
+               goto formatted;
+       }
+
        if (die_get_real_type(vr_die, &type) == NULL) {
                pr_warning("Failed to get a type information of %s.\n",
                           dwarf_diename(vr_die));
@@ -729,29 +926,31 @@ static int convert_variable_type(Dwarf_Die *vr_die,
                return (tvar->type == NULL) ? -ENOMEM : 0;
        }
 
-       ret = die_get_byte_size(&type) * 8;
-       if (ret) {
-               /* Check the bitwidth */
-               if (ret > MAX_BASIC_TYPE_BITS) {
-                       pr_info("%s exceeds max-bitwidth."
-                               " Cut down to %d bits.\n",
-                               dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
-                       ret = MAX_BASIC_TYPE_BITS;
-               }
+       ret = BYTES_TO_BITS(die_get_byte_size(&type));
+       if (!ret)
+               /* No size ... try to use default type */
+               return 0;
 
-               ret = snprintf(buf, 16, "%c%d",
-                              die_is_signed_type(&type) ? 's' : 'u', ret);
-               if (ret < 0 || ret >= 16) {
-                       if (ret >= 16)
-                               ret = -E2BIG;
-                       pr_warning("Failed to convert variable type: %s\n",
-                                  strerror(-ret));
-                       return ret;
-               }
-               tvar->type = strdup(buf);
-               if (tvar->type == NULL)
-                       return -ENOMEM;
+       /* Check the bitwidth */
+       if (ret > MAX_BASIC_TYPE_BITS) {
+               pr_info("%s exceeds max-bitwidth. Cut down to %d bits.\n",
+                       dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
+               ret = MAX_BASIC_TYPE_BITS;
+       }
+       ret = snprintf(buf, 16, "%c%d",
+                      die_is_signed_type(&type) ? 's' : 'u', ret);
+
+formatted:
+       if (ret < 0 || ret >= 16) {
+               if (ret >= 16)
+                       ret = -E2BIG;
+               pr_warning("Failed to convert variable type: %s\n",
+                          strerror(-ret));
+               return ret;
        }
+       tvar->type = strdup(buf);
+       if (tvar->type == NULL)
+               return -ENOMEM;
        return 0;
 }
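
The bitfield branch above emits the kprobe-tracer type string
b<bit-width>@<bit-offset>/<container-size> (see kprobetrace.txt). Since
BITS_PER_LONG == 8 * sizeof(long), BYTES_TO_BITS(nb) is simply nb * 8.
A worked instance:

    /* 3-bit field at bit offset 5 in a 4-byte container:
     * die_get_bit_size() = 3, die_get_bit_offset() = 5,
     * BYTES_TO_BITS(4)   = 4 * 64 / 8 = 32 (on LP64)
     * -> type string "b3@5/32" */
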
 
@@ -1050,157 +1249,102 @@ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf)
        return ret;
 }
 
-/* Find probe point from its line number */
-static int find_probe_point_by_line(struct probe_finder *pf)
+static int probe_point_line_walker(const char *fname, int lineno,
+                                  Dwarf_Addr addr, void *data)
 {
-       Dwarf_Lines *lines;
-       Dwarf_Line *line;
-       size_t nlines, i;
-       Dwarf_Addr addr;
-       int lineno;
-       int ret = 0;
-
-       if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) {
-               pr_warning("No source lines found.\n");
-               return -ENOENT;
-       }
+       struct probe_finder *pf = data;
+       int ret;
 
-       for (i = 0; i < nlines && ret == 0; i++) {
-               line = dwarf_onesrcline(lines, i);
-               if (dwarf_lineno(line, &lineno) != 0 ||
-                   lineno != pf->lno)
-                       continue;
+       if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0)
+               return 0;
 
-               /* TODO: Get fileno from line, but how? */
-               if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
-                       continue;
+       pf->addr = addr;
+       ret = call_probe_finder(NULL, pf);
 
-               if (dwarf_lineaddr(line, &addr) != 0) {
-                       pr_warning("Failed to get the address of the line.\n");
-                       return -ENOENT;
-               }
-               pr_debug("Probe line found: line[%d]:%d addr:0x%jx\n",
-                        (int)i, lineno, (uintmax_t)addr);
-               pf->addr = addr;
+       /* Continue if no error; the same line may also appear in an inlined function */
+       return ret < 0 ? ret : 0;
+}
 
-               ret = call_probe_finder(NULL, pf);
-               /* Continuing, because target line might be inlined. */
-       }
-       return ret;
+/* Find probe point from its line number */
+static int find_probe_point_by_line(struct probe_finder *pf)
+{
+       return die_walk_lines(&pf->cu_die, probe_point_line_walker, pf);
 }
 
 /* Find lines which match lazy pattern */
 static int find_lazy_match_lines(struct list_head *head,
                                 const char *fname, const char *pat)
 {
-       char *fbuf, *p1, *p2;
-       int fd, line, nlines = -1;
-       struct stat st;
-
-       fd = open(fname, O_RDONLY);
-       if (fd < 0) {
-               pr_warning("Failed to open %s: %s\n", fname, strerror(-fd));
+       FILE *fp;
+       char *line = NULL;
+       size_t line_len;
+       ssize_t len;
+       int count = 0, linenum = 1;
+
+       fp = fopen(fname, "r");
+       if (!fp) {
+               pr_warning("Failed to open %s: %s\n", fname, strerror(errno));
                return -errno;
        }
 
-       if (fstat(fd, &st) < 0) {
-               pr_warning("Failed to get the size of %s: %s\n",
-                          fname, strerror(errno));
-               nlines = -errno;
-               goto out_close;
-       }
-
-       nlines = -ENOMEM;
-       fbuf = malloc(st.st_size + 2);
-       if (fbuf == NULL)
-               goto out_close;
-       if (read(fd, fbuf, st.st_size) < 0) {
-               pr_warning("Failed to read %s: %s\n", fname, strerror(errno));
-               nlines = -errno;
-               goto out_free_fbuf;
-       }
-       fbuf[st.st_size] = '\n';        /* Dummy line */
-       fbuf[st.st_size + 1] = '\0';
-       p1 = fbuf;
-       line = 1;
-       nlines = 0;
-       while ((p2 = strchr(p1, '\n')) != NULL) {
-               *p2 = '\0';
-               if (strlazymatch(p1, pat)) {
-                       line_list__add_line(head, line);
-                       nlines++;
+       while ((len = getline(&line, &line_len, fp)) > 0) {
+
+               if (line[len - 1] == '\n')
+                       line[len - 1] = '\0';
+
+               if (strlazymatch(line, pat)) {
+                       line_list__add_line(head, linenum);
+                       count++;
                }
-               line++;
-               p1 = p2 + 1;
+               linenum++;
        }
-out_free_fbuf:
-       free(fbuf);
-out_close:
-       close(fd);
-       return nlines;
+
+       if (ferror(fp))
+               count = -errno;
+       free(line);
+       fclose(fp);
+
+       if (count == 0)
+               pr_debug("No matched lines found in %s.\n", fname);
+       return count;
+}
+
+static int probe_point_lazy_walker(const char *fname, int lineno,
+                                  Dwarf_Addr addr, void *data)
+{
+       struct probe_finder *pf = data;
+       int ret;
+
+       if (!line_list__has_line(&pf->lcache, lineno) ||
+           strtailcmp(fname, pf->fname) != 0)
+               return 0;
+
+       pr_debug("Probe line found: line:%d addr:0x%llx\n",
+                lineno, (unsigned long long)addr);
+       pf->addr = addr;
+       ret = call_probe_finder(NULL, pf);
+
+       /*
+        * Continue if no error, because the lazy pattern may match
+        * other lines as well.
+        */
+       return ret < 0 ? ret : 0;
 }
 
 /* Find probe points from lazy pattern  */
 static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
 {
-       Dwarf_Lines *lines;
-       Dwarf_Line *line;
-       size_t nlines, i;
-       Dwarf_Addr addr;
-       Dwarf_Die die_mem;
-       int lineno;
        int ret = 0;
 
        if (list_empty(&pf->lcache)) {
                /* Matching lazy line pattern */
                ret = find_lazy_match_lines(&pf->lcache, pf->fname,
                                            pf->pev->point.lazy_line);
-               if (ret == 0) {
-                       pr_debug("No matched lines found in %s.\n", pf->fname);
-                       return 0;
-               } else if (ret < 0)
+               if (ret <= 0)
                        return ret;
        }
 
-       if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) {
-               pr_warning("No source lines found.\n");
-               return -ENOENT;
-       }
-
-       for (i = 0; i < nlines && ret >= 0; i++) {
-               line = dwarf_onesrcline(lines, i);
-
-               if (dwarf_lineno(line, &lineno) != 0 ||
-                   !line_list__has_line(&pf->lcache, lineno))
-                       continue;
-
-               /* TODO: Get fileno from line, but how? */
-               if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
-                       continue;
-
-               if (dwarf_lineaddr(line, &addr) != 0) {
-                       pr_debug("Failed to get the address of line %d.\n",
-                                lineno);
-                       continue;
-               }
-               if (sp_die) {
-                       /* Address filtering 1: does sp_die include addr? */
-                       if (!dwarf_haspc(sp_die, addr))
-                               continue;
-                       /* Address filtering 2: No child include addr? */
-                       if (die_find_inlinefunc(sp_die, addr, &die_mem))
-                               continue;
-               }
-
-               pr_debug("Probe line found: line[%d]:%d addr:0x%llx\n",
-                        (int)i, lineno, (unsigned long long)addr);
-               pf->addr = addr;
-
-               ret = call_probe_finder(sp_die, pf);
-               /* Continuing, because target line might be inlined. */
-       }
-       /* TODO: deallocate lines, but how? */
-       return ret;
+       return die_walk_lines(sp_die, probe_point_lazy_walker, pf);
 }
 
 /* Callback parameter with return value */
@@ -1318,8 +1462,7 @@ static int find_probes(int fd, struct probe_finder *pf)
        off = 0;
        line_list__init(&pf->lcache);
        /* Loop on CUs (Compilation Unit) */
-       while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) &&
-              ret >= 0) {
+       while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) {
                /* Get the DIE(Debugging Information Entry) of this CU */
                diep = dwarf_offdie(dbg, off + cuhl, &pf->cu_die);
                if (!diep)
@@ -1340,6 +1483,8 @@ static int find_probes(int fd, struct probe_finder *pf)
                                pf->lno = pp->line;
                                ret = find_probe_point_by_line(pf);
                        }
+                       if (ret < 0)
+                               break;
                }
                off = noff;
        }
@@ -1644,91 +1789,28 @@ static int line_range_add_line(const char *src, unsigned int lineno,
        return line_list__add_line(&lr->line_list, lineno);
 }
 
-/* Search function declaration lines */
-static int line_range_funcdecl_cb(Dwarf_Die *sp_die, void *data)
+static int line_range_walk_cb(const char *fname, int lineno,
+                             Dwarf_Addr addr __used,
+                             void *data)
 {
-       struct dwarf_callback_param *param = data;
-       struct line_finder *lf = param->data;
-       const char *src;
-       int lineno;
+       struct line_finder *lf = data;
 
-       src = dwarf_decl_file(sp_die);
-       if (src && strtailcmp(src, lf->fname) != 0)
-               return DWARF_CB_OK;
-
-       if (dwarf_decl_line(sp_die, &lineno) != 0 ||
+       if ((strtailcmp(fname, lf->fname) != 0) ||
            (lf->lno_s > lineno || lf->lno_e < lineno))
-               return DWARF_CB_OK;
+               return 0;
 
-       param->retval = line_range_add_line(src, lineno, lf->lr);
-       if (param->retval < 0)
-               return DWARF_CB_ABORT;
-       return DWARF_CB_OK;
-}
+       if (line_range_add_line(fname, lineno, lf->lr) < 0)
+               return -EINVAL;
 
-static int find_line_range_func_decl_lines(struct line_finder *lf)
-{
-       struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0};
-       dwarf_getfuncs(&lf->cu_die, line_range_funcdecl_cb, &param, 0);
-       return param.retval;
+       return 0;
 }
 
 /* Find line range from its line number */
 static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
 {
-       Dwarf_Lines *lines;
-       Dwarf_Line *line;
-       size_t nlines, i;
-       Dwarf_Addr addr;
-       int lineno, ret = 0;
-       const char *src;
-       Dwarf_Die die_mem;
-
-       line_list__init(&lf->lr->line_list);
-       if (dwarf_getsrclines(&lf->cu_die, &lines, &nlines) != 0) {
-               pr_warning("No source lines found.\n");
-               return -ENOENT;
-       }
-
-       /* Search probable lines on lines list */
-       for (i = 0; i < nlines; i++) {
-               line = dwarf_onesrcline(lines, i);
-               if (dwarf_lineno(line, &lineno) != 0 ||
-                   (lf->lno_s > lineno || lf->lno_e < lineno))
-                       continue;
-
-               if (sp_die) {
-                       /* Address filtering 1: does sp_die include addr? */
-                       if (dwarf_lineaddr(line, &addr) != 0 ||
-                           !dwarf_haspc(sp_die, addr))
-                               continue;
-
-                       /* Address filtering 2: No child include addr? */
-                       if (die_find_inlinefunc(sp_die, addr, &die_mem))
-                               continue;
-               }
-
-               /* TODO: Get fileno from line, but how? */
-               src = dwarf_linesrc(line, NULL, NULL);
-               if (strtailcmp(src, lf->fname) != 0)
-                       continue;
-
-               ret = line_range_add_line(src, lineno, lf->lr);
-               if (ret < 0)
-                       return ret;
-       }
+       int ret;
 
-       /*
-        * Dwarf lines doesn't include function declarations. We have to
-        * check functions list or given function.
-        */
-       if (sp_die) {
-               src = dwarf_decl_file(sp_die);
-               if (src && dwarf_decl_line(sp_die, &lineno) == 0 &&
-                   (lf->lno_s <= lineno && lf->lno_e >= lineno))
-                       ret = line_range_add_line(src, lineno, lf->lr);
-       } else
-               ret = find_line_range_func_decl_lines(lf);
+       ret = die_walk_lines(sp_die ?: &lf->cu_die, line_range_walk_cb, lf);
 
        /* Update status */
        if (ret >= 0)
@@ -1758,9 +1840,6 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
        struct line_finder *lf = param->data;
        struct line_range *lr = lf->lr;
 
-       pr_debug("find (%llx) %s\n",
-                (unsigned long long)dwarf_dieoffset(sp_die),
-                dwarf_diename(sp_die));
        if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
            die_compare_name(sp_die, lr->function)) {
                lf->fname = dwarf_decl_file(sp_die);
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
new file mode 100644 (file)
index 0000000..a9f2d7e
--- /dev/null
+++ b/tools/perf/util/python.c
@@ -0,0 +1,896 @@
+#include <Python.h>
+#include <structmember.h>
+#include <inttypes.h>
+#include <poll.h>
+#include "evlist.h"
+#include "evsel.h"
+#include "event.h"
+#include "cpumap.h"
+#include "thread_map.h"
+
+/* Define PyVarObject_HEAD_INIT for python 2.5 */
+#ifndef PyVarObject_HEAD_INIT
+# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
+#endif
+
+struct throttle_event {
+       struct perf_event_header header;
+       u64                      time;
+       u64                      id;
+       u64                      stream_id;
+};
+
+PyMODINIT_FUNC initperf(void);
+
+#define member_def(type, member, ptype, help) \
+       { #member, ptype, \
+         offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
+         0, help }
+
+#define sample_member_def(name, member, ptype, help) \
+       { #name, ptype, \
+         offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
+         0, help }
+
+struct pyrf_event {
+       PyObject_HEAD
+       struct perf_sample sample;
+       union perf_event   event;
+};
+
+#define sample_members \
+       sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"),                       \
+       sample_member_def(sample_pid, pid, T_INT, "event pid"),                  \
+       sample_member_def(sample_tid, tid, T_INT, "event tid"),                  \
+       sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"),            \
+       sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"),                 \
+       sample_member_def(sample_id, id, T_ULONGLONG, "event id"),                       \
+       sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
+       sample_member_def(sample_period, period, T_ULONGLONG, "event period"),           \
+       sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
+
+static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
+
+static PyMemberDef pyrf_mmap_event__members[] = {
+       sample_members
+       member_def(perf_event_header, type, T_UINT, "event type"),
+       member_def(mmap_event, pid, T_UINT, "event pid"),
+       member_def(mmap_event, tid, T_UINT, "event tid"),
+       member_def(mmap_event, start, T_ULONGLONG, "start of the map"),
+       member_def(mmap_event, len, T_ULONGLONG, "map length"),
+       member_def(mmap_event, pgoff, T_ULONGLONG, "page offset"),
+       member_def(mmap_event, filename, T_STRING_INPLACE, "backing store"),
+       { .name = NULL, },
+};
+
+static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
+{
+       PyObject *ret;
+       char *s;
+
+       if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRIx64 ", "
+                        "length: %#" PRIx64 ", offset: %#" PRIx64 ", "
+                        "filename: %s }",
+                    pevent->event.mmap.pid, pevent->event.mmap.tid,
+                    pevent->event.mmap.start, pevent->event.mmap.len,
+                    pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
+               ret = PyErr_NoMemory();
+       } else {
+               ret = PyString_FromString(s);
+               free(s);
+       }
+       return ret;
+}
+
+static PyTypeObject pyrf_mmap_event__type = {
+       PyVarObject_HEAD_INIT(NULL, 0)
+       .tp_name        = "perf.mmap_event",
+       .tp_basicsize   = sizeof(struct pyrf_event),
+       .tp_flags       = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+       .tp_doc         = pyrf_mmap_event__doc,
+       .tp_members     = pyrf_mmap_event__members,
+       .tp_repr        = (reprfunc)pyrf_mmap_event__repr,
+};
+
+static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
+
+static PyMemberDef pyrf_task_event__members[] = {
+       sample_members
+       member_def(perf_event_header, type, T_UINT, "event type"),
+       member_def(fork_event, pid, T_UINT, "event pid"),
+       member_def(fork_event, ppid, T_UINT, "event ppid"),
+       member_def(fork_event, tid, T_UINT, "event tid"),
+       member_def(fork_event, ptid, T_UINT, "event ptid"),
+       member_def(fork_event, time, T_ULONGLONG, "timestamp"),
+       { .name = NULL, },
+};
+
+static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
+{
+       return PyString_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
+                                  "ptid: %u, time: %" PRIu64 "}",
+                                  pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
+                                  pevent->event.fork.pid,
+                                  pevent->event.fork.ppid,
+                                  pevent->event.fork.tid,
+                                  pevent->event.fork.ptid,
+                                  pevent->event.fork.time);
+}
+
+static PyTypeObject pyrf_task_event__type = {
+       PyVarObject_HEAD_INIT(NULL, 0)
+       .tp_name        = "perf.task_event",
+       .tp_basicsize   = sizeof(struct pyrf_event),
+       .tp_flags       = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+       .tp_doc         = pyrf_task_event__doc,
+       .tp_members     = pyrf_task_event__members,
+       .tp_repr        = (reprfunc)pyrf_task_event__repr,
+};
+
+static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
+
+static PyMemberDef pyrf_comm_event__members[] = {
+       sample_members
+       member_def(perf_event_header, type, T_UINT, "event type"),
+       member_def(comm_event, pid, T_UINT, "event pid"),
+       member_def(comm_event, tid, T_UINT, "event tid"),
+       member_def(comm_event, comm, T_STRING_INPLACE, "process name"),
+       { .name = NULL, },
+};
+
+static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
+{
+       return PyString_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
+                                  pevent->event.comm.pid,
+                                  pevent->event.comm.tid,
+                                  pevent->event.comm.comm);
+}
+
+static PyTypeObject pyrf_comm_event__type = {
+       PyVarObject_HEAD_INIT(NULL, 0)
+       .tp_name        = "perf.comm_event",
+       .tp_basicsize   = sizeof(struct pyrf_event),
+       .tp_flags       = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+       .tp_doc         = pyrf_comm_event__doc,
+       .tp_members     = pyrf_comm_event__members,
+       .tp_repr        = (reprfunc)pyrf_comm_event__repr,
+};
+
+static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
+
+static PyMemberDef pyrf_throttle_event__members[] = {
+       sample_members
+       member_def(perf_event_header, type, T_UINT, "event type"),
+       member_def(throttle_event, time, T_ULONGLONG, "timestamp"),
+       member_def(throttle_event, id, T_ULONGLONG, "event id"),
+       member_def(throttle_event, stream_id, T_ULONGLONG, "event stream id"),
+       { .name = NULL, },
+};
+
+static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
+{
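+       /* the throttle payload sits right after the generic event header */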
+       struct throttle_event *te = (struct throttle_event *)(&pevent->event.header + 1);
+
+       return PyString_FromFormat("{ type: %sthrottle, time: %" PRIu64 ", id: %" PRIu64
+                                  ", stream_id: %" PRIu64 " }",
+                                  pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
+                                  te->time, te->id, te->stream_id);
+}
+
+static PyTypeObject pyrf_throttle_event__type = {
+       PyVarObject_HEAD_INIT(NULL, 0)
+       .tp_name        = "perf.throttle_event",
+       .tp_basicsize   = sizeof(struct pyrf_event),
+       .tp_flags       = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+       .tp_doc         = pyrf_throttle_event__doc,
+       .tp_members     = pyrf_throttle_event__members,
+       .tp_repr        = (reprfunc)pyrf_throttle_event__repr,
+};
+
+static int pyrf_event__setup_types(void)
+{
+       int err;
+       pyrf_mmap_event__type.tp_new =
+       pyrf_task_event__type.tp_new =
+       pyrf_comm_event__type.tp_new =
+       pyrf_throttle_event__type.tp_new = PyType_GenericNew;
+       err = PyType_Ready(&pyrf_mmap_event__type);
+       if (err < 0)
+               goto out;
+       err = PyType_Ready(&pyrf_task_event__type);
+       if (err < 0)
+               goto out;
+       err = PyType_Ready(&pyrf_comm_event__type);
+       if (err < 0)
+               goto out;
+       err = PyType_Ready(&pyrf_throttle_event__type);
+       if (err < 0)
+               goto out;
+out:
+       return err;
+}
+
+static PyTypeObject *pyrf_event__type[] = {
+       [PERF_RECORD_MMAP]       = &pyrf_mmap_event__type,
+       [PERF_RECORD_LOST]       = &pyrf_mmap_event__type,
+       [PERF_RECORD_COMM]       = &pyrf_comm_event__type,
+       [PERF_RECORD_EXIT]       = &pyrf_task_event__type,
+       [PERF_RECORD_THROTTLE]   = &pyrf_throttle_event__type,
+       [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
+       [PERF_RECORD_FORK]       = &pyrf_task_event__type,
+       [PERF_RECORD_READ]       = &pyrf_mmap_event__type,
+       [PERF_RECORD_SAMPLE]     = &pyrf_mmap_event__type,
+};
+
+static PyObject *pyrf_event__new(union perf_event *event)
+{
+       struct pyrf_event *pevent;
+       PyTypeObject *ptype;
+
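+       /* only the kernel record types mapped in pyrf_event__type[] are handled */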
+       if (event->header.type < PERF_RECORD_MMAP ||
+           event->header.type > PERF_RECORD_SAMPLE)
+               return NULL;
+
+       ptype = pyrf_event__type[event->header.type];
+       pevent = PyObject_New(struct pyrf_event, ptype);
+       if (pevent != NULL)
+               memcpy(&pevent->event, event, event->header.size);
+       return (PyObject *)pevent;
+}
+
+struct pyrf_cpu_map {
+       PyObject_HEAD
+
+       struct cpu_map *cpus;
+};
+
+static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
+                             PyObject *args, PyObject *kwargs)
+{
+       static char *kwlist[] = { "cpustr", NULL, NULL, };
+       char *cpustr = NULL;
+
+       if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
+                                        kwlist, &cpustr))
+               return -1;
+
+       pcpus->cpus = cpu_map__new(cpustr);
+       if (pcpus->cpus == NULL)
+               return -1;
+       return 0;
+}
+
+static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
+{
+       cpu_map__delete(pcpus->cpus);
+       pcpus->ob_type->tp_free((PyObject*)pcpus);
+}
+
+static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
+{
+       struct pyrf_cpu_map *pcpus = (void *)obj;
+
+       return pcpus->cpus->nr;
+}
+
+static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
+{
+       struct pyrf_cpu_map *pcpus = (void *)obj;
+
+       if (i >= pcpus->cpus->nr)
+               return NULL;
+
+       return Py_BuildValue("i", pcpus->cpus->map[i]);
+}
+
+static PySequenceMethods pyrf_cpu_map__sequence_methods = {
+       .sq_length = pyrf_cpu_map__length,
+       .sq_item   = pyrf_cpu_map__item,
+};
+
+static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
+
+static PyTypeObject pyrf_cpu_map__type = {
+       PyVarObject_HEAD_INIT(NULL, 0)
+       .tp_name        = "perf.cpu_map",
+       .tp_basicsize   = sizeof(struct pyrf_cpu_map),
+       .tp_dealloc     = (destructor)pyrf_cpu_map__delete,
+       .tp_flags       = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+       .tp_doc         = pyrf_cpu_map__doc,
+       .tp_as_sequence = &pyrf_cpu_map__sequence_methods,
+       .tp_init        = (initproc)pyrf_cpu_map__init,
+};
+
+static int pyrf_cpu_map__setup_types(void)
+{
+       pyrf_cpu_map__type.tp_new = PyType_GenericNew;
+       return PyType_Ready(&pyrf_cpu_map__type);
+}
+
+struct pyrf_thread_map {
+       PyObject_HEAD
+
+       struct thread_map *threads;
+};
+
+static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
+                                PyObject *args, PyObject *kwargs)
+{
+       static char *kwlist[] = { "pid", "tid", NULL, NULL, };
+       int pid = -1, tid = -1;
+
+       if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii",
+                                        kwlist, &pid, &tid))
+               return -1;
+
+       pthreads->threads = thread_map__new(pid, tid);
+       if (pthreads->threads == NULL)
+               return -1;
+       return 0;
+}
+
+static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
+{
+       thread_map__delete(pthreads->threads);
+       pthreads->ob_type->tp_free((PyObject*)pthreads);
+}
+
+static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
+{
+       struct pyrf_thread_map *pthreads = (void *)obj;
+
+       return pthreads->threads->nr;
+}
+
+static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
+{
+       struct pyrf_thread_map *pthreads = (void *)obj;
+
+       if (i >= pthreads->threads->nr)
+               return NULL;
+
+       return Py_BuildValue("i", pthreads->threads->map[i]);
+}
+
+static PySequenceMethods pyrf_thread_map__sequence_methods = {
+       .sq_length = pyrf_thread_map__length,
+       .sq_item   = pyrf_thread_map__item,
+};
+
+static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
+
+static PyTypeObject pyrf_thread_map__type = {
+       PyVarObject_HEAD_INIT(NULL, 0)
+       .tp_name        = "perf.thread_map",
+       .tp_basicsize   = sizeof(struct pyrf_thread_map),
+       .tp_dealloc     = (destructor)pyrf_thread_map__delete,
+       .tp_flags       = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+       .tp_doc         = pyrf_thread_map__doc,
+       .tp_as_sequence = &pyrf_thread_map__sequence_methods,
+       .tp_init        = (initproc)pyrf_thread_map__init,
+};
+
+static int pyrf_thread_map__setup_types(void)
+{
+       pyrf_thread_map__type.tp_new = PyType_GenericNew;
+       return PyType_Ready(&pyrf_thread_map__type);
+}
+
+struct pyrf_evsel {
+       PyObject_HEAD
+
+       struct perf_evsel evsel;
+};
+
+static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
+                           PyObject *args, PyObject *kwargs)
+{
+       struct perf_event_attr attr = {
+               .type = PERF_TYPE_HARDWARE,
+               .config = PERF_COUNT_HW_CPU_CYCLES,
+               .sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
+       };
+       static char *kwlist[] = {
+               "type",
+               "config",
+               "sample_freq",
+               "sample_period",
+               "sample_type",
+               "read_format",
+               "disabled",
+               "inherit",
+               "pinned",
+               "exclusive",
+               "exclude_user",
+               "exclude_kernel",
+               "exclude_hv",
+               "exclude_idle",
+               "mmap",
+               "comm",
+               "freq",
+               "inherit_stat",
+               "enable_on_exec",
+               "task",
+               "watermark",
+               "precise_ip",
+               "mmap_data",
+               "sample_id_all",
+               "wakeup_events",
+               "bp_type",
+               "bp_addr",
+               "bp_len", NULL, NULL, };
+       u64 sample_period = 0;
+       u32 disabled = 0,
+           inherit = 0,
+           pinned = 0,
+           exclusive = 0,
+           exclude_user = 0,
+           exclude_kernel = 0,
+           exclude_hv = 0,
+           exclude_idle = 0,
+           mmap = 0,
+           comm = 0,
+           freq = 1,
+           inherit_stat = 0,
+           enable_on_exec = 0,
+           task = 0,
+           watermark = 0,
+           precise_ip = 0,
+           mmap_data = 0,
+           sample_id_all = 1;
+       int idx = 0;
+
+       if (!PyArg_ParseTupleAndKeywords(args, kwargs,
+                                        "|iKiKKiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
+                                        &attr.type, &attr.config, &attr.sample_freq,
+                                        &sample_period, &attr.sample_type,
+                                        &attr.read_format, &disabled, &inherit,
+                                        &pinned, &exclusive, &exclude_user,
+                                        &exclude_kernel, &exclude_hv, &exclude_idle,
+                                        &mmap, &comm, &freq, &inherit_stat,
+                                        &enable_on_exec, &task, &watermark,
+                                        &precise_ip, &mmap_data, &sample_id_all,
+                                        &attr.wakeup_events, &attr.bp_type,
+                                        &attr.bp_addr, &attr.bp_len, &idx))
+               return -1;
+
+       /* union... */
+       if (sample_period != 0) {
+               if (attr.sample_freq != 0)
+                       return -1; /* FIXME: throw right exception */
+               attr.sample_period = sample_period;
+       }
+
+       /* Bitfields */
+       attr.disabled       = disabled;
+       attr.inherit        = inherit;
+       attr.pinned         = pinned;
+       attr.exclusive      = exclusive;
+       attr.exclude_user   = exclude_user;
+       attr.exclude_kernel = exclude_kernel;
+       attr.exclude_hv     = exclude_hv;
+       attr.exclude_idle   = exclude_idle;
+       attr.mmap           = mmap;
+       attr.comm           = comm;
+       attr.freq           = freq;
+       attr.inherit_stat   = inherit_stat;
+       attr.enable_on_exec = enable_on_exec;
+       attr.task           = task;
+       attr.watermark      = watermark;
+       attr.precise_ip     = precise_ip;
+       attr.mmap_data      = mmap_data;
+       attr.sample_id_all  = sample_id_all;
+
+       perf_evsel__init(&pevsel->evsel, &attr, idx);
+       return 0;
+}
+
+static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
+{
+       perf_evsel__exit(&pevsel->evsel);
+       pevsel->ob_type->tp_free((PyObject*)pevsel);
+}
+
+static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
+                                 PyObject *args, PyObject *kwargs)
+{
+       struct perf_evsel *evsel = &pevsel->evsel;
+       struct cpu_map *cpus = NULL;
+       struct thread_map *threads = NULL;
+       PyObject *pcpus = NULL, *pthreads = NULL;
+       int group = 0, overwrite = 0;
+       static char *kwlist[] = {"cpus", "threads", "group", "overwrite", NULL, NULL};
+
+       if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
+                                        &pcpus, &pthreads, &group, &overwrite))
+               return NULL;
+
+       if (pthreads != NULL)
+               threads = ((struct pyrf_thread_map *)pthreads)->threads;
+
+       if (pcpus != NULL)
+               cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
+
+       if (perf_evsel__open(evsel, cpus, threads, group, overwrite) < 0) {
+               PyErr_SetFromErrno(PyExc_OSError);
+               return NULL;
+       }
+
+       Py_INCREF(Py_None);
+       return Py_None;
+}
+
+static PyMethodDef pyrf_evsel__methods[] = {
+       {
+               .ml_name  = "open",
+               .ml_meth  = (PyCFunction)pyrf_evsel__open,
+               .ml_flags = METH_VARARGS | METH_KEYWORDS,
+               .ml_doc   = PyDoc_STR("open the event selector file descriptor table.")
+       },
+       { .ml_name = NULL, }
+};
+
+static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector object.");
+
+static PyTypeObject pyrf_evsel__type = {
+       PyVarObject_HEAD_INIT(NULL, 0)
+       .tp_name        = "perf.evsel",
+       .tp_basicsize   = sizeof(struct pyrf_evsel),
+       .tp_dealloc     = (destructor)pyrf_evsel__delete,
+       .tp_flags       = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+       .tp_doc         = pyrf_evsel__doc,
+       .tp_methods     = pyrf_evsel__methods,
+       .tp_init        = (initproc)pyrf_evsel__init,
+};
+
+static int pyrf_evsel__setup_types(void)
+{
+       pyrf_evsel__type.tp_new = PyType_GenericNew;
+       return PyType_Ready(&pyrf_evsel__type);
+}
+
+struct pyrf_evlist {
+       PyObject_HEAD
+
+       struct perf_evlist evlist;
+};
+
+static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
+                            PyObject *args, PyObject *kwargs __used)
+{
+       PyObject *pcpus = NULL, *pthreads = NULL;
+       struct cpu_map *cpus;
+       struct thread_map *threads;
+
+       if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
+               return -1;
+
+       threads = ((struct pyrf_thread_map *)pthreads)->threads;
+       cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
+       perf_evlist__init(&pevlist->evlist, cpus, threads);
+       return 0;
+}
+
+static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
+{
+       perf_evlist__exit(&pevlist->evlist);
+       pevlist->ob_type->tp_free((PyObject*)pevlist);
+}
+
+static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
+                                  PyObject *args, PyObject *kwargs)
+{
+       struct perf_evlist *evlist = &pevlist->evlist;
+       static char *kwlist[] = {"pages", "overwrite",
+                                 NULL, NULL};
+       int pages = 128, overwrite = false;
+
+       if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
+                                        &pages, &overwrite))
+               return NULL;
+
+       if (perf_evlist__mmap(evlist, pages, overwrite) < 0) {
+               PyErr_SetFromErrno(PyExc_OSError);
+               return NULL;
+       }
+
+       Py_INCREF(Py_None);
+       return Py_None;
+}
+
+static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
+                                  PyObject *args, PyObject *kwargs)
+{
+       struct perf_evlist *evlist = &pevlist->evlist;
+       static char *kwlist[] = {"timeout", NULL, NULL};
+       int timeout = -1, n;
+
+       if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
+               return NULL;
+
+       n = poll(evlist->pollfd, evlist->nr_fds, timeout);
+       if (n < 0) {
+               PyErr_SetFromErrno(PyExc_OSError);
+               return NULL;
+       }
+
+       return Py_BuildValue("i", n);
+}
+
+static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
+                                        PyObject *args __used, PyObject *kwargs __used)
+{
+       struct perf_evlist *evlist = &pevlist->evlist;
+       PyObject *list = PyList_New(0);
+       int i;
+
+       for (i = 0; i < evlist->nr_fds; ++i) {
+               PyObject *file;
+               FILE *fp = fdopen(evlist->pollfd[i].fd, "r");
+
+               if (fp == NULL)
+                       goto free_list;
+
+               file = PyFile_FromFile(fp, "perf", "r", NULL);
+               if (file == NULL)
+                       goto free_list;
+
+               if (PyList_Append(list, file) != 0) {
+                       Py_DECREF(file);
+                       goto free_list;
+               }
+
+               Py_DECREF(file);
+       }
+
+       return list;
+free_list:
+       Py_XDECREF(list);
+       return PyErr_NoMemory();
+}
+
+
+static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
+                                 PyObject *args, PyObject *kwargs __used)
+{
+       struct perf_evlist *evlist = &pevlist->evlist;
+       PyObject *pevsel;
+       struct perf_evsel *evsel;
+
+       if (!PyArg_ParseTuple(args, "O", &pevsel))
+               return NULL;
+
+       Py_INCREF(pevsel);
+       evsel = &((struct pyrf_evsel *)pevsel)->evsel;
+       evsel->idx = evlist->nr_entries;
+       perf_evlist__add(evlist, evsel);
+
+       return Py_BuildValue("i", evlist->nr_entries);
+}
+
+static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
+                                         PyObject *args, PyObject *kwargs)
+{
+       struct perf_evlist *evlist = &pevlist->evlist;
+       union perf_event *event;
+       int sample_id_all = 1, cpu;
+       static char *kwlist[] = {"sample_id_all", NULL, NULL};
+
+       if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
+                                        &cpu, &sample_id_all))
+               return NULL;
+
+       event = perf_evlist__read_on_cpu(evlist, cpu);
+       if (event != NULL) {
+               struct perf_evsel *first;
+               PyObject *pyevent = pyrf_event__new(event);
+               struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
+
+               if (pyevent == NULL)
+                       return PyErr_NoMemory();
+
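+               /* parse using the first evsel's sample_type; all entries are assumed to agree */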
+               first = list_entry(evlist->entries.next, struct perf_evsel, node);
+               perf_event__parse_sample(event, first->attr.sample_type, sample_id_all,
+                                        &pevent->sample);
+               return pyevent;
+       }
+
+       Py_INCREF(Py_None);
+       return Py_None;
+}
+
+static PyMethodDef pyrf_evlist__methods[] = {
+       {
+               .ml_name  = "mmap",
+               .ml_meth  = (PyCFunction)pyrf_evlist__mmap,
+               .ml_flags = METH_VARARGS | METH_KEYWORDS,
+               .ml_doc   = PyDoc_STR("mmap the file descriptor table.")
+       },
+       {
+               .ml_name  = "poll",
+               .ml_meth  = (PyCFunction)pyrf_evlist__poll,
+               .ml_flags = METH_VARARGS | METH_KEYWORDS,
+               .ml_doc   = PyDoc_STR("poll the file descriptor table.")
+       },
+       {
+               .ml_name  = "get_pollfd",
+               .ml_meth  = (PyCFunction)pyrf_evlist__get_pollfd,
+               .ml_flags = METH_VARARGS | METH_KEYWORDS,
+               .ml_doc   = PyDoc_STR("get the poll file descriptor table.")
+       },
+       {
+               .ml_name  = "add",
+               .ml_meth  = (PyCFunction)pyrf_evlist__add,
+               .ml_flags = METH_VARARGS | METH_KEYWORDS,
+               .ml_doc   = PyDoc_STR("adds an event selector to the list.")
+       },
+       {
+               .ml_name  = "read_on_cpu",
+               .ml_meth  = (PyCFunction)pyrf_evlist__read_on_cpu,
+               .ml_flags = METH_VARARGS | METH_KEYWORDS,
+               .ml_doc   = PyDoc_STR("reads an event.")
+       },
+       { .ml_name = NULL, }
+};
+
+static Py_ssize_t pyrf_evlist__length(PyObject *obj)
+{
+       struct pyrf_evlist *pevlist = (void *)obj;
+
+       return pevlist->evlist.nr_entries;
+}
+
+static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
+{
+       struct pyrf_evlist *pevlist = (void *)obj;
+       struct perf_evsel *pos;
+
+       if (i >= pevlist->evlist.nr_entries)
+               return NULL;
+
+       list_for_each_entry(pos, &pevlist->evlist.entries, node)
+               if (i-- == 0)
+                       break;
+
+       return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
+}
+
+static PySequenceMethods pyrf_evlist__sequence_methods = {
+       .sq_length = pyrf_evlist__length,
+       .sq_item   = pyrf_evlist__item,
+};
+
+static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
+
+static PyTypeObject pyrf_evlist__type = {
+       PyVarObject_HEAD_INIT(NULL, 0)
+       .tp_name        = "perf.evlist",
+       .tp_basicsize   = sizeof(struct pyrf_evlist),
+       .tp_dealloc     = (destructor)pyrf_evlist__delete,
+       .tp_flags       = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+       .tp_as_sequence = &pyrf_evlist__sequence_methods,
+       .tp_doc         = pyrf_evlist__doc,
+       .tp_methods     = pyrf_evlist__methods,
+       .tp_init        = (initproc)pyrf_evlist__init,
+};
+
+static int pyrf_evlist__setup_types(void)
+{
+       pyrf_evlist__type.tp_new = PyType_GenericNew;
+       return PyType_Ready(&pyrf_evlist__type);
+}
+
+static struct {
+       const char *name;
+       int         value;
+} perf__constants[] = {
+       { "TYPE_HARDWARE",   PERF_TYPE_HARDWARE },
+       { "TYPE_SOFTWARE",   PERF_TYPE_SOFTWARE },
+       { "TYPE_TRACEPOINT", PERF_TYPE_TRACEPOINT },
+       { "TYPE_HW_CACHE",   PERF_TYPE_HW_CACHE },
+       { "TYPE_RAW",        PERF_TYPE_RAW },
+       { "TYPE_BREAKPOINT", PERF_TYPE_BREAKPOINT },
+
+       { "COUNT_HW_CPU_CYCLES",          PERF_COUNT_HW_CPU_CYCLES },
+       { "COUNT_HW_INSTRUCTIONS",        PERF_COUNT_HW_INSTRUCTIONS },
+       { "COUNT_HW_CACHE_REFERENCES",    PERF_COUNT_HW_CACHE_REFERENCES },
+       { "COUNT_HW_CACHE_MISSES",        PERF_COUNT_HW_CACHE_MISSES },
+       { "COUNT_HW_BRANCH_INSTRUCTIONS", PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+       { "COUNT_HW_BRANCH_MISSES",       PERF_COUNT_HW_BRANCH_MISSES },
+       { "COUNT_HW_BUS_CYCLES",          PERF_COUNT_HW_BUS_CYCLES },
+       { "COUNT_HW_CACHE_L1D",           PERF_COUNT_HW_CACHE_L1D },
+       { "COUNT_HW_CACHE_L1I",           PERF_COUNT_HW_CACHE_L1I },
+       { "COUNT_HW_CACHE_LL",            PERF_COUNT_HW_CACHE_LL },
+       { "COUNT_HW_CACHE_DTLB",          PERF_COUNT_HW_CACHE_DTLB },
+       { "COUNT_HW_CACHE_ITLB",          PERF_COUNT_HW_CACHE_ITLB },
+       { "COUNT_HW_CACHE_BPU",           PERF_COUNT_HW_CACHE_BPU },
+       { "COUNT_HW_CACHE_OP_READ",       PERF_COUNT_HW_CACHE_OP_READ },
+       { "COUNT_HW_CACHE_OP_WRITE",      PERF_COUNT_HW_CACHE_OP_WRITE },
+       { "COUNT_HW_CACHE_OP_PREFETCH",   PERF_COUNT_HW_CACHE_OP_PREFETCH },
+       { "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS },
+       { "COUNT_HW_CACHE_RESULT_MISS",   PERF_COUNT_HW_CACHE_RESULT_MISS },
+
+       { "COUNT_SW_CPU_CLOCK",        PERF_COUNT_SW_CPU_CLOCK },
+       { "COUNT_SW_TASK_CLOCK",       PERF_COUNT_SW_TASK_CLOCK },
+       { "COUNT_SW_PAGE_FAULTS",      PERF_COUNT_SW_PAGE_FAULTS },
+       { "COUNT_SW_CONTEXT_SWITCHES", PERF_COUNT_SW_CONTEXT_SWITCHES },
+       { "COUNT_SW_CPU_MIGRATIONS",   PERF_COUNT_SW_CPU_MIGRATIONS },
+       { "COUNT_SW_PAGE_FAULTS_MIN",  PERF_COUNT_SW_PAGE_FAULTS_MIN },
+       { "COUNT_SW_PAGE_FAULTS_MAJ",  PERF_COUNT_SW_PAGE_FAULTS_MAJ },
+       { "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS },
+       { "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS },
+
+       { "SAMPLE_IP",        PERF_SAMPLE_IP },
+       { "SAMPLE_TID",       PERF_SAMPLE_TID },
+       { "SAMPLE_TIME",      PERF_SAMPLE_TIME },
+       { "SAMPLE_ADDR",      PERF_SAMPLE_ADDR },
+       { "SAMPLE_READ",      PERF_SAMPLE_READ },
+       { "SAMPLE_CALLCHAIN", PERF_SAMPLE_CALLCHAIN },
+       { "SAMPLE_ID",        PERF_SAMPLE_ID },
+       { "SAMPLE_CPU",       PERF_SAMPLE_CPU },
+       { "SAMPLE_PERIOD",    PERF_SAMPLE_PERIOD },
+       { "SAMPLE_STREAM_ID", PERF_SAMPLE_STREAM_ID },
+       { "SAMPLE_RAW",       PERF_SAMPLE_RAW },
+
+       { "FORMAT_TOTAL_TIME_ENABLED", PERF_FORMAT_TOTAL_TIME_ENABLED },
+       { "FORMAT_TOTAL_TIME_RUNNING", PERF_FORMAT_TOTAL_TIME_RUNNING },
+       { "FORMAT_ID",                 PERF_FORMAT_ID },
+       { "FORMAT_GROUP",              PERF_FORMAT_GROUP },
+
+       { "RECORD_MMAP",       PERF_RECORD_MMAP },
+       { "RECORD_LOST",       PERF_RECORD_LOST },
+       { "RECORD_COMM",       PERF_RECORD_COMM },
+       { "RECORD_EXIT",       PERF_RECORD_EXIT },
+       { "RECORD_THROTTLE",   PERF_RECORD_THROTTLE },
+       { "RECORD_UNTHROTTLE", PERF_RECORD_UNTHROTTLE },
+       { "RECORD_FORK",       PERF_RECORD_FORK },
+       { "RECORD_READ",       PERF_RECORD_READ },
+       { "RECORD_SAMPLE",     PERF_RECORD_SAMPLE },
+       { .name = NULL, },
+};
+
+static PyMethodDef perf__methods[] = {
+       { .ml_name = NULL, }
+};
+
+PyMODINIT_FUNC initperf(void)
+{
+       PyObject *obj;
+       int i;
+       PyObject *dict, *module = Py_InitModule("perf", perf__methods);
+
+       if (module == NULL ||
+           pyrf_event__setup_types() < 0 ||
+           pyrf_evlist__setup_types() < 0 ||
+           pyrf_evsel__setup_types() < 0 ||
+           pyrf_thread_map__setup_types() < 0 ||
+           pyrf_cpu_map__setup_types() < 0)
+               return;
+
+       Py_INCREF(&pyrf_evlist__type);
+       PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
+
+       Py_INCREF(&pyrf_evsel__type);
+       PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
+
+       Py_INCREF(&pyrf_thread_map__type);
+       PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
+
+       Py_INCREF(&pyrf_cpu_map__type);
+       PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
+
+       dict = PyModule_GetDict(module);
+       if (dict == NULL)
+               goto error;
+
+       for (i = 0; perf__constants[i].name != NULL; i++) {
+               obj = PyInt_FromLong(perf__constants[i].value);
+               if (obj == NULL)
+                       goto error;
+               PyDict_SetItemString(dict, perf__constants[i].name, obj);
+               Py_DECREF(obj);
+       }
+
+error:
+       if (PyErr_Occurred())
+               PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
+}
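
For orientation, a minimal sketch of driving the new perf module from Python 2 (the bindings above target the Python 2 C API). Only the classes, methods and constants defined in this file are assumed; the sampling parameters and the print loop are illustrative, not part of the patch:

    import perf

    def main():
        cpus = perf.cpu_map()        # default: all online cpus
        threads = perf.thread_map()  # default: pid = tid = -1
        evsel = perf.evsel(task = 1, comm = 1, wakeup_events = 1,
                           sample_period = 1, sample_id_all = 1,
                           sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
        evsel.open(cpus = cpus, threads = threads)
        evlist = perf.evlist(cpus, threads)
        evlist.add(evsel)
        evlist.mmap()                # pages = 128, overwrite = 0 by default
        while True:
            evlist.poll(timeout = -1)
            for cpu in cpus:         # cpu_map implements the sequence protocol
                event = evlist.read_on_cpu(cpu)
                if not event:
                    continue
                # the sample_* members come from the parsed perf_sample
                print "cpu: %2d, pid: %4d: %s" % (event.sample_cpu,
                                                  event.sample_pid, event)

    if __name__ == '__main__':
        main()
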
index c6d99334bdfa836c1adba2d613b658fadf20797b..2040b8538527361828ad7a4ad3e034b9c95755fd 100644 (file)
@@ -248,8 +248,7 @@ static void python_process_event(int cpu, void *data,
        context = PyCObject_FromVoidPtr(scripting_context, NULL);
 
        PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
-       PyTuple_SetItem(t, n++,
-                       PyCObject_FromVoidPtr(scripting_context, NULL));
+       PyTuple_SetItem(t, n++, context);
 
        if (handler) {
                PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
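
The hunk above stores the context wrapper created a few lines earlier into the tuple instead of building a second PyCObject for the same pointer, so the first wrapper no longer leaks on every event. On the script side those tuple slots become the handler's leading arguments; a hedged sketch (handler and field names are illustrative):

    # a perf-script python handler: the leading arguments mirror the tuple
    # slots filled in python_process_event() above
    def sched__sched_switch(event_name, context, common_cpu, *rest):
        print "%s fired on cpu %d" % (event_name, common_cpu)
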
index 105f00bfd5552b1b878c21b72b86a80705592252..f26639fa0fb3a1fd9f2c7376a44a386d5491f49a 100644 (file)
@@ -7,6 +7,8 @@
 #include <sys/types.h>
 #include <sys/mman.h>
 
+#include "evlist.h"
+#include "evsel.h"
 #include "session.h"
 #include "sort.h"
 #include "util.h"
@@ -19,7 +21,7 @@ static int perf_session__open(struct perf_session *self, bool force)
                self->fd_pipe = true;
                self->fd = STDIN_FILENO;
 
-               if (perf_header__read(self, self->fd) < 0)
+               if (perf_session__read_header(self, self->fd) < 0)
                        pr_err("incompatible file format");
 
                return 0;
@@ -51,7 +53,7 @@ static int perf_session__open(struct perf_session *self, bool force)
                goto out_close;
        }
 
-       if (perf_header__read(self, self->fd) < 0) {
+       if (perf_session__read_header(self, self->fd) < 0) {
                pr_err("incompatible file format");
                goto out_close;
        }
@@ -67,7 +69,7 @@ out_close:
 
 static void perf_session__id_header_size(struct perf_session *session)
 {
-       struct sample_data *data;
+       struct perf_sample *data;
        u64 sample_type = session->sample_type;
        u16 size = 0;
 
@@ -92,21 +94,10 @@ out:
        session->id_hdr_size = size;
 }
 
-void perf_session__set_sample_id_all(struct perf_session *session, bool value)
-{
-       session->sample_id_all = value;
-       perf_session__id_header_size(session);
-}
-
-void perf_session__set_sample_type(struct perf_session *session, u64 type)
-{
-       session->sample_type = type;
-}
-
 void perf_session__update_sample_type(struct perf_session *self)
 {
-       self->sample_type = perf_header__sample_type(&self->header);
-       self->sample_id_all = perf_header__sample_id_all(&self->header);
+       self->sample_type = perf_evlist__sample_type(self->evlist);
+       self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
        perf_session__id_header_size(self);
 }
 
@@ -135,13 +126,9 @@ struct perf_session *perf_session__new(const char *filename, int mode,
        if (self == NULL)
                goto out;
 
-       if (perf_header__init(&self->header) < 0)
-               goto out_free;
-
        memcpy(self->filename, filename, len);
        self->threads = RB_ROOT;
        INIT_LIST_HEAD(&self->dead_threads);
-       self->hists_tree = RB_ROOT;
        self->last_match = NULL;
        /*
         * On 64bit we can mmap the data file in one go. No need for tiny mmap
@@ -162,17 +149,16 @@ struct perf_session *perf_session__new(const char *filename, int mode,
        if (mode == O_RDONLY) {
                if (perf_session__open(self, force) < 0)
                        goto out_delete;
+               perf_session__update_sample_type(self);
        } else if (mode == O_WRONLY) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
-                * kernel MMAP event, in event__process_mmap().
+                * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(self) < 0)
                        goto out_delete;
        }
 
-       perf_session__update_sample_type(self);
-
        if (ops && ops->ordering_requires_timestamps &&
            ops->ordered_samples && !self->sample_id_all) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
@@ -181,9 +167,6 @@ struct perf_session *perf_session__new(const char *filename, int mode,
 
 out:
        return self;
-out_free:
-       free(self);
-       return NULL;
 out_delete:
        perf_session__delete(self);
        return NULL;
@@ -214,7 +197,6 @@ static void perf_session__delete_threads(struct perf_session *self)
 
 void perf_session__delete(struct perf_session *self)
 {
-       perf_header__exit(&self->header);
        perf_session__destroy_kernel_maps(self);
        perf_session__delete_dead_threads(self);
        perf_session__delete_threads(self);
@@ -242,17 +224,16 @@ static bool symbol__match_parent_regex(struct symbol *sym)
        return 0;
 }
 
-struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
-                                                  struct thread *thread,
-                                                  struct ip_callchain *chain,
-                                                  struct symbol **parent)
+int perf_session__resolve_callchain(struct perf_session *self,
+                                   struct thread *thread,
+                                   struct ip_callchain *chain,
+                                   struct symbol **parent)
 {
        u8 cpumode = PERF_RECORD_MISC_USER;
        unsigned int i;
-       struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));
+       int err;
 
-       if (!syms)
-               return NULL;
+       callchain_cursor_reset(&self->callchain_cursor);
 
        for (i = 0; i < chain->nr; i++) {
                u64 ip = chain->ips[i];
@@ -281,30 +262,33 @@ struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
                                *parent = al.sym;
                        if (!symbol_conf.use_callchain)
                                break;
-                       syms[i].map = al.map;
-                       syms[i].sym = al.sym;
                }
+
+               err = callchain_cursor_append(&self->callchain_cursor,
+                                             ip, al.map, al.sym);
+               if (err)
+                       return err;
        }
 
-       return syms;
+       return 0;
 }
 
-static int process_event_synth_stub(event_t *event __used,
+static int process_event_synth_stub(union perf_event *event __used,
                                    struct perf_session *session __used)
 {
        dump_printf(": unhandled!\n");
        return 0;
 }
 
-static int process_event_stub(event_t *event __used,
-                             struct sample_data *sample __used,
+static int process_event_stub(union perf_event *event __used,
+                             struct perf_sample *sample __used,
                              struct perf_session *session __used)
 {
        dump_printf(": unhandled!\n");
        return 0;
 }
 
-static int process_finished_round_stub(event_t *event __used,
+static int process_finished_round_stub(union perf_event *event __used,
                                       struct perf_session *session __used,
                                       struct perf_event_ops *ops __used)
 {
@@ -312,7 +296,7 @@ static int process_finished_round_stub(event_t *event __used,
        return 0;
 }
 
-static int process_finished_round(event_t *event,
+static int process_finished_round(union perf_event *event,
                                  struct perf_session *session,
                                  struct perf_event_ops *ops);
 
@@ -329,7 +313,7 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
        if (handler->exit == NULL)
                handler->exit = process_event_stub;
        if (handler->lost == NULL)
-               handler->lost = event__process_lost;
+               handler->lost = perf_event__process_lost;
        if (handler->read == NULL)
                handler->read = process_event_stub;
        if (handler->throttle == NULL)
@@ -363,98 +347,98 @@ void mem_bswap_64(void *src, int byte_size)
        }
 }
 
-static void event__all64_swap(event_t *self)
+static void perf_event__all64_swap(union perf_event *event)
 {
-       struct perf_event_header *hdr = &self->header;
-       mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
+       struct perf_event_header *hdr = &event->header;
+       mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
 }
 
-static void event__comm_swap(event_t *self)
+static void perf_event__comm_swap(union perf_event *event)
 {
-       self->comm.pid = bswap_32(self->comm.pid);
-       self->comm.tid = bswap_32(self->comm.tid);
+       event->comm.pid = bswap_32(event->comm.pid);
+       event->comm.tid = bswap_32(event->comm.tid);
 }
 
-static void event__mmap_swap(event_t *self)
+static void perf_event__mmap_swap(union perf_event *event)
 {
-       self->mmap.pid   = bswap_32(self->mmap.pid);
-       self->mmap.tid   = bswap_32(self->mmap.tid);
-       self->mmap.start = bswap_64(self->mmap.start);
-       self->mmap.len   = bswap_64(self->mmap.len);
-       self->mmap.pgoff = bswap_64(self->mmap.pgoff);
+       event->mmap.pid   = bswap_32(event->mmap.pid);
+       event->mmap.tid   = bswap_32(event->mmap.tid);
+       event->mmap.start = bswap_64(event->mmap.start);
+       event->mmap.len   = bswap_64(event->mmap.len);
+       event->mmap.pgoff = bswap_64(event->mmap.pgoff);
 }
 
-static void event__task_swap(event_t *self)
+static void perf_event__task_swap(union perf_event *event)
 {
-       self->fork.pid  = bswap_32(self->fork.pid);
-       self->fork.tid  = bswap_32(self->fork.tid);
-       self->fork.ppid = bswap_32(self->fork.ppid);
-       self->fork.ptid = bswap_32(self->fork.ptid);
-       self->fork.time = bswap_64(self->fork.time);
+       event->fork.pid  = bswap_32(event->fork.pid);
+       event->fork.tid  = bswap_32(event->fork.tid);
+       event->fork.ppid = bswap_32(event->fork.ppid);
+       event->fork.ptid = bswap_32(event->fork.ptid);
+       event->fork.time = bswap_64(event->fork.time);
 }
 
-static void event__read_swap(event_t *self)
+static void perf_event__read_swap(union perf_event *event)
 {
-       self->read.pid          = bswap_32(self->read.pid);
-       self->read.tid          = bswap_32(self->read.tid);
-       self->read.value        = bswap_64(self->read.value);
-       self->read.time_enabled = bswap_64(self->read.time_enabled);
-       self->read.time_running = bswap_64(self->read.time_running);
-       self->read.id           = bswap_64(self->read.id);
+       event->read.pid          = bswap_32(event->read.pid);
+       event->read.tid          = bswap_32(event->read.tid);
+       event->read.value        = bswap_64(event->read.value);
+       event->read.time_enabled = bswap_64(event->read.time_enabled);
+       event->read.time_running = bswap_64(event->read.time_running);
+       event->read.id           = bswap_64(event->read.id);
 }
 
-static void event__attr_swap(event_t *self)
+static void perf_event__attr_swap(union perf_event *event)
 {
        size_t size;
 
-       self->attr.attr.type            = bswap_32(self->attr.attr.type);
-       self->attr.attr.size            = bswap_32(self->attr.attr.size);
-       self->attr.attr.config          = bswap_64(self->attr.attr.config);
-       self->attr.attr.sample_period   = bswap_64(self->attr.attr.sample_period);
-       self->attr.attr.sample_type     = bswap_64(self->attr.attr.sample_type);
-       self->attr.attr.read_format     = bswap_64(self->attr.attr.read_format);
-       self->attr.attr.wakeup_events   = bswap_32(self->attr.attr.wakeup_events);
-       self->attr.attr.bp_type         = bswap_32(self->attr.attr.bp_type);
-       self->attr.attr.bp_addr         = bswap_64(self->attr.attr.bp_addr);
-       self->attr.attr.bp_len          = bswap_64(self->attr.attr.bp_len);
-
-       size = self->header.size;
-       size -= (void *)&self->attr.id - (void *)self;
-       mem_bswap_64(self->attr.id, size);
+       event->attr.attr.type           = bswap_32(event->attr.attr.type);
+       event->attr.attr.size           = bswap_32(event->attr.attr.size);
+       event->attr.attr.config         = bswap_64(event->attr.attr.config);
+       event->attr.attr.sample_period  = bswap_64(event->attr.attr.sample_period);
+       event->attr.attr.sample_type    = bswap_64(event->attr.attr.sample_type);
+       event->attr.attr.read_format    = bswap_64(event->attr.attr.read_format);
+       event->attr.attr.wakeup_events  = bswap_32(event->attr.attr.wakeup_events);
+       event->attr.attr.bp_type        = bswap_32(event->attr.attr.bp_type);
+       event->attr.attr.bp_addr        = bswap_64(event->attr.attr.bp_addr);
+       event->attr.attr.bp_len         = bswap_64(event->attr.attr.bp_len);
+
+       size = event->header.size;
+       size -= (void *)&event->attr.id - (void *)event;
+       mem_bswap_64(event->attr.id, size);
 }
 
-static void event__event_type_swap(event_t *self)
+static void perf_event__event_type_swap(union perf_event *event)
 {
-       self->event_type.event_type.event_id =
-               bswap_64(self->event_type.event_type.event_id);
+       event->event_type.event_type.event_id =
+               bswap_64(event->event_type.event_type.event_id);
 }
 
-static void event__tracing_data_swap(event_t *self)
+static void perf_event__tracing_data_swap(union perf_event *event)
 {
-       self->tracing_data.size = bswap_32(self->tracing_data.size);
+       event->tracing_data.size = bswap_32(event->tracing_data.size);
 }
 
-typedef void (*event__swap_op)(event_t *self);
-
-static event__swap_op event__swap_ops[] = {
-       [PERF_RECORD_MMAP]   event__mmap_swap,
-       [PERF_RECORD_COMM]   event__comm_swap,
-       [PERF_RECORD_FORK]   event__task_swap,
-       [PERF_RECORD_EXIT]   event__task_swap,
-       [PERF_RECORD_LOST]   event__all64_swap,
-       [PERF_RECORD_READ]   event__read_swap,
-       [PERF_RECORD_SAMPLE] event__all64_swap,
-       [PERF_RECORD_HEADER_ATTR]   event__attr_swap,
-       [PERF_RECORD_HEADER_EVENT_TYPE]   = event__event_type_swap,
-       [PERF_RECORD_HEADER_TRACING_DATA]   = event__tracing_data_swap,
-       [PERF_RECORD_HEADER_BUILD_ID]   = NULL,
-       [PERF_RECORD_HEADER_MAX]    = NULL,
+typedef void (*perf_event__swap_op)(union perf_event *event);
+
+static perf_event__swap_op perf_event__swap_ops[] = {
+       [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
+       [PERF_RECORD_COMM]                = perf_event__comm_swap,
+       [PERF_RECORD_FORK]                = perf_event__task_swap,
+       [PERF_RECORD_EXIT]                = perf_event__task_swap,
+       [PERF_RECORD_LOST]                = perf_event__all64_swap,
+       [PERF_RECORD_READ]                = perf_event__read_swap,
+       [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
+       [PERF_RECORD_HEADER_ATTR]         = perf_event__attr_swap,
+       [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
+       [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
+       [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
+       [PERF_RECORD_HEADER_MAX]          = NULL,
 };
 
 struct sample_queue {
        u64                     timestamp;
        u64                     file_offset;
-       event_t                 *event;
+       union perf_event        *event;
        struct list_head        list;
 };
 
@@ -472,8 +456,8 @@ static void perf_session_free_sample_buffers(struct perf_session *session)
 }
 
 static int perf_session_deliver_event(struct perf_session *session,
-                                     event_t *event,
-                                     struct sample_data *sample,
+                                     union perf_event *event,
+                                     struct perf_sample *sample,
                                      struct perf_event_ops *ops,
                                      u64 file_offset);
 
@@ -483,7 +467,7 @@ static void flush_sample_queue(struct perf_session *s,
        struct ordered_samples *os = &s->ordered_samples;
        struct list_head *head = &os->samples;
        struct sample_queue *tmp, *iter;
-       struct sample_data sample;
+       struct perf_sample sample;
        u64 limit = os->next_flush;
        u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
 
@@ -494,7 +478,7 @@ static void flush_sample_queue(struct perf_session *s,
                if (iter->timestamp > limit)
                        break;
 
-               event__parse_sample(iter->event, s, &sample);
+               perf_session__parse_sample(s, iter->event, &sample);
                perf_session_deliver_event(s, iter->event, &sample, ops,
                                           iter->file_offset);
 
@@ -550,7 +534,7 @@ static void flush_sample_queue(struct perf_session *s,
  *      Flush every events below timestamp 7
  *      etc...
  */
-static int process_finished_round(event_t *event __used,
+static int process_finished_round(union perf_event *event __used,
                                  struct perf_session *session,
                                  struct perf_event_ops *ops)
 {
@@ -607,12 +591,12 @@ static void __queue_event(struct sample_queue *new, struct perf_session *s)
 
 #define MAX_SAMPLE_BUFFER      (64 * 1024 / sizeof(struct sample_queue))
 
-static int perf_session_queue_event(struct perf_session *s, event_t *event,
-                                   struct sample_data *data, u64 file_offset)
+static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
+                                   struct perf_sample *sample, u64 file_offset)
 {
        struct ordered_samples *os = &s->ordered_samples;
        struct list_head *sc = &os->sample_cache;
-       u64 timestamp = data->time;
+       u64 timestamp = sample->time;
        struct sample_queue *new;
 
        if (!timestamp || timestamp == ~0ULL)
@@ -648,7 +632,7 @@ static int perf_session_queue_event(struct perf_session *s, event_t *event,
        return 0;
 }
 
-static void callchain__printf(struct sample_data *sample)
+static void callchain__printf(struct perf_sample *sample)
 {
        unsigned int i;
 
@@ -660,8 +644,8 @@ static void callchain__printf(struct sample_data *sample)
 }
 
 static void perf_session__print_tstamp(struct perf_session *session,
-                                      event_t *event,
-                                      struct sample_data *sample)
+                                      union perf_event *event,
+                                      struct perf_sample *sample)
 {
        if (event->header.type != PERF_RECORD_SAMPLE &&
            !session->sample_id_all) {
@@ -676,8 +660,8 @@ static void perf_session__print_tstamp(struct perf_session *session,
                printf("%" PRIu64 " ", sample->time);
 }
 
-static void dump_event(struct perf_session *session, event_t *event,
-                      u64 file_offset, struct sample_data *sample)
+static void dump_event(struct perf_session *session, union perf_event *event,
+                      u64 file_offset, struct perf_sample *sample)
 {
        if (!dump_trace)
                return;
@@ -691,11 +675,11 @@ static void dump_event(struct perf_session *session, event_t *event,
                perf_session__print_tstamp(session, event, sample);
 
        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
-              event->header.size, event__get_event_name(event->header.type));
+              event->header.size, perf_event__name(event->header.type));
 }
 
-static void dump_sample(struct perf_session *session, event_t *event,
-                       struct sample_data *sample)
+static void dump_sample(struct perf_session *session, union perf_event *event,
+                       struct perf_sample *sample)
 {
        if (!dump_trace)
                return;
@@ -709,8 +693,8 @@ static void dump_sample(struct perf_session *session, event_t *event,
 }
 
 static int perf_session_deliver_event(struct perf_session *session,
-                                     event_t *event,
-                                     struct sample_data *sample,
+                                     union perf_event *event,
+                                     struct perf_sample *sample,
                                      struct perf_event_ops *ops,
                                      u64 file_offset)
 {
@@ -743,7 +727,7 @@ static int perf_session_deliver_event(struct perf_session *session,
 }
 
 static int perf_session__preprocess_sample(struct perf_session *session,
-                                          event_t *event, struct sample_data *sample)
+                                          union perf_event *event, struct perf_sample *sample)
 {
        if (event->header.type != PERF_RECORD_SAMPLE ||
            !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
@@ -758,7 +742,7 @@ static int perf_session__preprocess_sample(struct perf_session *session,
        return 0;
 }
 
-static int perf_session__process_user_event(struct perf_session *session, event_t *event,
+static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
                                            struct perf_event_ops *ops, u64 file_offset)
 {
        dump_event(session, event, file_offset, NULL);
@@ -783,15 +767,16 @@ static int perf_session__process_user_event(struct perf_session *session, event_
 }
 
 static int perf_session__process_event(struct perf_session *session,
-                                      event_t *event,
+                                      union perf_event *event,
                                       struct perf_event_ops *ops,
                                       u64 file_offset)
 {
-       struct sample_data sample;
+       struct perf_sample sample;
        int ret;
 
-       if (session->header.needs_swap && event__swap_ops[event->header.type])
-               event__swap_ops[event->header.type](event);
+       if (session->header.needs_swap &&
+           perf_event__swap_ops[event->header.type])
+               perf_event__swap_ops[event->header.type](event);
 
        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;
@@ -804,7 +789,7 @@ static int perf_session__process_event(struct perf_session *session,
        /*
         * For all kernel events we get the sample data
         */
-       event__parse_sample(event, session, &sample);
+       perf_session__parse_sample(session, event, &sample);
 
        /* Preprocess sample records - precheck callchains */
        if (perf_session__preprocess_sample(session, event, &sample))
@@ -843,7 +828,7 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se
 static void perf_session__warn_about_errors(const struct perf_session *session,
                                            const struct perf_event_ops *ops)
 {
-       if (ops->lost == event__process_lost &&
+       if (ops->lost == perf_event__process_lost &&
            session->hists.stats.total_lost != 0) {
                ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
                            "!\n\nCheck IO/CPU overload!\n\n",
@@ -875,7 +860,7 @@ volatile int session_done;
 static int __perf_session__process_pipe_events(struct perf_session *self,
                                               struct perf_event_ops *ops)
 {
-       event_t event;
+       union perf_event event;
        uint32_t size;
        int skip = 0;
        u64 head;
@@ -956,7 +941,7 @@ int __perf_session__process_events(struct perf_session *session,
        struct ui_progress *progress;
        size_t  page_size, mmap_size;
        char *buf, *mmaps[8];
-       event_t *event;
+       union perf_event *event;
        uint32_t size;
 
        perf_event_ops__fill_defaults(ops);
@@ -1001,7 +986,7 @@ remap:
        file_pos = file_offset + head;
 
 more:
-       event = (event_t *)(buf + head);
+       event = (union perf_event *)(buf + head);
 
        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);
@@ -1134,3 +1119,18 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
        size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
        return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
 }
+
+size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
+{
+       struct perf_evsel *pos;
+       size_t ret = fprintf(fp, "Aggregated stats:\n");
+
+       ret += hists__fprintf_nr_events(&session->hists, fp);
+
+       list_for_each_entry(pos, &session->evlist->entries, node) {
+               ret += fprintf(fp, "%s stats:\n", event_name(pos));
+               ret += hists__fprintf_nr_events(&pos->hists, fp);
+       }
+
+       return ret;
+}
index decd83f274fd3642fecf039740e75a015431df4d..b5b148b0aacaf937596fa23242bf330cad20769c 100644
@@ -34,12 +34,12 @@ struct perf_session {
        struct thread           *last_match;
        struct machine          host_machine;
        struct rb_root          machines;
-       struct rb_root          hists_tree;
+       struct perf_evlist      *evlist;
        /*
-        * FIXME: should point to the first entry in hists_tree and
-        *        be a hists instance. Right now its only 'report'
-        *        that is using ->hists_tree while all the rest use
-        *        ->hists.
+        * FIXME: Need to split this up further, we need global
+        *        stats + per event stats. 'perf diff' also needs
+        *        to properly support multiple events in a single
+        *        perf.data file.
         */
        struct hists            hists;
        u64                     sample_type;
@@ -51,15 +51,17 @@ struct perf_session {
        int                     cwdlen;
        char                    *cwd;
        struct ordered_samples  ordered_samples;
-       char filename[0];
+       struct callchain_cursor callchain_cursor;
+       char                    filename[0];
 };
 
 struct perf_event_ops;
 
-typedef int (*event_op)(event_t *self, struct sample_data *sample,
+typedef int (*event_op)(union perf_event *self, struct perf_sample *sample,
                        struct perf_session *session);
-typedef int (*event_synth_op)(event_t *self, struct perf_session *session);
-typedef int (*event_op2)(event_t *self, struct perf_session *session,
+typedef int (*event_synth_op)(union perf_event *self,
+                             struct perf_session *session);
+typedef int (*event_op2)(union perf_event *self, struct perf_session *session,
                         struct perf_event_ops *ops);
 
 struct perf_event_ops {
@@ -94,10 +96,10 @@ int __perf_session__process_events(struct perf_session *self,
 int perf_session__process_events(struct perf_session *self,
                                 struct perf_event_ops *event_ops);
 
-struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
-                                                  struct thread *thread,
-                                                  struct ip_callchain *chain,
-                                                  struct symbol **parent);
+int perf_session__resolve_callchain(struct perf_session *self,
+                                   struct thread *thread,
+                                   struct ip_callchain *chain,
+                                   struct symbol **parent);
 
 bool perf_session__has_traces(struct perf_session *self, const char *msg);
 
@@ -110,8 +112,6 @@ void mem_bswap_64(void *src, int byte_size);
 int perf_session__create_kernel_maps(struct perf_session *self);
 
 void perf_session__update_sample_type(struct perf_session *self);
-void perf_session__set_sample_id_all(struct perf_session *session, bool value);
-void perf_session__set_sample_type(struct perf_session *session, u64 type);
 void perf_session__remove_thread(struct perf_session *self, struct thread *th);
 
 static inline
@@ -149,9 +149,14 @@ size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
 size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
                                          FILE *fp, bool with_hits);
 
-static inline
-size_t perf_session__fprintf_nr_events(struct perf_session *self, FILE *fp)
+size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
+
+static inline int perf_session__parse_sample(struct perf_session *session,
+                                            const union perf_event *event,
+                                            struct perf_sample *sample)
 {
-       return hists__fprintf_nr_events(&self->hists, fp);
+       return perf_event__parse_sample(event, session->sample_type,
+                                       session->sample_id_all, sample);
 }
+
 #endif /* __PERF_SESSION_H */
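
The rename above also changes what a session callback sees: handlers now take a union perf_event plus a struct perf_sample that perf_session__process_event() has already filled in via perf_session__parse_sample(). A minimal sketch of an event_op handler under the new typedefs; the handler name and the .sample member wiring are illustrative assumptions, not shown in this hunk:

	static int process_sample(union perf_event *event,
				  struct perf_sample *sample,
				  struct perf_session *session __used)
	{
		/* sample was parsed before delivery, so fields like
		 * sample->time are directly usable here */
		printf("PERF_RECORD_%s at %" PRIu64 "\n",
		       perf_event__name(event->header.type), sample->time);
		return 0;
	}

	static struct perf_event_ops ops = {
		.sample = process_sample,	/* assumed member name */
	};
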
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
new file mode 100644
index 0000000..e24ffad
--- /dev/null
@@ -0,0 +1,19 @@
+#!/usr/bin/python2
+
+from distutils.core import setup, Extension
+
+perf = Extension('perf',
+                 sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c',
+                            'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c',
+                            'util/util.c', 'util/xyarray.c', 'util/cgroup.c'],
+                 include_dirs = ['util/include'],
+                 extra_compile_args = ['-fno-strict-aliasing', '-Wno-write-strings'])
+
+setup(name='perf',
+      version='0.1',
+      description='Interface with the Linux profiling infrastructure',
+      author='Arnaldo Carvalho de Melo',
+      author_email='acme@redhat.com',
+      license='GPLv2',
+      url='http://perf.wiki.kernel.org',
+      ext_modules=[perf])
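
For context, this script is presumably driven from the perf Makefile rather than run by hand; an invocation along the lines of "python2 util/setup.py --quiet build_ext" would build a perf.so extension module that scripts can then load with "import perf" to reach the evlist/evsel/cpumap/thread_map code listed in sources above. The exact invocation and output path are assumptions, not part of this patch.
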
diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c
new file mode 100644
index 0000000..834c8eb
--- /dev/null
@@ -0,0 +1,199 @@
+#include "util.h"
+#include "string.h"
+#include "strfilter.h"
+
+/* Operators */
+static const char *OP_and      = "&";  /* Logical AND */
+static const char *OP_or       = "|";  /* Logical OR */
+static const char *OP_not      = "!";  /* Logical NOT */
+
+#define is_operator(c) ((c) == '|' || (c) == '&' || (c) == '!')
+#define is_separator(c)        (is_operator(c) || (c) == '(' || (c) == ')')
+
+static void strfilter_node__delete(struct strfilter_node *self)
+{
+       if (self) {
+               if (self->p && !is_operator(*self->p))
+                       free((char *)self->p);
+               strfilter_node__delete(self->l);
+               strfilter_node__delete(self->r);
+               free(self);
+       }
+}
+
+void strfilter__delete(struct strfilter *self)
+{
+       if (self) {
+               strfilter_node__delete(self->root);
+               free(self);
+       }
+}
+
+static const char *get_token(const char *s, const char **e)
+{
+       const char *p;
+
+       while (isspace(*s))     /* Skip spaces */
+               s++;
+
+       if (*s == '\0') {
+               p = s;
+               goto end;
+       }
+
+       p = s + 1;
+       if (!is_separator(*s)) {
+               /* End search */
+retry:
+               while (*p && !is_separator(*p) && !isspace(*p))
+                       p++;
+               /* Escape and special case: '!' is also used in glob pattern */
+               if (*(p - 1) == '\\' || (*p == '!' && *(p - 1) == '[')) {
+                       p++;
+                       goto retry;
+               }
+       }
+end:
+       *e = p;
+       return s;
+}
+
+static struct strfilter_node *strfilter_node__alloc(const char *op,
+                                                   struct strfilter_node *l,
+                                                   struct strfilter_node *r)
+{
+       struct strfilter_node *ret = zalloc(sizeof(struct strfilter_node));
+
+       if (ret) {
+               ret->p = op;
+               ret->l = l;
+               ret->r = r;
+       }
+
+       return ret;
+}
+
+static struct strfilter_node *strfilter_node__new(const char *s,
+                                                 const char **ep)
+{
+       struct strfilter_node root, *cur, *last_op;
+       const char *e;
+
+       if (!s)
+               return NULL;
+
+       memset(&root, 0, sizeof(root));
+       last_op = cur = &root;
+
+       s = get_token(s, &e);
+       while (*s != '\0' && *s != ')') {
+               switch (*s) {
+               case '&':       /* Exchg last OP->r with AND */
+                       if (!cur->r || !last_op->r)
+                               goto error;
+                       cur = strfilter_node__alloc(OP_and, last_op->r, NULL);
+                       if (!cur)
+                               goto nomem;
+                       last_op->r = cur;
+                       last_op = cur;
+                       break;
+               case '|':       /* Exchg the root with OR */
+                       if (!cur->r || !root.r)
+                               goto error;
+                       cur = strfilter_node__alloc(OP_or, root.r, NULL);
+                       if (!cur)
+                               goto nomem;
+                       root.r = cur;
+                       last_op = cur;
+                       break;
+               case '!':       /* Add NOT as a leaf node */
+                       if (cur->r)
+                               goto error;
+                       cur->r = strfilter_node__alloc(OP_not, NULL, NULL);
+                       if (!cur->r)
+                               goto nomem;
+                       cur = cur->r;
+                       break;
+               case '(':       /* Recursively parse inside the parentheses */
+                       if (cur->r)
+                               goto error;
+                       cur->r = strfilter_node__new(s + 1, &s);
+                       if (!s)
+                               goto nomem;
+                       if (!cur->r || *s != ')')
+                               goto error;
+                       e = s + 1;
+                       break;
+               default:
+                       if (cur->r)
+                               goto error;
+                       cur->r = strfilter_node__alloc(NULL, NULL, NULL);
+                       if (!cur->r)
+                               goto nomem;
+                       cur->r->p = strndup(s, e - s);
+                       if (!cur->r->p)
+                               goto nomem;
+               }
+               s = get_token(e, &e);
+       }
+       if (!cur->r)
+               goto error;
+       *ep = s;
+       return root.r;
+nomem:
+       s = NULL;
+error:
+       *ep = s;
+       strfilter_node__delete(root.r);
+       return NULL;
+}
+
+/*
+ * Parse a filter rule and return a new strfilter.
+ * Return NULL on failure; in that case *ep == NULL means memory allocation failed.
+ */
+struct strfilter *strfilter__new(const char *rules, const char **err)
+{
+       struct strfilter *ret = zalloc(sizeof(struct strfilter));
+       const char *ep = NULL;
+
+       if (ret)
+               ret->root = strfilter_node__new(rules, &ep);
+
+       if (!ret || !ret->root || *ep != '\0') {
+               if (err)
+                       *err = ep;
+               strfilter__delete(ret);
+               ret = NULL;
+       }
+
+       return ret;
+}
+
+static bool strfilter_node__compare(struct strfilter_node *self,
+                                   const char *str)
+{
+       if (!self || !self->p)
+               return false;
+
+       switch (*self->p) {
+       case '|':       /* OR */
+               return strfilter_node__compare(self->l, str) ||
+                       strfilter_node__compare(self->r, str);
+       case '&':       /* AND */
+               return strfilter_node__compare(self->l, str) &&
+                       strfilter_node__compare(self->r, str);
+       case '!':       /* NOT */
+               return !strfilter_node__compare(self->r, str);
+       default:
+               return strglobmatch(str, self->p);
+       }
+}
+
+/* Return true if STR matches the filter rules */
+bool strfilter__compare(struct strfilter *self, const char *str)
+{
+       if (!self)
+               return false;
+       return strfilter_node__compare(self->root, str);
+}
diff --git a/tools/perf/util/strfilter.h b/tools/perf/util/strfilter.h
new file mode 100644
index 0000000..00f58a7
--- /dev/null
@@ -0,0 +1,48 @@
+#ifndef __PERF_STRFILTER_H
+#define __PERF_STRFILTER_H
+/* General purpose glob matching filter */
+
+#include <linux/list.h>
+#include <stdbool.h>
+
+/* A node of string filter */
+struct strfilter_node {
+       struct strfilter_node *l;       /* Tree left branch (for &,|) */
+       struct strfilter_node *r;       /* Tree right branch (for !,&,|) */
+       const char *p;          /* Operator or rule */
+};
+
+/* String filter */
+struct strfilter {
+       struct strfilter_node *root;
+};
+
+/**
+ * strfilter__new - Create a new string filter
+ * @rules: Filter rule, which is a combination of glob expressions.
+ * @err: Pointer set to the location of an error detected in @rules
+ *
+ * Parse @rules and return a new strfilter. Return NULL if an error is
+ * detected; in that case, *@err indicates where the error was found,
+ * and *@err is NULL if a memory allocation failed.
+ */
+struct strfilter *strfilter__new(const char *rules, const char **err);
+
+/**
+ * strfilter__compare - compare given string and a string filter
+ * @self: String filter
+ * @str: target string
+ *
+ * Compare @str against @self. Return true if @str matches the rules.
+ */
+bool strfilter__compare(struct strfilter *self, const char *str);
+
+/**
+ * strfilter__delete - delete a string filter
+ * @self: String filter to delete
+ *
+ * Delete @self.
+ */
+void strfilter__delete(struct strfilter *self);
+
+#endif
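
Taken together with strfilter.c above, this header gives a small boolean language over glob patterns: '&', '|', '!' and parentheses combine leaves that are matched with strglobmatch(). A hedged usage sketch; the rule string and helper function are illustrative, and only the three public functions declared above are assumed:

	#include <stdio.h>
	#include "strfilter.h"

	static int strfilter_usage_sketch(void)
	{
		const char *err = NULL;
		struct strfilter *filter =
			strfilter__new("(sys_* | do_*) & !*fork*", &err);

		if (filter == NULL) {
			if (err == NULL)
				fprintf(stderr, "out of memory\n");
			else
				fprintf(stderr, "parse error near: %s\n", err);
			return -1;
		}

		/* matches sys_* and is not rejected by !*fork* => true */
		printf("sys_open -> %s\n",
		       strfilter__compare(filter, "sys_open") ? "match" : "no match");
		/* matches do_*, but the !*fork* clause rejects it => false */
		printf("do_fork  -> %s\n",
		       strfilter__compare(filter, "do_fork") ? "match" : "no match");

		strfilter__delete(filter);
		return 0;
	}
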
index fb737fe9be918d093a8f6b377e434c8c74ffe88a..96c866045d60d2a17f4d969ed5e3a0a3c00933fc 100644
@@ -456,9 +456,9 @@ void svg_legenda(void)
                return;
 
        svg_legenda_box(0,      "Running", "sample");
-       svg_legenda_box(100,    "Idle","rect.c1");
-       svg_legenda_box(200,    "Deeper Idle", "rect.c3");
-       svg_legenda_box(350,    "Deepest Idle", "rect.c6");
+       svg_legenda_box(100,    "Idle","c1");
+       svg_legenda_box(200,    "Deeper Idle", "c3");
+       svg_legenda_box(350,    "Deepest Idle", "c6");
        svg_legenda_box(550,    "Sleeping", "process2");
        svg_legenda_box(650,    "Waiting for cpu", "waiting");
        svg_legenda_box(800,    "Blocked on IO", "blocked");
index 7821d0e6866f10e745762cc02c57237461ae0f8d..00014e32c288fc0860c233d504badb66935a037e 100644
@@ -207,7 +207,6 @@ struct dso *dso__new(const char *name)
                dso__set_short_name(self, self->name);
                for (i = 0; i < MAP__NR_TYPES; ++i)
                        self->symbols[i] = self->symbol_names[i] = RB_ROOT;
-               self->slen_calculated = 0;
                self->origin = DSO__ORIG_NOT_FOUND;
                self->loaded = 0;
                self->sorted_by_name = 0;
@@ -1525,8 +1524,8 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
                             symbol_conf.symfs, self->long_name);
                        break;
                case DSO__ORIG_GUEST_KMODULE:
-                       if (map->groups && map->groups->machine)
-                               root_dir = map->groups->machine->root_dir;
+                       if (map->groups && machine)
+                               root_dir = machine->root_dir;
                        else
                                root_dir = "";
                        snprintf(name, size, "%s%s%s", symbol_conf.symfs,
@@ -1836,7 +1835,7 @@ int dso__load_vmlinux(struct dso *self, struct map *map,
        int err = -1, fd;
        char symfs_vmlinux[PATH_MAX];
 
-       snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s/%s",
+       snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
                 symbol_conf.symfs, vmlinux);
        fd = open(symfs_vmlinux, O_RDONLY);
        if (fd < 0)
index 670cd1c88f54dc932d18b7c79d609d61e7560295..4d7ed09fe3321b8e9d6e45094602aa29f6dea635 100644
@@ -132,7 +132,6 @@ struct dso {
        struct rb_root   symbol_names[MAP__NR_TYPES];
        enum dso_kernel_type    kernel;
        u8               adjust_symbols:1;
-       u8               slen_calculated:1;
        u8               has_build_id:1;
        u8               hit:1;
        u8               annotate_warned:1;
index 00f4eade2e3e9c1fdd2e9c92c83222a10c1555fd..d5d3b22250f36e397f15c1298a47fcc6d2ccf360 100644
@@ -7,61 +7,6 @@
 #include "util.h"
 #include "debug.h"
 
-/* Skip "." and ".." directories */
-static int filter(const struct dirent *dir)
-{
-       if (dir->d_name[0] == '.')
-               return 0;
-       else
-               return 1;
-}
-
-struct thread_map *thread_map__new_by_pid(pid_t pid)
-{
-       struct thread_map *threads;
-       char name[256];
-       int items;
-       struct dirent **namelist = NULL;
-       int i;
-
-       sprintf(name, "/proc/%d/task", pid);
-       items = scandir(name, &namelist, filter, NULL);
-       if (items <= 0)
-                return NULL;
-
-       threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
-       if (threads != NULL) {
-               for (i = 0; i < items; i++)
-                       threads->map[i] = atoi(namelist[i]->d_name);
-               threads->nr = items;
-       }
-
-       for (i=0; i<items; i++)
-               free(namelist[i]);
-       free(namelist);
-
-       return threads;
-}
-
-struct thread_map *thread_map__new_by_tid(pid_t tid)
-{
-       struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
-
-       if (threads != NULL) {
-               threads->map[0] = tid;
-               threads->nr     = 1;
-       }
-
-       return threads;
-}
-
-struct thread_map *thread_map__new(pid_t pid, pid_t tid)
-{
-       if (pid != -1)
-               return thread_map__new_by_pid(pid);
-       return thread_map__new_by_tid(tid);
-}
-
 static struct thread *thread__new(pid_t pid)
 {
        struct thread *self = zalloc(sizeof(*self));
index d7574101054a8ae9b6ded40fd0a813c3cacf9965..e5f2401c1b5eb949c2c8c1e6193f3acc44588d06 100644
@@ -18,24 +18,10 @@ struct thread {
        int                     comm_len;
 };
 
-struct thread_map {
-       int nr;
-       int map[];
-};
-
 struct perf_session;
 
 void thread__delete(struct thread *self);
 
-struct thread_map *thread_map__new_by_pid(pid_t pid);
-struct thread_map *thread_map__new_by_tid(pid_t tid);
-struct thread_map *thread_map__new(pid_t pid, pid_t tid);
-
-static inline void thread_map__delete(struct thread_map *threads)
-{
-       free(threads);
-}
-
 int thread__set_comm(struct thread *self, const char *comm);
 int thread__comm_len(struct thread *self);
 struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
new file mode 100644
index 0000000..a5df131
--- /dev/null
@@ -0,0 +1,64 @@
+#include <dirent.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "thread_map.h"
+
+/* Skip "." and ".." directories */
+static int filter(const struct dirent *dir)
+{
+       if (dir->d_name[0] == '.')
+               return 0;
+       else
+               return 1;
+}
+
+struct thread_map *thread_map__new_by_pid(pid_t pid)
+{
+       struct thread_map *threads;
+       char name[256];
+       int items;
+       struct dirent **namelist = NULL;
+       int i;
+
+       sprintf(name, "/proc/%d/task", pid);
+       items = scandir(name, &namelist, filter, NULL);
+       if (items <= 0)
+                return NULL;
+
+       threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
+       if (threads != NULL) {
+               for (i = 0; i < items; i++)
+                       threads->map[i] = atoi(namelist[i]->d_name);
+               threads->nr = items;
+       }
+
+       for (i = 0; i < items; i++)
+               free(namelist[i]);
+       free(namelist);
+
+       return threads;
+}
+
+struct thread_map *thread_map__new_by_tid(pid_t tid)
+{
+       struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
+
+       if (threads != NULL) {
+               threads->map[0] = tid;
+               threads->nr     = 1;
+       }
+
+       return threads;
+}
+
+struct thread_map *thread_map__new(pid_t pid, pid_t tid)
+{
+       if (pid != -1)
+               return thread_map__new_by_pid(pid);
+       return thread_map__new_by_tid(tid);
+}
+
+void thread_map__delete(struct thread_map *threads)
+{
+       free(threads);
+}
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h
new file mode 100644
index 0000000..3cb9073
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef __PERF_THREAD_MAP_H
+#define __PERF_THREAD_MAP_H
+
+#include <sys/types.h>
+
+struct thread_map {
+       int nr;
+       int map[];
+};
+
+struct thread_map *thread_map__new_by_pid(pid_t pid);
+struct thread_map *thread_map__new_by_tid(pid_t tid);
+struct thread_map *thread_map__new(pid_t pid, pid_t tid);
+void thread_map__delete(struct thread_map *threads);
+#endif /* __PERF_THREAD_MAP_H */
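
The three constructors keep the semantics they had in thread.c: thread_map__new_by_pid() scans /proc/<pid>/task, thread_map__new_by_tid() wraps a single tid, and thread_map__new() picks between them based on whether pid is -1. A small standalone sketch of a caller (illustrative only):

	#include <stdio.h>
	#include <unistd.h>
	#include "thread_map.h"

	int main(void)
	{
		/* pid != -1, so this takes the /proc/<pid>/task scanning path */
		struct thread_map *threads = thread_map__new(getpid(), -1);
		int i;

		if (threads == NULL)
			return 1;

		for (i = 0; i < threads->nr; i++)
			printf("tid %d\n", threads->map[i]);

		thread_map__delete(threads);
		return 0;
	}
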
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
new file mode 100644
index 0000000..75cfe4d
--- /dev/null
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Refactored from builtin-top.c, see that file for further copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include "cpumap.h"
+#include "event.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "parse-events.h"
+#include "symbol.h"
+#include "top.h"
+#include <inttypes.h>
+
+/*
+ * Ordering weight: count-1 * count-2 * ... / count-n
+ */
+static double sym_weight(const struct sym_entry *sym, struct perf_top *top)
+{
+       double weight = sym->snap_count;
+       int counter;
+
+       if (!top->display_weighted)
+               return weight;
+
+       for (counter = 1; counter < top->evlist->nr_entries - 1; counter++)
+               weight *= sym->count[counter];
+
+       weight /= (sym->count[counter] + 1);
+
+       return weight;
+}
+
+static void perf_top__remove_active_sym(struct perf_top *top, struct sym_entry *syme)
+{
+       pthread_mutex_lock(&top->active_symbols_lock);
+       list_del_init(&syme->node);
+       pthread_mutex_unlock(&top->active_symbols_lock);
+}
+
+static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
+{
+       struct rb_node **p = &tree->rb_node;
+       struct rb_node *parent = NULL;
+       struct sym_entry *iter;
+
+       while (*p != NULL) {
+               parent = *p;
+               iter = rb_entry(parent, struct sym_entry, rb_node);
+
+               if (se->weight > iter->weight)
+                       p = &(*p)->rb_left;
+               else
+                       p = &(*p)->rb_right;
+       }
+
+       rb_link_node(&se->rb_node, parent, p);
+       rb_insert_color(&se->rb_node, tree);
+}
+
+#define SNPRINTF(buf, size, fmt, args...) \
+({ \
+       size_t r = snprintf(buf, size, fmt, ## args); \
+       r > size ?  size : r; \
+})
+
+size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
+{
+       struct perf_evsel *counter;
+       float samples_per_sec = top->samples / top->delay_secs;
+       float ksamples_per_sec = top->kernel_samples / top->delay_secs;
+       float esamples_percent = (100.0 * top->exact_samples) / top->samples;
+       size_t ret = 0;
+
+       if (!perf_guest) {
+               ret = SNPRINTF(bf, size,
+                              "   PerfTop:%8.0f irqs/sec  kernel:%4.1f%%"
+                              "  exact: %4.1f%% [", samples_per_sec,
+                              100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
+                                       samples_per_sec)),
+                               esamples_percent);
+       } else {
+               float us_samples_per_sec = top->us_samples / top->delay_secs;
+               float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
+               float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs;
+
+               ret = SNPRINTF(bf, size,
+                              "   PerfTop:%8.0f irqs/sec  kernel:%4.1f%% us:%4.1f%%"
+                              " guest kernel:%4.1f%% guest us:%4.1f%%"
+                              " exact: %4.1f%% [", samples_per_sec,
+                              100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
+                                                samples_per_sec)),
+                              100.0 - (100.0 * ((samples_per_sec - us_samples_per_sec) /
+                                                samples_per_sec)),
+                              100.0 - (100.0 * ((samples_per_sec -
+                                                 guest_kernel_samples_per_sec) /
+                                                samples_per_sec)),
+                              100.0 - (100.0 * ((samples_per_sec -
+                                                 guest_us_samples_per_sec) /
+                                                samples_per_sec)),
+                              esamples_percent);
+       }
+
+       if (top->evlist->nr_entries == 1 || !top->display_weighted) {
+               struct perf_evsel *first;
+               first = list_entry(top->evlist->entries.next, struct perf_evsel, node);
+               ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
+                               (uint64_t)first->attr.sample_period,
+                               top->freq ? "Hz" : "");
+       }
+
+       if (!top->display_weighted) {
+               ret += SNPRINTF(bf + ret, size - ret, "%s",
+                               event_name(top->sym_evsel));
+       } else {
+               /*
+                * Don't let events eat all the space. Leaving 30 bytes
+                * for the rest should be enough.
+                */
+               size_t last_pos = size - 30;
+
+               list_for_each_entry(counter, &top->evlist->entries, node) {
+                       ret += SNPRINTF(bf + ret, size - ret, "%s%s",
+                                       counter->idx ? "/" : "",
+                                       event_name(counter));
+                       if (ret > last_pos) {
+                               sprintf(bf + last_pos - 3, "..");
+                               ret = last_pos - 1;
+                               break;
+                       }
+               }
+       }
+
+       ret += SNPRINTF(bf + ret, size - ret, "], ");
+
+       if (top->target_pid != -1)
+               ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %d",
+                               top->target_pid);
+       else if (top->target_tid != -1)
+               ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %d",
+                               top->target_tid);
+       else
+               ret += SNPRINTF(bf + ret, size - ret, " (all");
+
+       if (top->cpu_list)
+               ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
+                               top->evlist->cpus->nr > 1 ? "s" : "", top->cpu_list);
+       else {
+               if (top->target_tid != -1)
+                       ret += SNPRINTF(bf + ret, size - ret, ")");
+               else
+                       ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
+                                       top->evlist->cpus->nr,
+                                       top->evlist->cpus->nr > 1 ? "s" : "");
+       }
+
+       return ret;
+}
+
+void perf_top__reset_sample_counters(struct perf_top *top)
+{
+       top->samples = top->us_samples = top->kernel_samples =
+       top->exact_samples = top->guest_kernel_samples =
+       top->guest_us_samples = 0;
+}
+
+float perf_top__decay_samples(struct perf_top *top, struct rb_root *root)
+{
+       struct sym_entry *syme, *n;
+       float sum_ksamples = 0.0;
+       int snap = !top->display_weighted ? top->sym_counter : 0, j;
+
+       /* Sort the active symbols */
+       pthread_mutex_lock(&top->active_symbols_lock);
+       syme = list_entry(top->active_symbols.next, struct sym_entry, node);
+       pthread_mutex_unlock(&top->active_symbols_lock);
+
+       top->rb_entries = 0;
+       list_for_each_entry_safe_from(syme, n, &top->active_symbols, node) {
+               syme->snap_count = syme->count[snap];
+               if (syme->snap_count != 0) {
+
+                       if ((top->hide_user_symbols &&
+                            syme->origin == PERF_RECORD_MISC_USER) ||
+                           (top->hide_kernel_symbols &&
+                            syme->origin == PERF_RECORD_MISC_KERNEL)) {
+                               perf_top__remove_active_sym(top, syme);
+                               continue;
+                       }
+                       syme->weight = sym_weight(syme, top);
+
+                       if ((int)syme->snap_count >= top->count_filter) {
+                               rb_insert_active_sym(root, syme);
+                               ++top->rb_entries;
+                       }
+                       sum_ksamples += syme->snap_count;
+
+                       for (j = 0; j < top->evlist->nr_entries; j++)
+                               syme->count[j] = top->zero ? 0 : syme->count[j] * 7 / 8;
+               } else
+                       perf_top__remove_active_sym(top, syme);
+       }
+
+       return sum_ksamples;
+}
+
+/*
+ * Find the longest symbol name that will be displayed
+ */
+void perf_top__find_widths(struct perf_top *top, struct rb_root *root,
+                          int *dso_width, int *dso_short_width, int *sym_width)
+{
+       struct rb_node *nd;
+       int printed = 0;
+
+       *sym_width = *dso_width = *dso_short_width = 0;
+
+       for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+               struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node);
+               struct symbol *sym = sym_entry__symbol(syme);
+
+               if (++printed > top->print_entries ||
+                   (int)syme->snap_count < top->count_filter)
+                       continue;
+
+               if (syme->map->dso->long_name_len > *dso_width)
+                       *dso_width = syme->map->dso->long_name_len;
+
+               if (syme->map->dso->short_name_len > *dso_short_width)
+                       *dso_short_width = syme->map->dso->short_name_len;
+
+               if (sym->namelen > *sym_width)
+                       *sym_width = sym->namelen;
+       }
+}
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
new file mode 100644
index 0000000..96d1cb7
--- /dev/null
@@ -0,0 +1,66 @@
+#ifndef __PERF_TOP_H
+#define __PERF_TOP_H 1
+
+#include "types.h"
+#include "../perf.h"
+#include <stddef.h>
+#include <pthread.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+struct perf_evlist;
+struct perf_evsel;
+
+struct sym_entry {
+       struct rb_node          rb_node;
+       struct list_head        node;
+       unsigned long           snap_count;
+       double                  weight;
+       int                     skip;
+       u8                      origin;
+       struct map              *map;
+       unsigned long           count[0];
+};
+
+static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
+{
+       return ((void *)self) + symbol_conf.priv_size;
+}
+
+struct perf_top {
+       struct perf_evlist *evlist;
+       /*
+        * Symbols will be added here in perf_event__process_sample and will
+        * be removed after they decay.
+        */
+       struct list_head   active_symbols;
+       pthread_mutex_t    active_symbols_lock;
+       pthread_cond_t     active_symbols_cond;
+       u64                samples;
+       u64                kernel_samples, us_samples;
+       u64                exact_samples;
+       u64                guest_us_samples, guest_kernel_samples;
+       int                print_entries, count_filter, delay_secs;
+       int                display_weighted, freq, rb_entries, sym_counter;
+       pid_t              target_pid, target_tid;
+       bool               hide_kernel_symbols, hide_user_symbols, zero;
+       const char         *cpu_list;
+       struct sym_entry   *sym_filter_entry;
+       struct perf_evsel  *sym_evsel;
+};
+
+size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size);
+void perf_top__reset_sample_counters(struct perf_top *top);
+float perf_top__decay_samples(struct perf_top *top, struct rb_root *root);
+void perf_top__find_widths(struct perf_top *top, struct rb_root *root,
+                          int *dso_width, int *dso_short_width, int *sym_width);
+
+#ifdef NO_NEWT_SUPPORT
+static inline int perf_top__tui_browser(struct perf_top *top __used)
+{
+       return 0;
+}
+#else
+int perf_top__tui_browser(struct perf_top *top);
+#endif
+#endif /* __PERF_TOP_H */
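
These helpers are the per-refresh pieces split out of builtin-top.c. A rough sketch of one display cycle driving them, assuming an already-initialized struct perf_top; the ordering is inferred from the functions themselves, not copied from builtin-top:

	#include <stdio.h>
	#include "top.h"

	static void refresh_cycle_sketch(struct perf_top *top)
	{
		struct rb_root root = RB_ROOT;
		int dso_width, dso_short_width, sym_width;
		char header[512];

		/* format the "PerfTop: ... irqs/sec ..." status line */
		perf_top__header_snprintf(top, header, sizeof(header));
		puts(header);
		perf_top__reset_sample_counters(top);

		/* age per-symbol counts (count = count * 7 / 8 unless ->zero)
		 * and rebuild the weight-ordered rbtree of active symbols */
		perf_top__decay_samples(top, &root);

		/* size the DSO and symbol columns for the visible entries */
		perf_top__find_widths(top, &root, &dso_width,
				      &dso_short_width, &sym_width);
	}
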
index 73a02223c62922125b0dddb97e8a26099d8fdcd8..d8e622dd738aaafe4ce2a586f426f95e94da9e48 100644
@@ -153,7 +153,7 @@ void parse_proc_kallsyms(char *file, unsigned int size __unused)
        char *next = NULL;
        char *addr_str;
        char ch;
-       int ret;
+       int ret __used;
        int i;
 
        line = strtok_r(file, "\n", &next);
index 8bc010edca259f48014218a5fcc810d7fe8863b1..611219f80680e7a9fa00f1404f66f3c92b545686 100644
@@ -1,4 +1,5 @@
 #include "libslang.h"
+#include "ui.h"
 #include <linux/compiler.h>
 #include <linux/list.h>
 #include <linux/rbtree.h>
@@ -156,6 +157,20 @@ void ui_browser__add_exit_keys(struct ui_browser *self, int keys[])
        }
 }
 
+void __ui_browser__show_title(struct ui_browser *browser, const char *title)
+{
+       SLsmg_gotorc(0, 0);
+       ui_browser__set_color(browser, NEWT_COLORSET_ROOT);
+       slsmg_write_nstring(title, browser->width);
+}
+
+void ui_browser__show_title(struct ui_browser *browser, const char *title)
+{
+       pthread_mutex_lock(&ui__lock);
+       __ui_browser__show_title(browser, title);
+       pthread_mutex_unlock(&ui__lock);
+}
+
 int ui_browser__show(struct ui_browser *self, const char *title,
                     const char *helpline, ...)
 {
@@ -178,9 +193,8 @@ int ui_browser__show(struct ui_browser *self, const char *title,
        if (self->sb == NULL)
                return -1;
 
-       SLsmg_gotorc(0, 0);
-       ui_browser__set_color(self, NEWT_COLORSET_ROOT);
-       slsmg_write_nstring(title, self->width);
+       pthread_mutex_lock(&ui__lock);
+       __ui_browser__show_title(self, title);
 
        ui_browser__add_exit_keys(self, keys);
        newtFormAddComponent(self->form, self->sb);
@@ -188,25 +202,30 @@ int ui_browser__show(struct ui_browser *self, const char *title,
        va_start(ap, helpline);
        ui_helpline__vpush(helpline, ap);
        va_end(ap);
+       pthread_mutex_unlock(&ui__lock);
        return 0;
 }
 
 void ui_browser__hide(struct ui_browser *self)
 {
+       pthread_mutex_lock(&ui__lock);
        newtFormDestroy(self->form);
        self->form = NULL;
        ui_helpline__pop();
+       pthread_mutex_unlock(&ui__lock);
 }
 
 int ui_browser__refresh(struct ui_browser *self)
 {
        int row;
 
+       pthread_mutex_lock(&ui__lock);
        newtScrollbarSet(self->sb, self->index, self->nr_entries - 1);
        row = self->refresh(self);
        ui_browser__set_color(self, HE_COLORSET_NORMAL);
        SLsmg_fill_region(self->y + row, self->x,
                          self->height - row, self->width, ' ');
+       pthread_mutex_unlock(&ui__lock);
 
        return 0;
 }
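
The split above follows the usual double-underscore convention: __ui_browser__show_title() assumes the caller already holds ui__lock (presumably declared in the new "ui.h" include), while ui_browser__show_title() takes and drops the lock itself. A caller batching several screen updates under one lock would look roughly like this sketch:

	pthread_mutex_lock(&ui__lock);
	__ui_browser__show_title(browser, "perf report");
	/* ... further SLsmg drawing under the same lock ... */
	pthread_mutex_unlock(&ui__lock);
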
index 0dc7e4da36f52c42ef3574dc89dce7102ae8438d..fc63dda109109d267d034314685fc8c371bf70a4 100644
@@ -24,7 +24,6 @@ struct ui_browser {
        u32           nr_entries;
 };
 
-
 void ui_browser__set_color(struct ui_browser *self, int color);
 void ui_browser__set_percent_color(struct ui_browser *self,
                                   double percent, bool current);
@@ -35,6 +34,8 @@ void ui_browser__reset_index(struct ui_browser *self);
 void ui_browser__gotorc(struct ui_browser *self, int y, int x);
 void ui_browser__add_exit_key(struct ui_browser *self, int key);
 void ui_browser__add_exit_keys(struct ui_browser *self, int keys[]);
+void __ui_browser__show_title(struct ui_browser *browser, const char *title);
+void ui_browser__show_title(struct ui_browser *browser, const char *title);
 int ui_browser__show(struct ui_browser *self, const char *title,
                     const char *helpline, ...);
 void ui_browser__hide(struct ui_browser *self);
index 82b78f99251bb2b764165cf8066a85f1e6e4b97d..8c17a8730e4a36a9923537f79466c3793fd06559 100644
@@ -1,9 +1,12 @@
 #include "../browser.h"
 #include "../helpline.h"
 #include "../libslang.h"
+#include "../../annotate.h"
 #include "../../hist.h"
 #include "../../sort.h"
 #include "../../symbol.h"
+#include "../../annotate.h"
+#include <pthread.h>
 
 static void ui__error_window(const char *fmt, ...)
 {
@@ -42,8 +45,6 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
                struct objdump_line_rb_node *olrb = objdump_line__rb(ol);
                ui_browser__set_percent_color(self, olrb->percent, current_entry);
                slsmg_printf(" %7.2f ", olrb->percent);
-               if (!current_entry)
-                       ui_browser__set_color(self, HE_COLORSET_CODE);
        } else {
                ui_browser__set_percent_color(self, 0, current_entry);
                slsmg_write_nstring(" ", 9);
@@ -55,35 +56,40 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
                slsmg_write_nstring(" ", width - 18);
        else
                slsmg_write_nstring(ol->line, width - 18);
+
+       if (!current_entry)
+               ui_browser__set_color(self, HE_COLORSET_CODE);
 }
 
 static double objdump_line__calc_percent(struct objdump_line *self,
-                                        struct list_head *head,
-                                        struct symbol *sym)
+                                        struct symbol *sym, int evidx)
 {
        double percent = 0.0;
 
        if (self->offset != -1) {
                int len = sym->end - sym->start;
                unsigned int hits = 0;
-               struct sym_priv *priv = symbol__priv(sym);
-               struct sym_ext *sym_ext = priv->ext;
-               struct sym_hist *h = priv->hist;
+               struct annotation *notes = symbol__annotation(sym);
+               struct source_line *src_line = notes->src->lines;
+               struct sym_hist *h = annotation__histogram(notes, evidx);
                s64 offset = self->offset;
-               struct objdump_line *next = objdump__get_next_ip_line(head, self);
-
+               struct objdump_line *next;
 
+               next = objdump__get_next_ip_line(&notes->src->source, self);
                while (offset < (s64)len &&
                       (next == NULL || offset < next->offset)) {
-                       if (sym_ext) {
-                               percent += sym_ext[offset].percent;
+                       if (src_line) {
+                               percent += src_line[offset].percent;
                        } else
-                               hits += h->ip[offset];
+                               hits += h->addr[offset];
 
                        ++offset;
                }
-
-               if (sym_ext == NULL && h->sum)
+               /*
+                * If the percentage wasn't already calculated in
+                * symbol__get_source_line, do it now:
+                */
+               if (src_line == NULL && h->sum)
                        percent = 100.0 * hits / h->sum;
        }
 
@@ -133,103 +139,161 @@ static void annotate_browser__set_top(struct annotate_browser *self,
        self->curr_hot = nd;
 }
 
-static int annotate_browser__run(struct annotate_browser *self)
+static void annotate_browser__calc_percent(struct annotate_browser *browser,
+                                          int evidx)
 {
-       struct rb_node *nd;
-       struct hist_entry *he = self->b.priv;
-       int key;
+       struct symbol *sym = browser->b.priv;
+       struct annotation *notes = symbol__annotation(sym);
+       struct objdump_line *pos;
 
-       if (ui_browser__show(&self->b, he->ms.sym->name,
-                            "<-, -> or ESC: exit, TAB/shift+TAB: cycle thru samples") < 0)
-               return -1;
+       browser->entries = RB_ROOT;
+
+       pthread_mutex_lock(&notes->lock);
+
+       list_for_each_entry(pos, &notes->src->source, node) {
+               struct objdump_line_rb_node *rbpos = objdump_line__rb(pos);
+               rbpos->percent = objdump_line__calc_percent(pos, sym, evidx);
+               if (rbpos->percent < 0.01) {
+                       RB_CLEAR_NODE(&rbpos->rb_node);
+                       continue;
+               }
+               objdump__insert_line(&browser->entries, rbpos);
+       }
+       pthread_mutex_unlock(&notes->lock);
+
+       browser->curr_hot = rb_last(&browser->entries);
+}
+
+static int annotate_browser__run(struct annotate_browser *self, int evidx,
+                                int refresh)
+{
+       struct rb_node *nd = NULL;
+       struct symbol *sym = self->b.priv;
        /*
-        * To allow builtin-annotate to cycle thru multiple symbols by
+        * NEWT_KEY_RIGHT allows builtin-annotate to cycle thru multiple symbols by
         * examining the exit key for this function.
         */
-       ui_browser__add_exit_key(&self->b, NEWT_KEY_RIGHT);
+       int exit_keys[] = { 'H', NEWT_KEY_TAB, NEWT_KEY_UNTAB,
+                           NEWT_KEY_RIGHT, 0 };
+       int key;
+
+       if (ui_browser__show(&self->b, sym->name,
+                            "<-, -> or ESC: exit, TAB/shift+TAB: "
+                            "cycle hottest lines, H: Hottest") < 0)
+               return -1;
+
+       ui_browser__add_exit_keys(&self->b, exit_keys);
+       annotate_browser__calc_percent(self, evidx);
+
+       if (self->curr_hot)
+               annotate_browser__set_top(self, self->curr_hot);
 
        nd = self->curr_hot;
-       if (nd) {
-               int tabs[] = { NEWT_KEY_TAB, NEWT_KEY_UNTAB, 0 };
-               ui_browser__add_exit_keys(&self->b, tabs);
-       }
+
+       if (refresh != 0)
+               newtFormSetTimer(self->b.form, refresh);
 
        while (1) {
                key = ui_browser__run(&self->b);
 
+               if (refresh != 0) {
+                       annotate_browser__calc_percent(self, evidx);
+                       /*
+                        * Current line focus got out of the list of most active
+                        * lines, NULL it so that if TAB|UNTAB is pressed, we
+                        * move to curr_hot (current hottest line).
+                        */
+                       if (nd != NULL && RB_EMPTY_NODE(nd))
+                               nd = NULL;
+               }
+
                switch (key) {
+               case -1:
+                       /*
+                        * FIXME we need to check if it was
+                        * es.reason == NEWT_EXIT_TIMER
+                        */
+                       if (refresh != 0)
+                               symbol__annotate_decay_histogram(sym, evidx);
+                       continue;
                case NEWT_KEY_TAB:
-                       nd = rb_prev(nd);
-                       if (nd == NULL)
-                               nd = rb_last(&self->entries);
-                       annotate_browser__set_top(self, nd);
+                       if (nd != NULL) {
+                               nd = rb_prev(nd);
+                               if (nd == NULL)
+                                       nd = rb_last(&self->entries);
+                       } else
+                               nd = self->curr_hot;
                        break;
                case NEWT_KEY_UNTAB:
-                       nd = rb_next(nd);
-                       if (nd == NULL)
-                               nd = rb_first(&self->entries);
-                       annotate_browser__set_top(self, nd);
+                       if (nd != NULL) {
+                               nd = rb_next(nd);
+                               if (nd == NULL)
+                                       nd = rb_first(&self->entries);
+                       } else
+                               nd = self->curr_hot;
+                       break;
+               case 'H':
+                       nd = self->curr_hot;
                        break;
                default:
                        goto out;
                }
+
+               if (nd != NULL)
+                       annotate_browser__set_top(self, nd);
        }
 out:
        ui_browser__hide(&self->b);
        return key;
 }
 
-int hist_entry__tui_annotate(struct hist_entry *self)
+int hist_entry__tui_annotate(struct hist_entry *he, int evidx)
+{
+       return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, 0);
+}
+
+int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
+                        int refresh)
 {
        struct objdump_line *pos, *n;
-       struct objdump_line_rb_node *rbpos;
-       LIST_HEAD(head);
+       struct annotation *notes = symbol__annotation(sym);
        struct annotate_browser browser = {
                .b = {
-                       .entries = &head,
+                       .entries = &notes->src->source,
                        .refresh = ui_browser__list_head_refresh,
                        .seek    = ui_browser__list_head_seek,
                        .write   = annotate_browser__write,
-                       .priv    = self,
+                       .priv    = sym,
                },
        };
        int ret;
 
-       if (self->ms.sym == NULL)
+       if (sym == NULL)
                return -1;
 
-       if (self->ms.map->dso->annotate_warned)
+       if (map->dso->annotate_warned)
                return -1;
 
-       if (hist_entry__annotate(self, &head, sizeof(*rbpos)) < 0) {
+       if (symbol__annotate(sym, map, sizeof(struct objdump_line_rb_node)) < 0) {
                ui__error_window(ui_helpline__last_msg);
                return -1;
        }
 
        ui_helpline__push("Press <- or ESC to exit");
 
-       list_for_each_entry(pos, &head, node) {
+       list_for_each_entry(pos, &notes->src->source, node) {
+               struct objdump_line_rb_node *rbpos;
                size_t line_len = strlen(pos->line);
+
                if (browser.b.width < line_len)
                        browser.b.width = line_len;
                rbpos = objdump_line__rb(pos);
                rbpos->idx = browser.b.nr_entries++;
-               rbpos->percent = objdump_line__calc_percent(pos, &head, self->ms.sym);
-               if (rbpos->percent < 0.01)
-                       continue;
-               objdump__insert_line(&browser.entries, rbpos);
        }
 
-       /*
-        * Position the browser at the hottest line.
-        */
-       browser.curr_hot = rb_last(&browser.entries);
-       if (browser.curr_hot)
-               annotate_browser__set_top(&browser, browser.curr_hot);
-
        browser.b.width += 18; /* Percentage */
-       ret = annotate_browser__run(&browser);
-       list_for_each_entry_safe(pos, n, &head, node) {
+       ret = annotate_browser__run(&browser, evidx, refresh);
+       list_for_each_entry_safe(pos, n, &notes->src->source, node) {
                list_del(&pos->node);
                objdump_line__free(pos);
        }
index 60c463c16028946541560f5f71e37617cfd69c9b..798efdca3eadd9fe89b84157ceba5c56a8d5dcb2 100644
@@ -7,6 +7,8 @@
 #include <newt.h>
 #include <linux/rbtree.h>
 
+#include "../../evsel.h"
+#include "../../evlist.h"
 #include "../../hist.h"
 #include "../../pstack.h"
 #include "../../sort.h"
@@ -292,7 +294,8 @@ static int hist_browser__run(struct hist_browser *self, const char *title)
 {
        int key;
        int exit_keys[] = { 'a', '?', 'h', 'C', 'd', 'D', 'E', 't',
-                           NEWT_KEY_ENTER, NEWT_KEY_RIGHT, NEWT_KEY_LEFT, 0, };
+                           NEWT_KEY_ENTER, NEWT_KEY_RIGHT, NEWT_KEY_LEFT,
+                           NEWT_KEY_TAB, NEWT_KEY_UNTAB, 0, };
 
        self->b.entries = &self->hists->entries;
        self->b.nr_entries = self->hists->nr_entries;
@@ -377,7 +380,7 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
        while (node) {
                struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
                struct rb_node *next = rb_next(node);
-               u64 cumul = cumul_hits(child);
+               u64 cumul = callchain_cumul_hits(child);
                struct callchain_list *chain;
                char folded_sign = ' ';
                int first = true;
@@ -638,6 +641,9 @@ static void ui_browser__hists_seek(struct ui_browser *self,
        struct rb_node *nd;
        bool first = true;
 
+       if (self->nr_entries == 0)
+               return;
+
        switch (whence) {
        case SEEK_SET:
                nd = hists__filter_entries(rb_first(self->entries));
@@ -797,8 +803,11 @@ static int hists__browser_title(struct hists *self, char *bf, size_t size,
        return printed;
 }
 
-int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
+static int perf_evsel__hists_browse(struct perf_evsel *evsel,
+                                   const char *helpline, const char *ev_name,
+                                   bool left_exits)
 {
+       struct hists *self = &evsel->hists;
        struct hist_browser *browser = hist_browser__new(self);
        struct pstack *fstack;
        const struct thread *thread_filter = NULL;
@@ -818,8 +827,8 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
        hists__browser_title(self, msg, sizeof(msg), ev_name,
                             dso_filter, thread_filter);
        while (1) {
-               const struct thread *thread;
-               const struct dso *dso;
+               const struct thread *thread = NULL;
+               const struct dso *dso = NULL;
                char *options[16];
                int nr_options = 0, choice = 0, i,
                    annotate = -2, zoom_dso = -2, zoom_thread = -2,
@@ -827,8 +836,10 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
 
                key = hist_browser__run(browser, msg);
 
-               thread = hist_browser__selected_thread(browser);
-               dso = browser->selection->map ? browser->selection->map->dso : NULL;
+               if (browser->he_selection != NULL) {
+                       thread = hist_browser__selected_thread(browser);
+                       dso = browser->selection->map ? browser->selection->map->dso : NULL;
+               }
 
                switch (key) {
                case NEWT_KEY_TAB:
@@ -839,7 +850,8 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
                         */
                        goto out_free_stack;
                case 'a':
-                       if (browser->selection->map == NULL &&
+                       if (browser->selection == NULL ||
+                           browser->selection->map == NULL ||
                            browser->selection->map->dso->annotate_warned)
                                continue;
                        goto do_annotate;
@@ -858,6 +870,7 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
                                        "E         Expand all callchains\n"
                                        "d         Zoom into current DSO\n"
                                        "t         Zoom into current Thread\n"
+                                       "TAB/UNTAB Switch events\n"
                                        "q/CTRL+C  Exit browser");
                        continue;
                case NEWT_KEY_ENTER:
@@ -867,8 +880,14 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
                case NEWT_KEY_LEFT: {
                        const void *top;
 
-                       if (pstack__empty(fstack))
+                       if (pstack__empty(fstack)) {
+                               /*
+                                * Go back to the perf_evsel_menu__run or other user
+                                */
+                               if (left_exits)
+                                       goto out_free_stack;
                                continue;
+                       }
                        top = pstack__pop(fstack);
                        if (top == &dso_filter)
                                goto zoom_out_dso;
@@ -877,14 +896,16 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
                        continue;
                }
                case NEWT_KEY_ESCAPE:
-                       if (!ui__dialog_yesno("Do you really want to exit?"))
+                       if (!left_exits &&
+                           !ui__dialog_yesno("Do you really want to exit?"))
                                continue;
                        /* Fall thru */
                default:
                        goto out_free_stack;
                }
 
-               if (browser->selection->sym != NULL &&
+               if (browser->selection != NULL &&
+                   browser->selection->sym != NULL &&
                    !browser->selection->map->dso->annotate_warned &&
                    asprintf(&options[nr_options], "Annotate %s",
                             browser->selection->sym->name) > 0)
@@ -903,7 +924,8 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
                             (dso->kernel ? "the Kernel" : dso->short_name)) > 0)
                        zoom_dso = nr_options++;
 
-               if (browser->selection->map != NULL &&
+               if (browser->selection != NULL &&
+                   browser->selection->map != NULL &&
                    asprintf(&options[nr_options], "Browse map details") > 0)
                        browse_map = nr_options++;
 
@@ -923,19 +945,11 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
                if (choice == annotate) {
                        struct hist_entry *he;
 do_annotate:
-                       if (browser->selection->map->dso->origin == DSO__ORIG_KERNEL) {
-                               browser->selection->map->dso->annotate_warned = 1;
-                               ui_helpline__puts("No vmlinux file found, can't "
-                                                "annotate with just a "
-                                                "kallsyms file");
-                               continue;
-                       }
-
                        he = hist_browser__selected_entry(browser);
                        if (he == NULL)
                                continue;
 
-                       hist_entry__tui_annotate(he);
+                       hist_entry__tui_annotate(he, evsel->idx);
                } else if (choice == browse_map)
                        map__browse(browser->selection->map);
                else if (choice == zoom_dso) {
@@ -984,30 +998,141 @@ out:
        return key;
 }
 
-int hists__tui_browse_tree(struct rb_root *self, const char *help)
+struct perf_evsel_menu {
+       struct ui_browser b;
+       struct perf_evsel *selection;
+};
+
+static void perf_evsel_menu__write(struct ui_browser *browser,
+                                  void *entry, int row)
+{
+       struct perf_evsel_menu *menu = container_of(browser,
+                                                   struct perf_evsel_menu, b);
+       struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node);
+       bool current_entry = ui_browser__is_current_entry(browser, row);
+       unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE];
+       const char *ev_name = event_name(evsel);
+       char bf[256], unit;
+
+       ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
+                                                      HE_COLORSET_NORMAL);
+
+       nr_events = convert_unit(nr_events, &unit);
+       snprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
+                unit, unit == ' ' ? "" : " ", ev_name);
+       slsmg_write_nstring(bf, browser->width);
+
+       if (current_entry)
+               menu->selection = evsel;
+}
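The write callback above prints each event with its sample count scaled into a human-readable unit. A minimal standalone sketch of what a convert_unit-style helper does (an assumption based on the call site: divide by powers of 1000 and report the unit consumed; scale_count here is a hypothetical stand-in, not perf's function):

    #include <stdio.h>

    /* Hypothetical stand-in for convert_unit(): scale a raw count down
     * by powers of 1000 and report the unit character consumed. */
    static unsigned long scale_count(unsigned long value, char *unit)
    {
            *unit = ' ';
            if (value > 1000) { value /= 1000; *unit = 'K'; }
            if (value > 1000) { value /= 1000; *unit = 'M'; }
            if (value > 1000) { value /= 1000; *unit = 'G'; }
            return value;
    }

    int main(void)
    {
            char unit;
            unsigned long n = scale_count(1234567UL, &unit);

            /* Mirrors the snprintf in perf_evsel_menu__write: number,
             * unit, then a space only when a unit was consumed. */
            printf("%lu%c%s%s\n", n, unit, unit == ' ' ? "" : " ", "cycles");
            return 0;
    }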
+
+static int perf_evsel_menu__run(struct perf_evsel_menu *menu, const char *help)
 {
-       struct rb_node *first = rb_first(self), *nd = first, *next;
-       int key = 0;
+       int exit_keys[] = { NEWT_KEY_ENTER, NEWT_KEY_RIGHT, 0, };
+       struct perf_evlist *evlist = menu->b.priv;
+       struct perf_evsel *pos;
+       const char *ev_name, *title = "Available samples";
+       int key;
+
+       if (ui_browser__show(&menu->b, title,
+                            "ESC: exit, ENTER|->: Browse histograms") < 0)
+               return -1;
+
+       ui_browser__add_exit_keys(&menu->b, exit_keys);
 
-       while (nd) {
-               struct hists *hists = rb_entry(nd, struct hists, rb_node);
-               const char *ev_name = __event_name(hists->type, hists->config);
+       while (1) {
+               key = ui_browser__run(&menu->b);
 
-               key = hists__browse(hists, help, ev_name);
                switch (key) {
-               case NEWT_KEY_TAB:
-                       next = rb_next(nd);
-                       if (next)
-                               nd = next;
+               case NEWT_KEY_RIGHT:
+               case NEWT_KEY_ENTER:
+                       if (!menu->selection)
+                               continue;
+                       pos = menu->selection;
+browse_hists:
+                       ev_name = event_name(pos);
+                       key = perf_evsel__hists_browse(pos, help, ev_name, true);
+                       ui_browser__show_title(&menu->b, title);
                        break;
-               case NEWT_KEY_UNTAB:
-                       if (nd == first)
+               case NEWT_KEY_LEFT:
+                       continue;
+               case NEWT_KEY_ESCAPE:
+                       if (!ui__dialog_yesno("Do you really want to exit?"))
                                continue;
-                       nd = rb_prev(nd);
+                       /* Fall thru */
+               default:
+                       goto out;
+               }
+
+               switch (key) {
+               case NEWT_KEY_TAB:
+                       if (pos->node.next == &evlist->entries)
+                               pos = list_entry(evlist->entries.next, struct perf_evsel, node);
+                       else
+                               pos = list_entry(pos->node.next, struct perf_evsel, node);
+                       goto browse_hists;
+               case NEWT_KEY_UNTAB:
+                       if (pos->node.prev == &evlist->entries)
+                               pos = list_entry(evlist->entries.prev, struct perf_evsel, node);
+                       else
+                               pos = list_entry(pos->node.prev, struct perf_evsel, node);
+                       goto browse_hists;
+               case 'q':
+               case CTRL('c'):
+                       goto out;
                default:
-                       return key;
+                       break;
                }
        }
 
+out:
+       ui_browser__hide(&menu->b);
        return key;
 }
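The TAB/UNTAB cases above cycle through the evsels with wrap-around: stepping past the list head jumps to the opposite end. A self-contained sketch of that move on a kernel-style circular doubly linked list (the types here are hypothetical, not perf's):

    #include <stdio.h>

    struct node { struct node *next, *prev; const char *name; };

    /* If the next link is the list head, wrap to the first entry,
     * same shape as the NEWT_KEY_TAB case above. */
    static struct node *step_forward(struct node *head, struct node *pos)
    {
            return pos->next == head ? head->next : pos->next;
    }

    int main(void)
    {
            struct node head, a = { .name = "cycles" }, b = { .name = "faults" };

            /* Hand-built circular list: head <-> a <-> b <-> head. */
            head.next = &a; a.prev = &head;
            a.next = &b;    b.prev = &a;
            b.next = &head; head.prev = &b;

            struct node *pos = &b;
            pos = step_forward(&head, pos);   /* wraps past the head ... */
            printf("%s\n", pos->name);        /* ... back to "cycles"    */
            return 0;
    }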
+
+static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
+                                          const char *help)
+{
+       struct perf_evsel *pos;
+       struct perf_evsel_menu menu = {
+               .b = {
+                       .entries    = &evlist->entries,
+                       .refresh    = ui_browser__list_head_refresh,
+                       .seek       = ui_browser__list_head_seek,
+                       .write      = perf_evsel_menu__write,
+                       .nr_entries = evlist->nr_entries,
+                       .priv       = evlist,
+               },
+       };
+
+       ui_helpline__push("Press ESC to exit");
+
+       list_for_each_entry(pos, &evlist->entries, node) {
+               const char *ev_name = event_name(pos);
+               size_t line_len = strlen(ev_name) + 7;
+
+               if (menu.b.width < line_len)
+                       menu.b.width = line_len;
+               /*
+                * Cache the evsel name; tracepoints have a _high_ cost per
+                * event_name() call.
+                */
+               if (pos->name == NULL)
+                       pos->name = strdup(ev_name);
+       }
+
+       return perf_evsel_menu__run(&menu, help);
+}
+
+int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help)
+{
+       if (evlist->nr_entries == 1) {
+               struct perf_evsel *first = list_entry(evlist->entries.next,
+                                                     struct perf_evsel, node);
+               const char *ev_name = event_name(first);
+               return perf_evsel__hists_browse(first, help, ev_name, false);
+       }
+
+       return __perf_evlist__tui_browse_hists(evlist, help);
+}
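The single-event shortcut above leans on list_entry() to get from the embedded list node back to its perf_evsel. list_entry() is container_of() under the hood; a sketch of the pointer arithmetic it performs (my_container_of and evsel_like are hypothetical names for illustration):

    #include <stddef.h>
    #include <stdio.h>

    /* What list_entry() expands to: subtract the member's offset to
     * recover the enclosing struct from a pointer to the member. */
    #define my_container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct list_node { struct list_node *next, *prev; };
    struct evsel_like { int idx; struct list_node node; };  /* hypothetical */

    int main(void)
    {
            struct evsel_like e = { .idx = 42 };
            struct list_node *n = &e.node;

            /* Same move as list_entry(evlist->entries.next, ...). */
            struct evsel_like *back = my_container_of(n, struct evsel_like, node);
            printf("%d\n", back->idx);  /* prints 42 */
            return 0;
    }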
index e5158369106eee37234baf658d18cddec0539bb5..8462bffe20bc84ad31a5a81019990ddcb5ab401e 100644 (file)
@@ -41,7 +41,7 @@ static int ui_entry__read(const char *title, char *bf, size_t size, int width)
 out_free_form:
        newtPopWindow();
        newtFormDestroy(form);
-       return 0;
+       return err;
 }
 
 struct map_browser {
diff --git a/tools/perf/util/ui/browsers/top.c b/tools/perf/util/ui/browsers/top.c
new file mode 100644 (file)
index 0000000..5a06538
--- /dev/null
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+#include "../browser.h"
+#include "../../annotate.h"
+#include "../helpline.h"
+#include "../libslang.h"
+#include "../util.h"
+#include "../../evlist.h"
+#include "../../hist.h"
+#include "../../sort.h"
+#include "../../symbol.h"
+#include "../../top.h"
+
+struct perf_top_browser {
+       struct ui_browser b;
+       struct rb_root    root;
+       struct sym_entry  *selection;
+       float             sum_ksamples;
+       int               dso_width;
+       int               dso_short_width;
+       int               sym_width;
+};
+
+static void perf_top_browser__write(struct ui_browser *browser, void *entry, int row)
+{
+       struct perf_top_browser *top_browser = container_of(browser, struct perf_top_browser, b);
+       struct sym_entry *syme = rb_entry(entry, struct sym_entry, rb_node);
+       bool current_entry = ui_browser__is_current_entry(browser, row);
+       struct symbol *symbol = sym_entry__symbol(syme);
+       struct perf_top *top = browser->priv;
+       int width = browser->width;
+       double pcnt;
+
+       pcnt = 100.0 - (100.0 * ((top_browser->sum_ksamples - syme->snap_count) /
+                                top_browser->sum_ksamples));
+       ui_browser__set_percent_color(browser, pcnt, current_entry);
+
+       if (top->evlist->nr_entries == 1 || !top->display_weighted) {
+               slsmg_printf("%20.2f ", syme->weight);
+               width -= 24;
+       } else {
+               slsmg_printf("%9.1f %10ld ", syme->weight, syme->snap_count);
+               width -= 23;
+       }
+
+       slsmg_printf("%4.1f%%", pcnt);
+       width -= 7;
+
+       if (verbose) {
+               slsmg_printf(" %016" PRIx64, symbol->start);
+               width -= 17;
+       }
+
+       slsmg_printf(" %-*.*s ", top_browser->sym_width, top_browser->sym_width,
+                    symbol->name);
+       width -= top_browser->sym_width;
+       slsmg_write_nstring(width >= syme->map->dso->long_name_len ?
+                               syme->map->dso->long_name :
+                               syme->map->dso->short_name, width);
+
+       if (current_entry)
+               top_browser->selection = syme;
+}
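The pcnt expression in the write callback is algebraically just the sample share: 100 - 100 * ((sum - snap) / sum) equals 100 * snap / sum. A quick check with made-up numbers (values chosen so both forms are exact in binary floating point):

    #include <assert.h>

    int main(void)
    {
            double sum = 500.0, snap = 125.0;
            double pcnt = 100.0 - (100.0 * ((sum - snap) / sum));

            assert(pcnt == 100.0 * snap / sum);   /* both are 25.0 */
            return 0;
    }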
+
+static void perf_top_browser__update_rb_tree(struct perf_top_browser *browser)
+{
+       struct perf_top *top = browser->b.priv;
+       u64 top_idx = browser->b.top_idx;
+
+       browser->root = RB_ROOT;
+       browser->b.top = NULL;
+       browser->sum_ksamples = perf_top__decay_samples(top, &browser->root);
+       /*
+        * No active symbols
+        */
+       if (top->rb_entries == 0)
+               return;
+
+       perf_top__find_widths(top, &browser->root, &browser->dso_width,
+                             &browser->dso_short_width,
+                             &browser->sym_width);
+       if (browser->sym_width + browser->dso_width > browser->b.width - 29) {
+               browser->dso_width = browser->dso_short_width;
+               if (browser->sym_width + browser->dso_width > browser->b.width - 29)
+                       browser->sym_width = browser->b.width - browser->dso_width - 29;
+       }
+
+       /*
+        * Adjust the ui_browser indexes, since the entries in the
+        * browser->root rb_tree may have changed, then seek from the
+        * start so that we pick up a possibly new top of the screen.
+        */
+       browser->b.nr_entries = top->rb_entries;
+
+       if (top_idx >= browser->b.nr_entries) {
+               if (browser->b.height >= browser->b.nr_entries)
+                       top_idx = browser->b.nr_entries - browser->b.height;
+               else
+                       top_idx = 0;
+       }
+
+       if (browser->b.index >= top_idx + browser->b.height)
+               browser->b.index = top_idx + browser->b.index - browser->b.top_idx;
+
+       if (browser->b.index >= browser->b.nr_entries)
+               browser->b.index = browser->b.nr_entries - 1;
+
+       browser->b.top_idx = top_idx;
+       browser->b.seek(&browser->b, top_idx, SEEK_SET);
+}
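The tail of the function above pulls the scroll offset and cursor back into range after the rb_tree shrinks. A sketch of the intent of that clamping, not a line-for-line copy (struct view and its fields are hypothetical simplifications):

    #include <stdio.h>

    struct view { unsigned long top_idx, index, height, nr_entries; };

    static void clamp_view(struct view *v)
    {
            /* Keep the top-of-screen offset inside the new entry count. */
            if (v->top_idx >= v->nr_entries)
                    v->top_idx = v->nr_entries > v->height
                               ? v->nr_entries - v->height : 0;

            /* Keep the cursor on screen and inside the entries. */
            if (v->index >= v->top_idx + v->height)
                    v->index = v->top_idx + v->height - 1;
            if (v->index >= v->nr_entries)
                    v->index = v->nr_entries - 1;
    }

    int main(void)
    {
            struct view v = { .top_idx = 40, .index = 45,
                              .height = 20, .nr_entries = 10 };

            clamp_view(&v);
            printf("top=%lu cursor=%lu\n", v.top_idx, v.index); /* top=0 cursor=9 */
            return 0;
    }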
+
+static void perf_top_browser__annotate(struct perf_top_browser *browser)
+{
+       struct sym_entry *syme = browser->selection;
+       struct symbol *sym = sym_entry__symbol(syme);
+       struct annotation *notes = symbol__annotation(sym);
+       struct perf_top *top = browser->b.priv;
+
+       if (notes->src != NULL)
+               goto do_annotation;
+
+       pthread_mutex_lock(&notes->lock);
+
+       top->sym_filter_entry = NULL;
+
+       if (symbol__alloc_hist(sym, top->evlist->nr_entries) < 0) {
+               pr_err("Not enough memory for annotating '%s' symbol!\n",
+                      sym->name);
+               pthread_mutex_unlock(&notes->lock);
+               return;
+       }
+
+       top->sym_filter_entry = syme;
+
+       pthread_mutex_unlock(&notes->lock);
+do_annotation:
+       symbol__tui_annotate(sym, syme->map, 0, top->delay_secs * 1000);
+}
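perf_top_browser__annotate allocates per-symbol annotation storage lazily, with the unlocked fast-path check followed by the allocation under notes->lock. A sketch of that pattern in isolation (notes_like and ensure_src are hypothetical names; like the original, the fast-path check is deliberately unsynchronized, so a second check is done under the lock; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct notes_like {
            pthread_mutex_t lock;
            void *src;              /* per-symbol annotation storage */
    };

    static int ensure_src(struct notes_like *n, size_t size)
    {
            if (n->src != NULL)     /* fast path: already allocated */
                    return 0;

            pthread_mutex_lock(&n->lock);
            if (n->src == NULL)     /* re-check under the lock */
                    n->src = calloc(1, size);
            pthread_mutex_unlock(&n->lock);

            return n->src != NULL ? 0 : -1;
    }

    int main(void)
    {
            struct notes_like n = { .lock = PTHREAD_MUTEX_INITIALIZER };

            if (ensure_src(&n, 4096) == 0)
                    puts("annotation buffer ready");
            free(n.src);
            return 0;
    }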
+
+static int perf_top_browser__run(struct perf_top_browser *browser)
+{
+       int key;
+       char title[160];
+       struct perf_top *top = browser->b.priv;
+       int delay_msecs = top->delay_secs * 1000;
+       int exit_keys[] = { 'a', NEWT_KEY_ENTER, NEWT_KEY_RIGHT, 0, };
+
+       perf_top_browser__update_rb_tree(browser);
+       perf_top__header_snprintf(top, title, sizeof(title));
+       perf_top__reset_sample_counters(top);
+
+       if (ui_browser__show(&browser->b, title,
+                            "ESC: exit, ENTER|->|a: Live Annotate") < 0)
+               return -1;
+
+       newtFormSetTimer(browser->b.form, delay_msecs);
+       ui_browser__add_exit_keys(&browser->b, exit_keys);
+
+       while (1) {
+               key = ui_browser__run(&browser->b);
+
+               switch (key) {
+               case -1:
+                       /* FIXME we need to check if it was es.reason == NEWT_EXIT_TIMER */
+                       perf_top_browser__update_rb_tree(browser);
+                       perf_top__header_snprintf(top, title, sizeof(title));
+                       perf_top__reset_sample_counters(top);
+                       ui_browser__set_color(&browser->b, NEWT_COLORSET_ROOT);
+                       SLsmg_gotorc(0, 0);
+                       slsmg_write_nstring(title, browser->b.width);
+                       break;
+               case 'a':
+               case NEWT_KEY_RIGHT:
+               case NEWT_KEY_ENTER:
+                       if (browser->selection)
+                               perf_top_browser__annotate(browser);
+                       break;
+               case NEWT_KEY_LEFT:
+                       continue;
+               case NEWT_KEY_ESCAPE:
+                       if (!ui__dialog_yesno("Do you really want to exit?"))
+                               continue;
+                       /* Fall thru */
+               default:
+                       goto out;
+               }
+       }
+out:
+       ui_browser__hide(&browser->b);
+       return key;
+}
+
+int perf_top__tui_browser(struct perf_top *top)
+{
+       struct perf_top_browser browser = {
+               .b = {
+                       .entries = &browser.root,
+                       .refresh = ui_browser__rb_tree_refresh,
+                       .seek    = ui_browser__rb_tree_seek,
+                       .write   = perf_top_browser__write,
+                       .priv    = top,
+               },
+       };
+
+       ui_helpline__push("Press <- or ESC to exit");
+       return perf_top_browser__run(&browser);
+}
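Note the initializer above: .entries = &browser.root points back into the object being initialized, which is legal because an automatic variable's address is usable from its own declarator onward, and the embedded ui_browser carries the refresh/seek/write callbacks like a small vtable. A stripped-down sketch of the same idiom (struct base and top_like are hypothetical):

    #include <stdio.h>

    struct base {
            void *entries;
            void (*write)(struct base *b, void *entry, int row);
    };

    struct top_like {
            struct base b;
            int root;       /* stands in for the rb_root */
    };

    static void write_row(struct base *b, void *entry, int row)
    {
            printf("row %d -> %d\n", row, *(int *)entry);
    }

    int main(void)
    {
            struct top_like browser = {
                    .b = {
                            .entries = &browser.root,   /* self-reference */
                            .write   = write_row,
                    },
                    .root = 7,
            };

            browser.b.write(&browser.b, browser.b.entries, 0);
            return 0;
    }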
index 8d79daa4458ab4fd86e62be18e48d620af1fd9b8..f36d2ff509ed285baa5290d6780b3e9d8ca4ae9e 100644 (file)
@@ -5,6 +5,7 @@
 
 #include "../debug.h"
 #include "helpline.h"
+#include "ui.h"
 
 void ui_helpline__pop(void)
 {
@@ -55,7 +56,8 @@ int ui_helpline__show_help(const char *format, va_list ap)
        int ret;
        static int backlog;
 
-        ret = vsnprintf(ui_helpline__last_msg + backlog,
+       pthread_mutex_lock(&ui__lock);
+       ret = vsnprintf(ui_helpline__last_msg + backlog,
                        sizeof(ui_helpline__last_msg) - backlog, format, ap);
        backlog += ret;
 
@@ -64,6 +66,7 @@ int ui_helpline__show_help(const char *format, va_list ap)
                newtRefresh();
                backlog = 0;
        }
+       pthread_mutex_unlock(&ui__lock);
 
        return ret;
 }
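The hunk above takes ui__lock around both the backlog bookkeeping and the screen refresh, since perf top now drives the helpline from more than one thread. A sketch of why the whole append-then-flush sequence must sit under one mutex (buffer size and names are hypothetical; fputs stands in for newtRefresh; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_mutex_t ui_lock = PTHREAD_MUTEX_INITIALIZER;
    static char last_msg[256];
    static int backlog;

    static void helpline_append(const char *msg)
    {
            pthread_mutex_lock(&ui_lock);
            backlog += snprintf(last_msg + backlog,
                                sizeof(last_msg) - backlog, "%s", msg);

            /* Flush only once a full line has accumulated. */
            if (backlog && last_msg[backlog - 1] == '\n') {
                    fputs(last_msg, stdout);   /* stands in for newtRefresh() */
                    backlog = 0;
            }
            pthread_mutex_unlock(&ui_lock);
    }

    int main(void)
    {
            helpline_append("hello ");
            helpline_append("world\n");
            return 0;
    }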
index 5623da8e80805e5874eee54cfcb0fdb31a9ac25c..2b63e1c9b181f679e75293bcb4e47111a6d3c76a 100644 (file)
 
 #if SLANG_VERSION < 20104
 #define slsmg_printf(msg, args...) \
-       SLsmg_printf((char *)msg, ##args)
+       SLsmg_printf((char *)(msg), ##args)
 #define slsmg_write_nstring(msg, len) \
-       SLsmg_write_nstring((char *)msg, len)
+       SLsmg_write_nstring((char *)(msg), len)
 #define sltt_set_color(obj, name, fg, bg) \
-       SLtt_set_color(obj,(char *)name, (char *)fg, (char *)bg)
+       SLtt_set_color(obj,(char *)(name), (char *)(fg), (char *)(bg))
 #else
 #define slsmg_printf SLsmg_printf
 #define slsmg_write_nstring SLsmg_write_nstring
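The hunk above parenthesizes the macro arguments before casting. Without the parens, a cast or dereference in the macro body binds to only part of a compound argument expression. A small demonstration with hypothetical macros (not the SLang ones):

    #include <stdio.h>

    #define BAD_DEREF(p)   (*(const char *)p)
    #define GOOD_DEREF(p)  (*(const char *)(p))

    int main(void)
    {
            const char *s = "hello";

            /* BAD_DEREF(s + 1) expands to (*(const char *)s + 1):
             * it dereferences s first ('h') and then adds 1, instead
             * of reading the character after s. */
            printf("%c %c\n", GOOD_DEREF(s + 1), (char)BAD_DEREF(s + 1));
            /* prints: e i */
            return 0;
    }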
index 662085032eb7f4d577953961b7537a6e522dd118..ee46d671db5990a9c453447e065bf95864943385 100644 (file)
@@ -6,6 +6,9 @@
 #include "../debug.h"
 #include "browser.h"
 #include "helpline.h"
+#include "ui.h"
+
+pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
 
 static void newt_suspend(void *d __used)
 {
@@ -14,11 +17,12 @@ static void newt_suspend(void *d __used)
        newtResume();
 }
 
-void setup_browser(void)
+void setup_browser(bool fallback_to_pager)
 {
        if (!isatty(1) || !use_browser || dump_trace) {
                use_browser = 0;
-               setup_pager();
+               if (fallback_to_pager)
+                       setup_pager();
                return;
        }
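The new bool lets callers opt out of the pager fallback: batch-style commands still want a pager when the TUI is unavailable, while a continuously redrawing command would only fight with one. A sketch of the contract with stubbed helpers (all names here are hypothetical stand-ins):

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static bool use_browser = true;

    static void setup_pager_stub(void) { puts("falling back to the pager"); }
    static void setup_tui_stub(void)   { puts("starting the TUI"); }

    static void setup_browser_sketch(bool fallback_to_pager)
    {
            if (!isatty(STDOUT_FILENO) || !use_browser) {
                    use_browser = false;
                    if (fallback_to_pager)
                            setup_pager_stub();
                    return;
            }
            setup_tui_stub();
    }

    int main(void)
    {
            setup_browser_sketch(true);    /* e.g. perf report */
            setup_browser_sketch(false);   /* e.g. perf top    */
            return 0;
    }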
 
diff --git a/tools/perf/util/ui/ui.h b/tools/perf/util/ui/ui.h
new file mode 100644 (file)
index 0000000..d264e05
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef _PERF_UI_H_
+#define _PERF_UI_H_ 1
+
+#include <pthread.h>
+
+extern pthread_mutex_t ui__lock;
+
+#endif /* _PERF_UI_H_ */
index 7b5a8926624e49be67d802fc91afc56451bead23..fdf1fc8f08bc83b2300311870f9b68ee559ef7c6 100644 (file)
@@ -9,6 +9,7 @@
 #include "../debug.h"
 #include "browser.h"
 #include "helpline.h"
+#include "ui.h"
 #include "util.h"
 
 static void newt_form__set_exit_keys(newtComponent self)
@@ -118,10 +119,12 @@ void ui__warning(const char *format, ...)
        va_list args;
 
        va_start(args, format);
-       if (use_browser > 0)
+       if (use_browser > 0) {
+               pthread_mutex_lock(&ui__lock);
                newtWinMessagev((char *)warning_str, (char *)ok,
                                (char *)format, args);
-       else
+               pthread_mutex_unlock(&ui__lock);
+       } else
                vfprintf(stderr, format, args);
        va_end(args);
 }
index e833f26f3bfc7d702cd58ba55c7ae85657fe3801..fc784284ac8be9070c1ab3681a216512150a3688 100644 (file)
@@ -70,9 +70,7 @@
 #include <sys/poll.h>
 #include <sys/socket.h>
 #include <sys/ioctl.h>
-#ifndef NO_SYS_SELECT_H
 #include <sys/select.h>
-#endif
 #include <netinet/in.h>
 #include <netinet/tcp.h>
 #include <arpa/inet.h>
 #include "types.h"
 #include <sys/ttydefaults.h>
 
-#ifndef NO_ICONV
-#include <iconv.h>
-#endif
-
 extern const char *graph_line;
 extern const char *graph_dotted_line;
 extern char buildid_dir[];
@@ -236,26 +230,6 @@ static inline int sane_case(int x, int high)
        return x;
 }
 
-#ifndef DIR_HAS_BSD_GROUP_SEMANTICS
-# define FORCE_DIR_SET_GID S_ISGID
-#else
-# define FORCE_DIR_SET_GID 0
-#endif
-
-#ifdef NO_NSEC
-#undef USE_NSEC
-#define ST_CTIME_NSEC(st) 0
-#define ST_MTIME_NSEC(st) 0
-#else
-#ifdef USE_ST_TIMESPEC
-#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctimespec.tv_nsec))
-#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtimespec.tv_nsec))
-#else
-#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctim.tv_nsec))
-#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtim.tv_nsec))
-#endif
-#endif
-
 int mkdir_p(char *path, mode_t mode);
 int copyfile(const char *from, const char *to);
 
index e1c62eeb88f5f845640b49ddb2fb00313039928e..ba7c63af6f3b2bc8a53a91de50ff9ad669468fb8 100755 (executable)
@@ -1,6 +1,6 @@
 #!/usr/bin/perl -w
 #
-# Copywrite 2010 - Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
+# Copyright 2010 - Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
 # Licensed under the terms of the GNU GPL License version 2
 #