Merge branch develop-3.10 into develop-3.10-next
author Huang, Tao <huangtao@rock-chips.com>
Fri, 12 Dec 2014 06:12:56 +0000 (14:12 +0800)
committer Huang, Tao <huangtao@rock-chips.com>
Fri, 12 Dec 2014 06:12:56 +0000 (14:12 +0800)
1225 files changed:
Documentation/arm64/memory.txt
Documentation/devicetree/bindings/arm/gic.txt
Documentation/devicetree/bindings/mailbox/mailbox.txt
Documentation/lzo.txt [new file with mode: 0644]
Documentation/mailbox.txt [new file with mode: 0644]
Documentation/networking/ip-sysctl.txt
Documentation/sound/alsa/ALSA-Configuration.txt
Documentation/stable_kernel_rules.txt
Documentation/virtual/kvm/api.txt
Documentation/virtual/kvm/devices/arm-vgic.txt [new file with mode: 0644]
Documentation/virtual/kvm/devices/vfio.txt [new file with mode: 0644]
Documentation/virtual/kvm/locking.txt
Documentation/x86/x86_64/mm.txt
MAINTAINERS
Makefile
android/configs/android-base.cfg
arch/Kconfig
arch/alpha/mm/fault.c
arch/arc/boot/dts/nsimosci.dts
arch/arc/include/asm/kgdb.h
arch/arc/include/uapi/asm/ptrace.h
arch/arc/kernel/ptrace.c
arch/arc/mm/fault.c
arch/arm/Kconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/include/asm/assembler.h
arch/arm/include/asm/barrier.h
arch/arm/include/asm/cputype.h
arch/arm/include/asm/hugetlb-3level.h [new file with mode: 0644]
arch/arm/include/asm/hugetlb.h [new file with mode: 0644]
arch/arm/include/asm/kvm_arch_timer.h [deleted file]
arch/arm/include/asm/kvm_arm.h
arch/arm/include/asm/kvm_asm.h
arch/arm/include/asm/kvm_emulate.h
arch/arm/include/asm/kvm_host.h
arch/arm/include/asm/kvm_mmu.h
arch/arm/include/asm/kvm_psci.h
arch/arm/include/asm/kvm_vgic.h [deleted file]
arch/arm/include/asm/memory.h
arch/arm/include/asm/pgtable-3level-hwdef.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/pgtable.h
arch/arm/include/asm/smp_scu.h
arch/arm/include/asm/syscall.h
arch/arm/include/asm/tlb.h
arch/arm/include/asm/tlbflush.h
arch/arm/include/asm/unistd.h
arch/arm/include/uapi/asm/kvm.h
arch/arm/include/uapi/asm/unistd.h
arch/arm/kernel/asm-offsets.c
arch/arm/kernel/calls.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/irq.c
arch/arm/kernel/kprobes-common.c
arch/arm/kernel/kprobes-thumb.c
arch/arm/kernel/kprobes.c
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/perf_event_cpu.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/relocate_kernel.S
arch/arm/kernel/smp.c
arch/arm/kvm/Kconfig
arch/arm/kvm/Makefile
arch/arm/kvm/arch_timer.c [deleted file]
arch/arm/kvm/arm.c
arch/arm/kvm/coproc.c
arch/arm/kvm/coproc.h
arch/arm/kvm/coproc_a15.c
arch/arm/kvm/coproc_a7.c [new file with mode: 0644]
arch/arm/kvm/emulate.c
arch/arm/kvm/guest.c
arch/arm/kvm/handle_exit.c
arch/arm/kvm/init.S
arch/arm/kvm/interrupts.S
arch/arm/kvm/interrupts_head.S
arch/arm/kvm/mmio.c
arch/arm/kvm/mmu.c
arch/arm/kvm/psci.c
arch/arm/kvm/reset.c
arch/arm/kvm/trace.h
arch/arm/kvm/vgic.c [deleted file]
arch/arm/lib/copy_template.S
arch/arm/lib/csumpartialcopygeneric.S
arch/arm/lib/io-readsl.S
arch/arm/lib/io-writesl.S
arch/arm/lib/memmove.S
arch/arm/lib/uaccess.S
arch/arm/mach-at91/clock.c
arch/arm/mach-omap2/control.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-rockchip/Kconfig
arch/arm/mach-rockchip/Kconfig.common [new file with mode: 0644]
arch/arm/mach-rockchip/common.c
arch/arm/mach-rockchip/efuse.c
arch/arm/mach-rockchip/last_log.c
arch/arm/mach-rockchip/rknandbase.c
arch/arm/mach-rockchip/rockchip_pm.c
arch/arm/mach-rockchip/sram.h
arch/arm/mm/Kconfig
arch/arm/mm/Makefile
arch/arm/mm/abort-ev6.S
arch/arm/mm/abort-ev7.S
arch/arm/mm/alignment.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/fault.c
arch/arm/mm/flush.c
arch/arm/mm/fsr-3level.c
arch/arm/mm/hugetlbpage.c [new file with mode: 0644]
arch/arm/mm/idmap.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/rk3368-clocks.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/rk3368-fpga.dts [new file with mode: 0644]
arch/arm64/boot/dts/rk3368.dtsi [new file with mode: 0644]
arch/arm64/configs/rockchip_defconfig [new file with mode: 0644]
arch/arm64/crypto/Makefile
arch/arm64/crypto/aes-glue.c
arch/arm64/crypto/ghash-ce-core.S
arch/arm64/crypto/ghash-ce-glue.c
arch/arm64/include/asm/atomic.h
arch/arm64/include/asm/barrier.h
arch/arm64/include/asm/cmpxchg.h
arch/arm64/include/asm/compat.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/debug-monitors.h
arch/arm64/include/asm/fpsimd.h
arch/arm64/include/asm/fpsimdmacros.h
arch/arm64/include/asm/futex.h
arch/arm64/include/asm/hw_breakpoint.h
arch/arm64/include/asm/kvm_arm.h [new file with mode: 0644]
arch/arm64/include/asm/kvm_asm.h [new file with mode: 0644]
arch/arm64/include/asm/kvm_coproc.h [new file with mode: 0644]
arch/arm64/include/asm/kvm_emulate.h [new file with mode: 0644]
arch/arm64/include/asm/kvm_host.h [new file with mode: 0644]
arch/arm64/include/asm/kvm_mmio.h [new file with mode: 0644]
arch/arm64/include/asm/kvm_mmu.h [new file with mode: 0644]
arch/arm64/include/asm/kvm_psci.h [new file with mode: 0644]
arch/arm64/include/asm/neon.h [new file with mode: 0644]
arch/arm64/include/asm/opcodes.h [new file with mode: 0644]
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/ptrace.h
arch/arm64/include/asm/seccomp.h [new file with mode: 0644]
arch/arm64/include/asm/syscall.h
arch/arm64/include/asm/sysreg.h [new file with mode: 0644]
arch/arm64/include/asm/thread_info.h
arch/arm64/include/asm/traps.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/include/asm/virt.h
arch/arm64/include/uapi/asm/kvm.h [new file with mode: 0644]
arch/arm64/include/uapi/asm/ptrace.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/entry-fpsimd.S
arch/arm64/kernel/entry.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/head.S
arch/arm64/kernel/hyp-stub.S
arch/arm64/kernel/kuser32.S
arch/arm64/kernel/opcodes.c [new file with mode: 0644]
arch/arm64/kernel/process.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/signal.c
arch/arm64/kernel/signal32.c
arch/arm64/kernel/swp_emulate.c [new file with mode: 0644]
arch/arm64/kernel/sys_compat.c
arch/arm64/kernel/traps.c
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/kvm/Kconfig [new file with mode: 0644]
arch/arm64/kvm/Makefile [new file with mode: 0644]
arch/arm64/kvm/emulate.c [new file with mode: 0644]
arch/arm64/kvm/guest.c [new file with mode: 0644]
arch/arm64/kvm/handle_exit.c [new file with mode: 0644]
arch/arm64/kvm/hyp-init.S [new file with mode: 0644]
arch/arm64/kvm/hyp.S [new file with mode: 0644]
arch/arm64/kvm/inject_fault.c [new file with mode: 0644]
arch/arm64/kvm/regmap.c [new file with mode: 0644]
arch/arm64/kvm/reset.c [new file with mode: 0644]
arch/arm64/kvm/sys_regs.c [new file with mode: 0644]
arch/arm64/kvm/sys_regs.h [new file with mode: 0644]
arch/arm64/kvm/sys_regs_generic_v8.c [new file with mode: 0644]
arch/arm64/kvm/vgic-v2-switch.S [new file with mode: 0644]
arch/arm64/kvm/vgic-v3-switch.S [new file with mode: 0644]
arch/arm64/lib/bitops.S
arch/arm64/lib/clear_user.S
arch/arm64/mach-rockchip/Kconfig [new file with mode: 0644]
arch/arm64/mach-rockchip/Makefile [new file with mode: 0644]
arch/arm64/mm/fault.c
arch/arm64/mm/init.c
arch/arm64/mm/proc.S
arch/avr32/mm/fault.c
arch/cris/mm/fault.c
arch/frv/mm/fault.c
arch/hexagon/mm/vm_fault.c
arch/ia64/include/asm/barrier.h
arch/ia64/include/asm/kvm_host.h
arch/ia64/kvm/Kconfig
arch/ia64/kvm/Makefile
arch/ia64/kvm/kvm-ia64.c
arch/ia64/mm/fault.c
arch/m32r/mm/fault.c
arch/m68k/mm/fault.c
arch/m68k/mm/hwtest.c
arch/metag/include/asm/barrier.h
arch/metag/mm/fault.c
arch/microblaze/mm/fault.c
arch/mips/boot/compressed/decompress.c
arch/mips/cavium-octeon/setup.c
arch/mips/include/asm/barrier.h
arch/mips/include/asm/kvm_host.h
arch/mips/include/asm/reg.h
arch/mips/include/asm/thread_info.h
arch/mips/kernel/binfmt_elfo32.c
arch/mips/kernel/irq-gic.c
arch/mips/kernel/mcount.S
arch/mips/kernel/ptrace.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/unaligned.c
arch/mips/kvm/kvm_mips.c
arch/mips/mm/c-r4k.c
arch/mips/mm/fault.c
arch/mips/mm/init.c
arch/mips/mm/tlbex.c
arch/mn10300/mm/fault.c
arch/openrisc/kernel/entry.S
arch/openrisc/kernel/signal.c
arch/openrisc/mm/fault.c
arch/parisc/Makefile
arch/parisc/include/uapi/asm/shmbuf.h
arch/parisc/include/uapi/asm/signal.h
arch/parisc/kernel/syscall_table.S
arch/parisc/mm/fault.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/barrier.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/include/asm/pte-hash64-64k.h
arch/powerpc/include/asm/ptrace.h
arch/powerpc/kvm/Kconfig
arch/powerpc/kvm/Makefile
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/mpic.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/fault.c
arch/powerpc/mm/numa.c
arch/powerpc/perf/callchain.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/s390/include/asm/barrier.h
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/syscall.h
arch/s390/kernel/ptrace.c
arch/s390/kvm/Makefile
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/fault.c
arch/s390/mm/init.c
arch/score/mm/fault.c
arch/sh/mm/fault.c
arch/sparc/Kconfig
arch/sparc/include/asm/atomic_32.h
arch/sparc/include/asm/barrier_64.h
arch/sparc/include/asm/cmpxchg_32.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/tlbflush_64.h
arch/sparc/include/asm/vio.h
arch/sparc/kernel/ldc.c
arch/sparc/kernel/pci_schizo.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/sys32.S
arch/sparc/kernel/unaligned_64.c
arch/sparc/lib/NG2memcpy.S
arch/sparc/lib/atomic32.c
arch/sparc/math-emu/math_32.c
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/sparc/mm/init_64.c
arch/sparc/mm/tsb.c
arch/tile/mm/fault.c
arch/um/kernel/trap.c
arch/unicore32/mm/fault.c
arch/x86/Kconfig
arch/x86/boot/header.S
arch/x86/boot/tools/build.c
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/barrier.h
arch/x86/include/asm/elf.h
arch/x86/include/asm/espfix.h [new file with mode: 0644]
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/irqflags.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/pgtable_64_types.h
arch/x86/include/asm/setup.h
arch/x86/include/asm/syscall.h
arch/x86/include/uapi/asm/kvm.h
arch/x86/include/uapi/asm/processor-flags.h
arch/x86/kernel/Makefile
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/espfix_64.c [new file with mode: 0644]
arch/x86/kernel/ldt.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/resource.c
arch/x86/kernel/signal.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tsc.c
arch/x86/kernel/vsyscall_64.c
arch/x86/kernel/xsave.c
arch/x86/kvm/Kconfig
arch/x86/kvm/Makefile
arch/x86/kvm/cpuid.c
arch/x86/kvm/cpuid.h
arch/x86/kvm/emulate.c
arch/x86/kvm/i8254.c
arch/x86/kvm/irq.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/mm/dump_pagetables.c
arch/x86/mm/fault.c
arch/x86/mm/pageattr.c
arch/x86/pci/i386.c
arch/x86/syscalls/syscall_32.tbl
arch/x86/syscalls/syscall_64.tbl
arch/x86/vdso/vdso32-setup.c
arch/xtensa/include/asm/pgtable.h
arch/xtensa/include/asm/uaccess.h
arch/xtensa/include/uapi/asm/ioctls.h
arch/xtensa/include/uapi/asm/unistd.h
arch/xtensa/kernel/entry.S
arch/xtensa/kernel/pci-dma.c
arch/xtensa/mm/fault.c
block/blk-cgroup.c
block/blk-settings.c
block/blk-tag.c
block/cfq-iosched.c
block/compat_ioctl.c
block/genhd.c
block/partition-generic.c
block/partitions/rk.c
block/scsi_ioctl.c
crypto/ablk_helper.c
crypto/af_alg.c
crypto/algif_skcipher.c
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acobject.h
drivers/acpi/acpica/dsfield.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/exfield.c
drivers/acpi/acpica/exprep.c
drivers/acpi/acpica/utcopy.c
drivers/acpi/processor_idle.c
drivers/acpi/scan.c
drivers/ata/ahci.c
drivers/ata/ata_piix.c
drivers/ata/libata-core.c
drivers/ata/libata-sff.c
drivers/ata/pata_scc.c
drivers/ata/pata_serverworks.c
drivers/base/core.c
drivers/base/cpu.c
drivers/base/firmware_class.c
drivers/base/power/main.c
drivers/base/power/wakeup.c
drivers/base/regmap/regmap-debugfs.c
drivers/base/regmap/regmap.c
drivers/base/syscore.c
drivers/block/drbd/drbd_interval.c
drivers/block/drbd/drbd_nl.c
drivers/block/rbd.c
drivers/block/sunvdc.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_h5.c
drivers/char/random.c
drivers/char/tpm/tpm.c
drivers/char/tpm/tpm.h
drivers/char/tpm/tpm_tis.c
drivers/clk/rockchip/clk-ops.c
drivers/clk/rockchip/clk-ops.h
drivers/clk/rockchip/clk-pd.c
drivers/clk/rockchip/clk-pll.c
drivers/clk/rockchip/clk-pll.h
drivers/clk/rockchip/clk.c
drivers/clocksource/rockchip_timer.c
drivers/cpufreq/arm_big_little.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_governor.h
drivers/cpufreq/cpufreq_interactive.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/rockchip-cpufreq.c
drivers/crypto/ux500/cryp/cryp_core.c
drivers/edac/cpc925_edac.c
drivers/edac/e7xxx_edac.c
drivers/edac/i3200_edac.c
drivers/edac/i82860_edac.c
drivers/firewire/core-cdev.c
drivers/firmware/efi/vars.c
drivers/gator/Kconfig
drivers/gator/Makefile
drivers/gator/gator.h
drivers/gator/gator_annotate.c
drivers/gator/gator_annotate_kernel.c
drivers/gator/gator_backtrace.c
drivers/gator/gator_buffer.c
drivers/gator/gator_buffer_write.c
drivers/gator/gator_cookies.c
drivers/gator/gator_events_armv6.c
drivers/gator/gator_events_armv7.c
drivers/gator/gator_events_block.c
drivers/gator/gator_events_ccn-504.c [deleted file]
drivers/gator/gator_events_irq.c
drivers/gator/gator_events_l2c-310.c
drivers/gator/gator_events_mali_4xx.c
drivers/gator/gator_events_mali_common.c
drivers/gator/gator_events_mali_common.h
drivers/gator/gator_events_mali_midgard.c [new file with mode: 0644]
drivers/gator/gator_events_mali_midgard_hw.c [new file with mode: 0644]
drivers/gator/gator_events_mali_midgard_hw_test.c [new file with mode: 0644]
drivers/gator/gator_events_mali_t6xx.c [deleted file]
drivers/gator/gator_events_mali_t6xx_hw.c [deleted file]
drivers/gator/gator_events_mali_t6xx_hw_test.c [deleted file]
drivers/gator/gator_events_meminfo.c
drivers/gator/gator_events_mmapped.c
drivers/gator/gator_events_net.c
drivers/gator/gator_events_perf_pmu.c
drivers/gator/gator_events_sched.c
drivers/gator/gator_events_scorpion.c
drivers/gator/gator_fs.c
drivers/gator/gator_hrtimer_gator.c
drivers/gator/gator_iks.c
drivers/gator/gator_main.c
drivers/gator/gator_marshaling.c
drivers/gator/gator_trace_gpu.c
drivers/gator/gator_trace_gpu.h [deleted file]
drivers/gator/gator_trace_power.c
drivers/gator/gator_trace_sched.c
drivers/gator/mali/mali_kbase_gator_api.h [new file with mode: 0644]
drivers/gator/mali/mali_mjollnir_profiling_gator_api.h
drivers/gator/mali/mali_utgard_profiling_gator_api.h
drivers/gator/mali_midgard.mk [new file with mode: 0644]
drivers/gator/mali_t6xx.mk [deleted file]
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/omapdrm/omap_plane.c
drivers/gpu/drm/qxl/qxl_irq.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/tilcdc/tilcdc_panel.c
drivers/gpu/drm/tilcdc/tilcdc_slave.c
drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/headset_observe/rk_headset.c
drivers/headset_observe/rk_headset_irq_hook_adc.c
drivers/hid/hid-cherry.c
drivers/hid/hid-kye.c
drivers/hid/hid-lg.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-logitech-dj.h
drivers/hid/hid-magicmouse.c
drivers/hid/hid-monterey.c
drivers/hid/hid-petalynx.c
drivers/hid/hid-picolcd_core.c
drivers/hid/hid-sunplus.c
drivers/hv/channel.c
drivers/hv/connection.c
drivers/hv/hv_kvp.c
drivers/hv/hv_util.c
drivers/hwmon/ads1015.c
drivers/hwmon/adt7470.c
drivers/hwmon/da9052-hwmon.c
drivers/hwmon/da9055-hwmon.c
drivers/hwmon/dme1737.c
drivers/hwmon/gpio-fan.c
drivers/hwmon/lm78.c
drivers/hwmon/lm85.c
drivers/hwmon/sis5595.c
drivers/hwmon/smsc47m192.c
drivers/i2c/busses/i2c-at91.c
drivers/iio/adc/ad_sigma_delta.c
drivers/iio/common/st_sensors/st_sensors_trigger.c
drivers/iio/gyro/itg3200_buffer.c
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
drivers/iio/industrialio-buffer.c
drivers/iio/inkern.c
drivers/iio/magnetometer/st_magn_core.c
drivers/infiniband/core/iwcm.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/input.c
drivers/input/keyboard/atkbd.c
drivers/input/keyreset.c
drivers/input/mouse/alps.c
drivers/input/mouse/elantech.c
drivers/input/mouse/synaptics.c
drivers/input/mouse/synaptics.h
drivers/input/sensors/sensor-dev.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/serport.c
drivers/input/touchscreen/rk29_i2c_goodix.c
drivers/iommu/amd_iommu.c
drivers/iommu/rockchip-iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/irqchip/irq-gic-common.c [new file with mode: 0644]
drivers/irqchip/irq-gic-common.h [new file with mode: 0644]
drivers/irqchip/irq-gic-v3.c [new file with mode: 0644]
drivers/irqchip/irq-gic.c
drivers/lguest/x86/core.c
drivers/mailbox/mailbox.c
drivers/md/dm-bufio.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-crypt.c
drivers/md/dm-log-userspace-transfer.c
drivers/md/dm-raid.c
drivers/md/dm-thin-metadata.c
drivers/md/persistent-data/dm-btree-internal.h
drivers/md/persistent-data/dm-btree-spine.c
drivers/md/persistent-data/dm-btree.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/dvb-frontends/ds3000.c
drivers/media/dvb-frontends/tda10071.c
drivers/media/i2c/tda7432.c
drivers/media/media-device.c
drivers/media/pci/cx18/cx18-driver.c
drivers/media/tuners/xc4000.c
drivers/media/tuners/xc5000.c
drivers/media/usb/au0828/au0828-video.c
drivers/media/usb/em28xx/em28xx-video.c
drivers/media/usb/gspca/pac7302.c
drivers/media/usb/hdpvr/hdpvr-video.c
drivers/media/usb/ttusb-dec/ttusbdecfe.c
drivers/media/v4l2-core/v4l2-common.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/media/video/rk30_camera_oneframe.c
drivers/media/video/rk_camsys/camsys_gpio.h
drivers/media/video/rk_camsys/camsys_internal.h
drivers/media/video/rk_camsys/camsys_soc_priv.c
drivers/message/fusion/mptspi.c
drivers/mfd/omap-usb-host.c
drivers/mfd/rtsx_pcr.c
drivers/misc/mei/bus.c
drivers/misc/mei/client.c
drivers/misc/mei/nfc.c
drivers/mmc/host/Kconfig
drivers/mmc/host/rk_sdmmc.c
drivers/mmc/host/rtsx_pci_sdmmc.c
drivers/mtd/ftl.c
drivers/mtd/nand/omap2.c
drivers/mtd/ubi/fastmap.c
drivers/net/Kconfig
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/rockchip/gmac/stmmac_platform.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/ppp/ppp_generic.c
drivers/net/ppp/pppoe.c
drivers/net/ppp/pptp.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/qmi_wwan.c
drivers/net/vxlan.c
drivers/net/wireless/ath/carl9170/carl9170.h
drivers/net/wireless/ath/carl9170/usb.c
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/linux_osl.c
drivers/net/wireless/rockchip_wlan/rtl8188eu/core/rtw_br_ext.c
drivers/net/wireless/rt2x00/rt2800.h
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/nfc/microread/microread.c
drivers/of/base.c
drivers/of/selftest.c
drivers/pci/pci-sysfs.c
drivers/pci/quirks.c
drivers/pinctrl/Kconfig
drivers/pinctrl/Makefile
drivers/pinctrl/pinconf.c
drivers/pinctrl/pinctrl-rk3368.c [new file with mode: 0755]
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/dell-wmi.c
drivers/power/avs/Kconfig
drivers/power/avs/Makefile
drivers/power/avs/rockchip-io-domain.c [new file with mode: 0755]
drivers/power/rk30_factory_adc_battery.c
drivers/pwm/pwm-rockchip.c
drivers/rapidio/devices/tsi721_dma.c
drivers/regulator/arizona-ldo1.c
drivers/regulator/core.c
drivers/sbus/char/bbc_envctrl.c
drivers/sbus/char/bbc_i2c.c
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/bfa/bfa_ioc.h
drivers/scsi/hpsa.c
drivers/scsi/libiscsi.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/storvsc_drv.c
drivers/spi/spi-dw-mid.c
drivers/spi/spi-omap2-mcspi.c
drivers/spi/spi-orion.c
drivers/spi/spi-pl022.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-rockchip-core.c
drivers/spi/spi-rockchip-dma.c
drivers/staging/android/ashmem.c
drivers/staging/android/fiq_debugger/fiq_debugger.c
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_carveout_heap.c
drivers/staging/android/ion/ion_cma_heap.c
drivers/staging/android/ion/ion_drm_heap.c
drivers/staging/android/ion/ion_priv.h
drivers/staging/android/ion/ion_system_heap.c
drivers/staging/android/ion/rockchip/rockchip_ion.c
drivers/staging/android/ion/rockchip/rockchip_ion_snapshot.c
drivers/staging/android/trace/ion.h
drivers/staging/iio/impedance-analyzer/ad5933.c
drivers/staging/iio/meter/ade7758.h
drivers/staging/iio/meter/ade7758_core.c
drivers/staging/iio/meter/ade7758_ring.c
drivers/staging/iio/meter/ade7758_trigger.c
drivers/staging/speakup/selection.c
drivers/staging/vt6655/bssdb.c
drivers/staging/vt6655/device_main.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/target_core_device.c
drivers/target/target_core_pr.c
drivers/target/target_core_pr.h
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/thermal/cpu_cooling.c
drivers/thermal/of-thermal.c
drivers/thermal/thermal_core.c
drivers/tty/serial/8250/8250_dma.c
drivers/tty/serial/8250/8250_early.c
drivers/tty/serial/earlycon.c
drivers/tty/serial/rk_serial.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/sunsab.c
drivers/tty/tty_io.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/dwc3/gadget.h
drivers/usb/dwc_otg_310/common_port/dwc_common_linux.c
drivers/usb/dwc_otg_310/common_port/dwc_os.h
drivers/usb/gadget/f_accessory.c
drivers/usb/gadget/f_acm.c
drivers/usb/gadget/f_audio_source.c
drivers/usb/gadget/f_rndis.c
drivers/usb/gadget/rndis.c
drivers/usb/gadget/rndis.h
drivers/usb/gadget/u_ether.c
drivers/usb/gadget/u_ether.h
drivers/usb/gadget/udc-core.c
drivers/usb/host/ehci-h20ahb.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-pci.c
drivers/usb/host/ohci-q.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/misc/sisusbvga/sisusb.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/opticon.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/sierra.c
drivers/usb/serial/usb-serial.c
drivers/usb/serial/whiteheat.c
drivers/usb/serial/zte_ev.c
drivers/usb/storage/transport.c
drivers/usb/storage/unusual_devs.h
drivers/video/adf/adf.c
drivers/video/console/bitblit.c
drivers/video/console/fbcon_ccw.c
drivers/video/console/fbcon_cw.c
drivers/video/console/fbcon_ud.c
drivers/video/rockchip/Makefile
drivers/video/rockchip/hdmi/chips/rk3288/rk3288_hdmi.c
drivers/video/rockchip/iep/iep_drv.c
drivers/video/rockchip/lcdc/Kconfig
drivers/video/rockchip/lcdc/Makefile
drivers/video/rockchip/lcdc/rk3368_lcdc.c [new file with mode: 0644]
drivers/video/rockchip/lcdc/rk3368_lcdc.h [new file with mode: 0644]
drivers/video/rockchip/transmitter/Kconfig
drivers/video/rockchip/transmitter/rk31xx_lvds.c
drivers/video/rockchip/transmitter/rk31xx_lvds.h
drivers/virtio/virtio_pci.c
fs/btrfs/file-item.c
fs/btrfs/inode.c
fs/btrfs/relocation.c
fs/btrfs/transaction.c
fs/buffer.c
fs/cifs/cifsglob.h
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/readdir.c
fs/cifs/smb1ops.c
fs/cifs/smb2file.c
fs/cifs/smb2inode.c
fs/cifs/smb2maperror.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/coredump.c
fs/dcache.c
fs/ecryptfs/inode.c
fs/exec.c
fs/ext2/inode.c
fs/ext2/xip.c
fs/ext3/super.c
fs/ext4/ext4.h
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mballoc.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/fuse/inode.c
fs/ioprio.c
fs/isofs/inode.c
fs/isofs/isofs.h
fs/isofs/rock.c
fs/jbd2/recovery.c
fs/jffs2/jffs2_fs_sb.h
fs/jffs2/wbuf.c
fs/lockd/mon.c
fs/lockd/svc.c
fs/namei.c
fs/namespace.c
fs/nfs/delegation.c
fs/nfs/delegation.h
fs/nfs/direct.c
fs/nfs/inode.c
fs/nfs/nfs3acl.c
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4renewd.c
fs/nfs/nfs4state.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfssvc.c
fs/nilfs2/inode.c
fs/notify/fanotify/fanotify_user.c
fs/notify/fdinfo.c
fs/ocfs2/dlm/dlmmaster.c
fs/proc/array.c
fs/pstore/inode.c
fs/quota/dquot.c
fs/seq_file.c
fs/super.c
fs/ubifs/commit.c
fs/ubifs/log.c
fs/ubifs/master.c
fs/ubifs/super.c
fs/ubifs/ubifs.h
fs/udf/inode.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_file.c
fs/xfs/xfs_qm.c
include/asm-generic/barrier.h
include/asm-generic/seccomp.h [new file with mode: 0644]
include/asm-generic/syscall.h
include/drm/drm_pciids.h
include/dt-bindings/clock/rockchip,rk3368.h [new file with mode: 0644]
include/dt-bindings/clock/rockchip.h
include/kvm/arm_arch_timer.h [new file with mode: 0644]
include/kvm/arm_vgic.h [new file with mode: 0644]
include/linux/blkdev.h
include/linux/capability.h
include/linux/ceph/messenger.h
include/linux/cgroup.h
include/linux/clocksource.h
include/linux/compiler-gcc.h
include/linux/compiler-gcc5.h [new file with mode: 0644]
include/linux/compiler-intel.h
include/linux/compiler.h
include/linux/iio/trigger.h
include/linux/init_task.h
include/linux/ipv6.h
include/linux/irqchip/arm-gic-v3.h [new file with mode: 0644]
include/linux/irqchip/arm-gic.h
include/linux/jiffies.h
include/linux/kgdb.h
include/linux/kvm_host.h
include/linux/kvm_types.h
include/linux/libata.h
include/linux/mailbox_client.h
include/linux/mailbox_controller.h
include/linux/memcontrol.h
include/linux/mm.h
include/linux/mmc/rk_mmc.h
include/linux/mount.h
include/linux/nfs_xdr.h
include/linux/of.h
include/linux/oom.h
include/linux/printk.h
include/linux/regulator/consumer.h
include/linux/rockchip/cru.h
include/linux/rockchip/iomap.h
include/linux/rockchip_ion.h
include/linux/sched.h
include/linux/seccomp.h
include/linux/string.h
include/linux/sunrpc/svc_xprt.h
include/linux/suspend.h
include/linux/syscalls.h
include/linux/usb/quirks.h
include/linux/wakeup_reason.h
include/linux/wlan_plat.h
include/linux/workqueue.h
include/media/videobuf2-core.h
include/net/cfg80211.h
include/net/fib_rules.h
include/net/flow.h
include/net/inet_connection_sock.h
include/net/inetpeer.h
include/net/ip.h
include/net/ip6_route.h
include/net/ipv6.h
include/net/route.h
include/net/sctp/command.h
include/net/sctp/sctp.h
include/net/sctp/sm.h
include/net/secure_seq.h
include/net/sock.h
include/net/tcp.h
include/trace/events/kvm.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/Kbuild
include/uapi/linux/audit.h
include/uapi/linux/fib_rules.h
include/uapi/linux/fs.h
include/uapi/linux/ipv6.h
include/uapi/linux/kvm.h
include/uapi/linux/netfilter/xt_bpf.h
include/uapi/linux/psci.h [new file with mode: 0644]
include/uapi/linux/rtnetlink.h
include/uapi/linux/seccomp.h
include/uapi/sound/compress_params.h
init/Kconfig
init/main.c
ipc/ipc_sysctl.c
kernel/Kconfig.locks
kernel/audit.c
kernel/audit_tree.c
kernel/capability.c
kernel/cgroup.c
kernel/events/core.c
kernel/exit.c
kernel/fork.c
kernel/freezer.c
kernel/irq/pm.c
kernel/kcmp.c
kernel/module.c
kernel/posix-timers.c
kernel/power/hibernate.c
kernel/power/main.c
kernel/power/power.h
kernel/power/process.c
kernel/power/suspend.c
kernel/power/suspend_test.c
kernel/power/wakeup_reason.c
kernel/printk.c
kernel/sched/core.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/seccomp.c
kernel/smp.c
kernel/sys.c
kernel/sys_ni.c
kernel/time.c
kernel/time/alarmtimer.c
kernel/time/clockevents.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_clock.c
kernel/trace/trace_syscalls.c
lib/bitmap.c
lib/btree.c
lib/lzo/lzo1x_decompress_safe.c
lib/string.c
linaro/configs/linaro-base.conf
mm/huge_memory.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory.c
mm/oom_kill.c
mm/page_alloc.c
mm/page_cgroup.c
mm/percpu-vm.c
mm/shmem.c
mm/slab_common.c
mm/truncate.c
mm/util.c
net/8021q/vlan_core.c
net/appletalk/ddp.c
net/bluetooth/l2cap_sock.c
net/bluetooth/rfcomm/core.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bridge/br_private.h
net/bridge/br_stp_bpdu.c
net/ceph/auth_x.c
net/ceph/crypto.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/compat.c
net/core/dst.c
net/core/fib_rules.c
net/core/iovec.c
net/core/secure_seq.c
net/core/skbuff.c
net/dns_resolver/dns_query.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inetpeer.c
net/ipv4/ip_options.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/ipmr.c
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_veno.c
net/ipv4/udp.c
net/ipv4/xfrm4_mode_tunnel.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/ah6.c
net/ipv6/datagram.c
net/ipv6/esp6.c
net/ipv6/icmp.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ipcomp6.c
net/ipv6/output_core.c
net/ipv6/ping.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/l2tp/l2tp_ppp.c
net/mac80211/mlme.c
net/mac80211/rate.c
net/mac80211/rx.c
net/mac80211/tx.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_nat_core.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netlink/af_netlink.c
net/openvswitch/actions.c
net/packet/af_packet.c
net/packet/internal.h
net/rfkill/rfkill-bt.c
net/rfkill/rfkill-wlan.c
net/sctp/associola.c
net/sctp/auth.c
net/sctp/inqueue.c
net/sctp/output.c
net/sctp/sm_make_chunk.c
net/sctp/sm_statefuns.c
net/sctp/sysctl.c
net/sctp/ulpevent.c
net/sunrpc/svcsock.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/tipc/bcast.c
net/wireless/nl80211.c
net/wireless/scan.c
net/wireless/trace.h
security/Kconfig
security/apparmor/domain.c
security/apparmor/include/apparmor.h
security/apparmor/lib.c
security/commoncap.c
security/integrity/evm/evm_main.c
security/selinux/hooks.c
sound/core/compress_offload.c
sound/core/info.c
sound/core/pcm_compat.c
sound/core/pcm_lib.c
sound/core/pcm_native.c
sound/pci/Kconfig
sound/pci/emu10k1/emu10k1_callback.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/oxygen/virtuoso.c
sound/pci/oxygen/xonar_pcm179x.c
sound/soc/codecs/max98090.c
sound/soc/codecs/rt3261-dsp.c
sound/soc/codecs/rt3261_ioctl.c
sound/soc/codecs/rt5640_ioctl.c
sound/soc/codecs/rt56xx_ioctl.c
sound/soc/codecs/rt_codec_ioctl.c
sound/soc/codecs/wm_adsp.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/pxa/pxa-ssp.c
sound/soc/samsung/i2s.c
sound/soc/soc-pcm.c
sound/usb/mixer_quirks.c
sound/usb/quirks-table.h
tools/gator/daemon/Android.mk
tools/gator/daemon/AnnotateListener.cpp [new file with mode: 0644]
tools/gator/daemon/AnnotateListener.h [new file with mode: 0644]
tools/gator/daemon/Application.mk [new file with mode: 0644]
tools/gator/daemon/Buffer.cpp
tools/gator/daemon/Buffer.h
tools/gator/daemon/CCNDriver.cpp [new file with mode: 0644]
tools/gator/daemon/CCNDriver.h [new file with mode: 0644]
tools/gator/daemon/CPUFreqDriver.cpp [new file with mode: 0644]
tools/gator/daemon/CPUFreqDriver.h [new file with mode: 0644]
tools/gator/daemon/CapturedXML.cpp
tools/gator/daemon/CapturedXML.h
tools/gator/daemon/Child.cpp
tools/gator/daemon/Child.h
tools/gator/daemon/Command.cpp [new file with mode: 0644]
tools/gator/daemon/Command.h [new file with mode: 0644]
tools/gator/daemon/Config.h
tools/gator/daemon/ConfigurationXML.cpp
tools/gator/daemon/Counter.h
tools/gator/daemon/DiskIODriver.cpp [new file with mode: 0644]
tools/gator/daemon/DiskIODriver.h [new file with mode: 0644]
tools/gator/daemon/Driver.cpp
tools/gator/daemon/Driver.h
tools/gator/daemon/DriverSource.cpp
tools/gator/daemon/DriverSource.h
tools/gator/daemon/DynBuf.cpp
tools/gator/daemon/EventsXML.cpp
tools/gator/daemon/EventsXML.h
tools/gator/daemon/ExternalSource.cpp
tools/gator/daemon/ExternalSource.h
tools/gator/daemon/FSDriver.cpp [new file with mode: 0644]
tools/gator/daemon/FSDriver.h [new file with mode: 0644]
tools/gator/daemon/Fifo.cpp
tools/gator/daemon/Fifo.h
tools/gator/daemon/FtraceDriver.cpp [new file with mode: 0644]
tools/gator/daemon/FtraceDriver.h [new file with mode: 0644]
tools/gator/daemon/FtraceSource.cpp [new file with mode: 0644]
tools/gator/daemon/FtraceSource.h [new file with mode: 0644]
tools/gator/daemon/Hwmon.cpp [deleted file]
tools/gator/daemon/Hwmon.h [deleted file]
tools/gator/daemon/HwmonDriver.cpp [new file with mode: 0644]
tools/gator/daemon/HwmonDriver.h [new file with mode: 0644]
tools/gator/daemon/KMod.cpp
tools/gator/daemon/KMod.h
tools/gator/daemon/LocalCapture.h
tools/gator/daemon/Logging.cpp
tools/gator/daemon/Logging.h
tools/gator/daemon/Makefile
tools/gator/daemon/Makefile_aarch64
tools/gator/daemon/MaliVideoDriver.cpp [new file with mode: 0644]
tools/gator/daemon/MaliVideoDriver.h [new file with mode: 0644]
tools/gator/daemon/MemInfoDriver.cpp [new file with mode: 0644]
tools/gator/daemon/MemInfoDriver.h [new file with mode: 0644]
tools/gator/daemon/Monitor.cpp
tools/gator/daemon/Monitor.h
tools/gator/daemon/NetDriver.cpp [new file with mode: 0644]
tools/gator/daemon/NetDriver.h [new file with mode: 0644]
tools/gator/daemon/OlySocket.cpp
tools/gator/daemon/OlySocket.h
tools/gator/daemon/PerfBuffer.cpp
tools/gator/daemon/PerfBuffer.h
tools/gator/daemon/PerfDriver.cpp
tools/gator/daemon/PerfDriver.h
tools/gator/daemon/PerfGroup.cpp
tools/gator/daemon/PerfGroup.h
tools/gator/daemon/PerfSource.cpp
tools/gator/daemon/PerfSource.h
tools/gator/daemon/Proc.cpp
tools/gator/daemon/Proc.h
tools/gator/daemon/Sender.cpp
tools/gator/daemon/Sender.h
tools/gator/daemon/SessionData.cpp
tools/gator/daemon/SessionData.h
tools/gator/daemon/SessionXML.cpp
tools/gator/daemon/SessionXML.h
tools/gator/daemon/Setup.cpp [new file with mode: 0644]
tools/gator/daemon/Setup.h [new file with mode: 0644]
tools/gator/daemon/StreamlineSetup.cpp
tools/gator/daemon/StreamlineSetup.h
tools/gator/daemon/UEvent.cpp
tools/gator/daemon/UserSpaceSource.cpp
tools/gator/daemon/UserSpaceSource.h
tools/gator/daemon/c++.cpp [new file with mode: 0644]
tools/gator/daemon/common.mk
tools/gator/daemon/defaults.xml
tools/gator/daemon/escape.c
tools/gator/daemon/events-CCI-400.xml
tools/gator/daemon/events-CCN-504.xml
tools/gator/daemon/events-Cortex-A12.xml [deleted file]
tools/gator/daemon/events-Cortex-A17.xml [new file with mode: 0644]
tools/gator/daemon/events-Cortex-A53.xml
tools/gator/daemon/events-Cortex-A57.xml
tools/gator/daemon/events-Filesystem.xml [new file with mode: 0644]
tools/gator/daemon/events-L2C-310.xml
tools/gator/daemon/events-Linux.xml
tools/gator/daemon/events-Mali-4xx.xml
tools/gator/daemon/events-Mali-Midgard.xml [new file with mode: 0644]
tools/gator/daemon/events-Mali-Midgard_hw.xml [new file with mode: 0644]
tools/gator/daemon/events-Mali-T60x_hw.xml [new file with mode: 0644]
tools/gator/daemon/events-Mali-T62x_hw.xml [new file with mode: 0644]
tools/gator/daemon/events-Mali-T6xx.xml [deleted file]
tools/gator/daemon/events-Mali-T6xx_hw.xml [deleted file]
tools/gator/daemon/events-Mali-T72x_hw.xml [new file with mode: 0644]
tools/gator/daemon/events-Mali-T76x_hw.xml [new file with mode: 0644]
tools/gator/daemon/events-Mali-V500.xml [new file with mode: 0644]
tools/gator/daemon/events-ftrace.xml [new file with mode: 0644]
tools/gator/daemon/main.cpp
tools/gator/daemon/mxml/config.h
tools/gator/daemon/mxml/mxml-attr.c
tools/gator/daemon/mxml/mxml-entity.c
tools/gator/daemon/mxml/mxml-file.c
tools/gator/daemon/mxml/mxml-get.c
tools/gator/daemon/mxml/mxml-index.c
tools/gator/daemon/mxml/mxml-node.c
tools/gator/daemon/mxml/mxml-private.c
tools/gator/daemon/mxml/mxml-private.h
tools/gator/daemon/mxml/mxml-search.c
tools/gator/daemon/mxml/mxml-set.c
tools/gator/daemon/mxml/mxml-string.c
tools/gator/daemon/mxml/mxml.h
tools/perf/builtin-kmem.c
tools/testing/selftests/Makefile
tools/testing/selftests/mount/Makefile [new file with mode: 0644]
tools/testing/selftests/mount/unprivileged-remount-test.c [new file with mode: 0644]
virt/kvm/Kconfig
virt/kvm/arm/arch_timer.c [new file with mode: 0644]
virt/kvm/arm/vgic-v2.c [new file with mode: 0644]
virt/kvm/arm/vgic-v3.c [new file with mode: 0644]
virt/kvm/arm/vgic.c [new file with mode: 0644]
virt/kvm/async_pf.c
virt/kvm/eventfd.c
virt/kvm/ioapic.c
virt/kvm/ioapic.h
virt/kvm/iommu.c
virt/kvm/irq_comm.c
virt/kvm/irqchip.c
virt/kvm/kvm_main.c
virt/kvm/vfio.c [new file with mode: 0644]

index c6941f815f15a2567a838bba036b4ad46e1055e0..d50fa618371b3fa35c79bf3beb46e2f7dd4f4122 100644 (file)
@@ -102,3 +102,10 @@ Translation table lookup with 64KB pages:
  |                 |    +--------------------------> [41:29] L2 index (only 38:29 used)
  |                 +-------------------------------> [47:42] L1 index (not used)
  +-------------------------------------------------> [63] TTBR0/1
+
+When using KVM, the hypervisor maps kernel pages in EL2, at a fixed
+offset from the kernel VA (top 24bits of the kernel VA set to zero):
+
+Start                  End                     Size            Use
+-----------------------------------------------------------------------
+0000004000000000       0000007fffffffff         256GB          kernel objects mapped in HYP
index 3dfb0c0384f572c45cc3df9e6fe92d6c6212bc89..5357745772381f7afd6469867c17013193dfe744 100644 (file)
@@ -49,6 +49,11 @@ Optional
   regions, used when the GIC doesn't have banked registers. The offset is
   cpu-offset * cpu-nr.
 
+- arm,routable-irqs : Total number of gic irq inputs which are not directly
+                 connected from the peripherals, but are routed dynamically
+                 by a crossbar/multiplexer preceding the GIC. The GIC irq
+                 input line is assigned dynamically when the corresponding
+                 peripheral's crossbar line is mapped.
 Example:
 
        intc: interrupt-controller@fff11000 {
@@ -56,6 +61,7 @@ Example:
                #interrupt-cells = <3>;
                #address-cells = <1>;
                interrupt-controller;
+               arm,routable-irqs = <160>;
                reg = <0xfff11000 0x1000>,
                      <0xfff10100 0x100>;
        };
index 3f009555f392359ef5166e5ed5d8d1ace6ea480f..1a2cd3d266db0b59a5709bccecef737f9aadf139 100644 (file)
@@ -19,15 +19,20 @@ Example:
 * Mailbox Client
 
 Required property:
-- mbox: List of phandle and mailbox channel specifier.
+- mboxes: List of phandle and mailbox channel specifiers.
 
+Optional property:
 - mbox-names: List of identifier strings for each mailbox channel
-               required by the client.
+               required by the client. The use of this property
+               is discouraged in favor of using index in list of
+               'mboxes' while requesting a mailbox. Instead the
+               platforms may define channel indices, in DT headers,
+               to something legible.
 
 Example:
        pwr_cntrl: power {
                ...
                mbox-names = "pwr-ctrl", "rpc";
-               mbox = <&mailbox 0
+               mboxes = <&mailbox 0
                        &mailbox 1>;
        };
diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt
new file mode 100644 (file)
index 0000000..ea45dd3
--- /dev/null
@@ -0,0 +1,164 @@
+
+LZO stream format as understood by Linux's LZO decompressor
+===========================================================
+
+Introduction
+
+  This is not a specification. No specification seems to be publicly available
+  for the LZO stream format. This document describes what input format the LZO
+  decompressor as implemented in the Linux kernel understands. The file subject
+  of this analysis is lib/lzo/lzo1x_decompress_safe.c. No analysis was made on
+  the compressor nor on any other implementations though it seems likely that
+  the format matches the standard one. The purpose of this document is to
+  better understand what the code does in order to propose more efficient fixes
+  for future bug reports.
+
+Description
+
+  The stream is composed of a series of instructions, operands, and data. The
+  instructions consist of a few bits representing an opcode, and bits forming
+  the operands for the instruction, whose size and position depend on the
+  opcode and on the number of literals copied by previous instruction. The
+  operands are used to indicate :
+
+    - a distance when copying data from the dictionary (past output buffer)
+    - a length (number of bytes to copy from dictionary)
+    - the number of literals to copy, which is retained in variable "state"
+      as a piece of information for next instructions.
+
+  Optionally depending on the opcode and operands, extra data may follow. These
+  extra data can be a complement for the operand (eg: a length or a distance
+  encoded on larger values), or a literal to be copied to the output buffer.
+
+  The first byte of the block follows a different encoding from other bytes, it
+  seems to be optimized for literal use only, since there is no dictionary yet
+  prior to that byte.
+
+  Lengths are always encoded on a variable size starting with a small number
+  of bits in the operand. If the number of bits isn't enough to represent the
+  length, up to 255 may be added in increments by consuming more bytes with a
+  rate of at most 255 per extra byte (thus the compression ratio cannot exceed
+  around 255:1). The variable length encoding using #bits is always the same :
+
+       length = byte & ((1 << #bits) - 1)
+       if (!length) {
+               length = ((1 << #bits) - 1)
+               length += 255*(number of zero bytes)
+               length += first-non-zero-byte
+       }
+       length += constant (generally 2 or 3)
+
+  For references to the dictionary, distances are relative to the output
+  pointer. Distances are encoded using very few bits belonging to certain
+  ranges, resulting in multiple copy instructions using different encodings.
+  Certain encodings involve one extra byte, others involve two extra bytes
+  forming a little-endian 16-bit quantity (marked LE16 below).
+
+  After any instruction except the large literal copy, 0, 1, 2 or 3 literals
+  are copied before starting the next instruction. The number of literals that
+  were copied may change the meaning and behaviour of the next instruction. In
+  practice, only one instruction needs to know whether 0, less than 4, or more
+  literals were copied. This is the information stored in the <state> variable
+  in this implementation. This number of immediate literals to be copied is
+  generally encoded in the last two bits of the instruction but may also be
+  taken from the last two bits of an extra operand (eg: distance).
+
+  End of stream is declared when a block copy of distance 0 is seen. Only one
+  instruction may encode this distance (0001HLLL), it takes one LE16 operand
+  for the distance, thus requiring 3 bytes.
+
+  IMPORTANT NOTE : in the code some length checks are missing because certain
+  instructions are called under the assumption that a certain number of bytes
+  follow because it has already been guaranteed before parsing the instructions.
+  They just have to "refill" this credit if they consume extra bytes. This is
+  an implementation design choice independent of the algorithm or encoding.
+
+Byte sequences
+
+  First byte encoding :
+
+      0..17   : follow regular instruction encoding, see below. It is worth
+                noting that codes 16 and 17 will represent a block copy from
+                the dictionary which is empty, and that they will always be
+                invalid at this place.
+
+      18..21  : copy 0..3 literals
+                state = (byte - 17) = 0..3  [ copy <state> literals ]
+                skip byte
+
+      22..255 : copy literal string
+                length = (byte - 17) = 4..238
+                state = 4 [ don't copy extra literals ]
+                skip byte
+
+  Instruction encoding :
+
+      0 0 0 0 X X X X  (0..15)
+        Depends on the number of literals copied by the last instruction.
+        If last instruction did not copy any literal (state == 0), this
+        encoding will be a copy of 4 or more literal, and must be interpreted
+        like this :
+
+           0 0 0 0 L L L L  (0..15)  : copy long literal string
+           length = 3 + (L ?: 15 + (zero_bytes * 255) + non_zero_byte)
+           state = 4  (no extra literals are copied)
+
+        If last instruction used to copy between 1 to 3 literals (encoded in
+        the instruction's opcode or distance), the instruction is a copy of a
+        2-byte block from the dictionary within a 1kB distance. It is worth
+        noting that this instruction provides little savings since it uses 2
+        bytes to encode a copy of 2 other bytes but it encodes the number of
+        following literals for free. It must be interpreted like this :
+
+           0 0 0 0 D D S S  (0..15)  : copy 2 bytes from <= 1kB distance
+           length = 2
+           state = S (copy S literals after this block)
+         Always followed by exactly one byte : H H H H H H H H
+           distance = (H << 2) + D + 1
+
+        If last instruction used to copy 4 or more literals (as detected by
+        state == 4), the instruction becomes a copy of a 3-byte block from the
+        dictionary from a 2..3kB distance, and must be interpreted like this :
+
+           0 0 0 0 D D S S  (0..15)  : copy 3 bytes from 2..3 kB distance
+           length = 3
+           state = S (copy S literals after this block)
+         Always followed by exactly one byte : H H H H H H H H
+           distance = (H << 2) + D + 2049
+
+      0 0 0 1 H L L L  (16..31)
+           Copy of a block within 16..48kB distance (preferably less than 10B)
+           length = 2 + (L ?: 7 + (zero_bytes * 255) + non_zero_byte)
+        Always followed by exactly one LE16 :  D D D D D D D D : D D D D D D S S
+           distance = 16384 + (H << 14) + D
+           state = S (copy S literals after this block)
+           End of stream is reached if distance == 16384
+
+      0 0 1 L L L L L  (32..63)
+           Copy of small block within 16kB distance (preferably less than 34B)
+           length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte)
+        Always followed by exactly one LE16 :  D D D D D D D D : D D D D D D S S
+           distance = D + 1
+           state = S (copy S literals after this block)
+
+      0 1 L D D D S S  (64..127)
+           Copy 3-4 bytes from block within 2kB distance
+           state = S (copy S literals after this block)
+           length = 3 + L
+         Always followed by exactly one byte : H H H H H H H H
+           distance = (H << 3) + D + 1
+
+      1 L L D D D S S  (128..255)
+           Copy 5-8 bytes from block within 2kB distance
+           state = S (copy S literals after this block)
+           length = 5 + L
+         Always followed by exactly one byte : H H H H H H H H
+           distance = (H << 3) + D + 1
+
+Authors
+
+  This document was written by Willy Tarreau <w@1wt.eu> on 2014/07/19 during an
+  analysis of the decompression code available in Linux 3.16-rc5. The code is
+  tricky, it is possible that this document contains mistakes or that a few
+  corner cases were overlooked. In any case, please report any doubt, fix, or
+  proposed updates to the author(s) so that the document can be updated.
diff --git a/Documentation/mailbox.txt b/Documentation/mailbox.txt
new file mode 100644 (file)
index 0000000..60f43ff
--- /dev/null
@@ -0,0 +1,122 @@
+               The Common Mailbox Framework
+               Jassi Brar <jaswinder.singh@linaro.org>
+
+ This document aims to help developers write client and controller
+drivers for the API. But before we start, let us note that the
+client (especially) and controller drivers are likely going to be
+very platform specific because the remote firmware is likely to be
+proprietary and implement non-standard protocol. So even if two
+platforms employ, say, PL320 controller, the client drivers can't
+be shared across them. Even the PL320 driver might need to accommodate
+some platform specific quirks. So the API is meant mainly to avoid
+similar copies of code written for each platform. Having said that,
+nothing prevents the remote f/w from also being Linux based and using the
+same api there. However none of that helps us locally because we only
+ever deal at client's protocol level.
+ Some of the choices made during implementation are the result of this
+peculiarity of this "common" framework.
+
+
+
+       Part 1 - Controller Driver (See include/linux/mailbox_controller.h)
+
+ Allocate mbox_controller and the array of mbox_chan.
+Populate mbox_chan_ops, except peek_data() all are mandatory.
+The controller driver might know a message has been consumed
+by the remote by getting an IRQ or polling some hardware flag
+or it can never know (the client knows by way of the protocol).
+The method in order of preference is IRQ -> Poll -> None, which
+the controller driver should set via 'txdone_irq' or 'txdone_poll'
+or neither.
+
+
+       Part 2 - Client Driver (See include/linux/mailbox_client.h)
+
+ The client might want to operate in blocking mode (synchronously
+send a message through before returning) or non-blocking/async mode (submit
+a message and a callback function to the API and return immediately).
+
+
+struct demo_client {
+       struct mbox_client cl;
+       struct mbox_chan *mbox;
+       struct completion c;
+       bool async;
+       /* ... */
+};
+
+/*
+ * This is the handler for data received from remote. The behaviour is purely
+ * dependent upon the protocol. This is just an example.
+ */
+static void message_from_remote(struct mbox_client *cl, void *mssg)
+{
+       struct demo_client *dc = container_of(mbox_client,
+                                               struct demo_client, cl);
+       if (dc->async) {
+               if (is_an_ack(mssg)) {
+                       /* An ACK to our last sample sent */
+                       return; /* Or do something else here */
+               } else { /* A new message from remote */
+                       queue_req(mssg);
+               }
+       } else {
+               /* Remote f/w sends only ACK packets on this channel */
+               return;
+       }
+}
+
+static void sample_sent(struct mbox_client *cl, void *mssg, int r)
+{
+       struct demo_client *dc = container_of(mbox_client,
+                                               struct demo_client, cl);
+       complete(&dc->c);
+}
+
+static void client_demo(struct platform_device *pdev)
+{
+       struct demo_client *dc_sync, *dc_async;
+       /* The controller already knows async_pkt and sync_pkt */
+       struct async_pkt ap;
+       struct sync_pkt sp;
+
+       dc_sync = kzalloc(sizeof(*dc_sync), GFP_KERNEL);
+       dc_async = kzalloc(sizeof(*dc_async), GFP_KERNEL);
+
+       /* Populate non-blocking mode client */
+       dc_async->cl.dev = &pdev->dev;
+       dc_async->cl.rx_callback = message_from_remote;
+       dc_async->cl.tx_done = sample_sent;
+       dc_async->cl.tx_block = false;
+       dc_async->cl.tx_tout = 0; /* doesn't matter here */
+       dc_async->cl.knows_txdone = false; /* depending upon protocol */
+       dc_async->async = true;
+       init_completion(&dc_async->c);
+
+       /* Populate blocking mode client */
+       dc_sync->cl.dev = &pdev->dev;
+       dc_sync->cl.rx_callback = message_from_remote;
+       dc_sync->cl.tx_done = NULL; /* operate in blocking mode */
+       dc_sync->cl.tx_block = true;
+       dc_sync->cl.tx_tout = 500; /* by half a second */
+       dc_sync->cl.knows_txdone = false; /* depending upon protocol */
+       dc_sync->async = false;
+
+       /* ASync mailbox is listed second in 'mboxes' property */
+       dc_async->mbox = mbox_request_channel(&dc_async->cl, 1);
+       /* Populate data packet */
+       /* ap.xxx = 123; etc */
+       /* Send async message to remote */
+       mbox_send_message(dc_async->mbox, &ap);
+
+       /* Sync mailbox is listed first in 'mboxes' property */
+       dc_sync->mbox = mbox_request_channel(&dc_sync->cl, 0);
+       /* Populate data packet */
+       /* sp.abc = 123; etc */
+       /* Send message to remote in blocking mode */
+       mbox_send_message(dc_sync->mbox, &sp);
+       /* At this point 'sp' has been sent */
+
+       /* Now wait for async chan to be done */
+       wait_for_completion(&dc_async->c);
+}
index 99d2164180613804ecc3e210879ff96760636e16..fbfc9e0d75e0d8011b8e2b097c9491b3d816cb59 100644 (file)
@@ -1339,6 +1339,19 @@ ndisc_notify - BOOLEAN
        1 - Generate unsolicited neighbour advertisements when device is brought
            up or hardware address changes.
 
+optimistic_dad - BOOLEAN
+       Whether to perform Optimistic Duplicate Address Detection (RFC 4429).
+               0: disabled (default)
+               1: enabled
+
+use_optimistic - BOOLEAN
+       If enabled, do not classify optimistic addresses as deprecated during
+       source address selection.  Preferred addresses will still be chosen
+       before optimistic addresses, subject to other ranking in the source
+       address selection algorithm.
+               0: disabled (default)
+               1: enabled
+
 icmp/*:
 ratelimit - INTEGER
        Limit the maximal rates for sending ICMPv6 packets.
index 95731a08f25787ff77a03a4f542dec5791f120e6..8f08b2a717918cfce502477ae1f980ad7c5aa417 100644 (file)
@@ -2026,8 +2026,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
   -------------------
 
     Module for sound cards based on the Asus AV66/AV100/AV200 chips,
-    i.e., Xonar D1, DX, D2, D2X, DS, Essence ST (Deluxe), Essence STX,
-    HDAV1.3 (Deluxe), and HDAV1.3 Slim.
+    i.e., Xonar D1, DX, D2, D2X, DS, DSX, Essence ST (Deluxe),
+    Essence STX (II), HDAV1.3 (Deluxe), and HDAV1.3 Slim.
 
     This module supports autoprobe and multiple cards.
 
index b0714d8f678ac51d0c280a4f5f2980196052421f..8dfb6a5f427d941099732eb23e16435ac9a0df94 100644 (file)
@@ -29,6 +29,9 @@ Rules on what kind of patches are accepted, and which ones are not, into the
 
 Procedure for submitting patches to the -stable tree:
 
+ - If the patch covers files in net/ or drivers/net please follow netdev stable
+   submission guidelines as described in
+   Documentation/networking/netdev-FAQ.txt
  - Send the patch, after verifying that it follows the above rules, to
    stable@vger.kernel.org.  You must note the upstream commit ID in the
    changelog of your submission, as well as the kernel version you wish
index 5f91eda9164713faa1f66a613a998d36b44f5191..257a1f1eecc7f1514940126ebc88a37c40826a46 100644 (file)
@@ -148,9 +148,9 @@ of banks, as set via the KVM_X86_SETUP_MCE ioctl.
 
 4.4 KVM_CHECK_EXTENSION
 
-Capability: basic
+Capability: basic, KVM_CAP_CHECK_EXTENSION_VM for vm ioctl
 Architectures: all
-Type: system ioctl
+Type: system ioctl, vm ioctl
 Parameters: extension identifier (KVM_CAP_*)
 Returns: 0 if unsupported; 1 (or some other positive integer) if supported
 
@@ -160,6 +160,9 @@ receives an integer that describes the extension availability.
 Generally 0 means no and 1 means yes, but some extensions may report
 additional information in the integer return value.
 
+Based on their initialization different VMs may have different capabilities.
+It is thus encouraged to use the vm ioctl to query for capabilities (available
+with KVM_CAP_CHECK_EXTENSION_VM on the vm fd)
 
 4.5 KVM_GET_VCPU_MMAP_SIZE
 
@@ -280,7 +283,7 @@ kvm_run' (see below).
 4.11 KVM_GET_REGS
 
 Capability: basic
-Architectures: all except ARM
+Architectures: all except ARM, arm64
 Type: vcpu ioctl
 Parameters: struct kvm_regs (out)
 Returns: 0 on success, -1 on error
@@ -301,7 +304,7 @@ struct kvm_regs {
 4.12 KVM_SET_REGS
 
 Capability: basic
-Architectures: all except ARM
+Architectures: all except ARM, arm64
 Type: vcpu ioctl
 Parameters: struct kvm_regs (in)
 Returns: 0 on success, -1 on error
@@ -587,7 +590,7 @@ struct kvm_fpu {
 4.24 KVM_CREATE_IRQCHIP
 
 Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64, ARM
+Architectures: x86, ia64, ARM, arm64
 Type: vm ioctl
 Parameters: none
 Returns: 0 on success, -1 on error
@@ -595,14 +598,14 @@ Returns: 0 on success, -1 on error
 Creates an interrupt controller model in the kernel.  On x86, creates a virtual
 ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
 local APIC.  IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
-only go to the IOAPIC.  On ia64, a IOSAPIC is created. On ARM, a GIC is
+only go to the IOAPIC.  On ia64, a IOSAPIC is created. On ARM/arm64, a GIC is
 created.
 
 
 4.25 KVM_IRQ_LINE
 
 Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64, arm
+Architectures: x86, ia64, arm, arm64
 Type: vm ioctl
 Parameters: struct kvm_irq_level
 Returns: 0 on success, -1 on error
@@ -612,9 +615,10 @@ On some architectures it is required that an interrupt controller model has
 been previously created with KVM_CREATE_IRQCHIP.  Note that edge-triggered
 interrupts require the level to be set to 1 and then back to 0.
 
-ARM can signal an interrupt either at the CPU level, or at the in-kernel irqchip
-(GIC), and for in-kernel irqchip can tell the GIC to use PPIs designated for
-specific cpus.  The irq field is interpreted like this:
+ARM/arm64 can signal an interrupt either at the CPU level, or at the
+in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to
+use PPIs designated for specific cpus.  The irq field is interpreted
+like this:
 
  Â bits:  | 31 ... 24 | 23  ... 16 | 15    ...    0 |
   field: | irq_type  | vcpu_index |     irq_id     |
@@ -968,18 +972,20 @@ uniprocessor guests).
 
 Possible values are:
 
- - KVM_MP_STATE_RUNNABLE:        the vcpu is currently running
+ - KVM_MP_STATE_RUNNABLE:        the vcpu is currently running [x86, ia64]
  - KVM_MP_STATE_UNINITIALIZED:   the vcpu is an application processor (AP)
-                                 which has not yet received an INIT signal
+                                 which has not yet received an INIT signal [x86,
+                                 ia64]
  - KVM_MP_STATE_INIT_RECEIVED:   the vcpu has received an INIT signal, and is
-                                 now ready for a SIPI
+                                 now ready for a SIPI [x86, ia64]
  - KVM_MP_STATE_HALTED:          the vcpu has executed a HLT instruction and
-                                 is waiting for an interrupt
+                                 is waiting for an interrupt [x86, ia64]
  - KVM_MP_STATE_SIPI_RECEIVED:   the vcpu has just received a SIPI (vector
-                                 accessible via KVM_GET_VCPU_EVENTS)
+                                 accessible via KVM_GET_VCPU_EVENTS) [x86, ia64]
 
-This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
-irqchip, the multiprocessing state must be maintained by userspace.
+On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
+in-kernel irqchip, the multiprocessing state must be maintained by userspace on
+these architectures.
 
 
 4.39 KVM_SET_MP_STATE
@@ -993,8 +999,9 @@ Returns: 0 on success; -1 on error
 Sets the vcpu's current "multiprocessing state"; see KVM_GET_MP_STATE for
 arguments.
 
-This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
-irqchip, the multiprocessing state must be maintained by userspace.
+On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
+in-kernel irqchip, the multiprocessing state must be maintained by userspace on
+these architectures.
 
 
 4.40 KVM_SET_IDENTITY_MAP_ADDR
@@ -1121,9 +1128,9 @@ struct kvm_cpuid2 {
        struct kvm_cpuid_entry2 entries[0];
 };
 
-#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1
-#define KVM_CPUID_FLAG_STATEFUL_FUNC    2
-#define KVM_CPUID_FLAG_STATE_READ_NEXT  4
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX                BIT(0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC           BIT(1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT         BIT(2)
 
 struct kvm_cpuid_entry2 {
        __u32 function;
@@ -1831,6 +1838,22 @@ ARM 32-bit VFP control registers have the following id bit patterns:
 ARM 64-bit FP registers have the following id bit patterns:
   0x4030 0000 0012 0 <regno:12>
 
+
+arm64 registers are mapped using the lower 32 bits. The upper 16 of
+that is the register group type, or coprocessor number:
+
+arm64 core/FP-SIMD registers have the following id bit patterns. Note
+that the size of the access is variable, as the kvm_regs structure
+contains elements ranging from 32 to 128 bits. The index is a 32bit
+value in the kvm_regs structure seen as a 32bit array.
+  0x60x0 0000 0010 <index into the kvm_regs struct:16>
+
+arm64 CCSIDR registers are demultiplexed by CSSELR value:
+  0x6020 0000 0011 00 <csselr:8>
+
+arm64 system registers have the following id bit patterns:
+  0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
+
 4.69 KVM_GET_ONE_REG
 
 Capability: KVM_CAP_ONE_REG
@@ -2264,7 +2287,7 @@ current state.  "addr" is ignored.
 4.77 KVM_ARM_VCPU_INIT
 
 Capability: basic
-Architectures: arm
+Architectures: arm, arm64
 Type: vcpu ioctl
 Parameters: struct kvm_vcpu_init (in)
 Returns: 0 on success; -1 on error
@@ -2283,12 +2306,14 @@ should be created before this ioctl is invoked.
 Possible features:
        - KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
          Depends on KVM_CAP_ARM_PSCI.
+       - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
+         Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
 
 
 4.78 KVM_GET_REG_LIST
 
 Capability: basic
-Architectures: arm
+Architectures: arm, arm64
 Type: vcpu ioctl
 Parameters: struct kvm_reg_list (in/out)
 Returns: 0 on success; -1 on error
@@ -2305,10 +2330,10 @@ This ioctl returns the guest registers that are supported for the
 KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
 
 
-4.80 KVM_ARM_SET_DEVICE_ADDR
+4.85 KVM_ARM_SET_DEVICE_ADDR (deprecated)
 
 Capability: KVM_CAP_ARM_SET_DEVICE_ADDR
-Architectures: arm
+Architectures: arm, arm64
 Type: vm ioctl
 Parameters: struct kvm_arm_device_address (in)
 Returns: 0 on success, -1 on error
@@ -2329,20 +2354,25 @@ can access emulated or directly exposed devices, which the host kernel needs
 to know about. The id field is an architecture specific identifier for a
 specific device.
 
-ARM divides the id field into two parts, a device id and an address type id
-specific to the individual device.
+ARM/arm64 divides the id field into two parts, a device id and an
+address type id specific to the individual device.
 
    bits:  | 63        ...       32 | 31    ...    16 | 15    ...    0 |
   field: |        0x00000000      |     device id   |  addr type id  |
 
-ARM currently only require this when using the in-kernel GIC support for the
-hardware VGIC features, using KVM_ARM_DEVICE_VGIC_V2 as the device id.  When
-setting the base address for the guest's mapping of the VGIC virtual CPU
-and distributor interface, the ioctl must be called after calling
-KVM_CREATE_IRQCHIP, but before calling KVM_RUN on any of the VCPUs.  Calling
-this ioctl twice for any of the base addresses will return -EEXIST.
+ARM/arm64 currently only require this when using the in-kernel GIC
+support for the hardware VGIC features, using KVM_ARM_DEVICE_VGIC_V2
+as the device id.  When setting the base address for the guest's
+mapping of the VGIC virtual CPU and distributor interface, the ioctl
+must be called after calling KVM_CREATE_IRQCHIP, but before calling
+KVM_RUN on any of the VCPUs.  Calling this ioctl twice for any of the
+base addresses will return -EEXIST.
+
+Note, this IOCTL is deprecated and the more flexible SET/GET_DEVICE_ATTR API
+should be used instead.
 
-4.82 KVM_PPC_RTAS_DEFINE_TOKEN
+
+4.86 KVM_PPC_RTAS_DEFINE_TOKEN
 
 Capability: KVM_CAP_PPC_RTAS
 Architectures: ppc
@@ -2612,6 +2642,21 @@ It gets triggered whenever both KVM_CAP_PPC_EPR are enabled and an
 external interrupt has just been delivered into the guest. User space
 should put the acknowledged interrupt vector into the 'epr' field.
 
+               /* KVM_EXIT_SYSTEM_EVENT */
+               struct {
+#define KVM_SYSTEM_EVENT_SHUTDOWN       1
+#define KVM_SYSTEM_EVENT_RESET          2
+                       __u32 type;
+                       __u64 flags;
+               } system_event;
+
+If exit_reason is KVM_EXIT_SYSTEM_EVENT then the vcpu has triggered
+a system-level event using some architecture specific mechanism (hypercall
+or some special instruction). In case of ARM/ARM64, this is triggered using
+HVC instruction based PSCI call from the vcpu. The 'type' field describes
+the system-level event type. The 'flags' field describes architecture
+specific flags for the system-level event.
+
                /* Fix the size of the union. */
                char padding[256];
        };
@@ -2641,6 +2686,77 @@ and usually define the validity of a groups of registers. (e.g. one bit
 };
 
 
+4.81 KVM_GET_EMULATED_CPUID
+
+Capability: KVM_CAP_EXT_EMUL_CPUID
+Architectures: x86
+Type: system ioctl
+Parameters: struct kvm_cpuid2 (in/out)
+Returns: 0 on success, -1 on error
+
+struct kvm_cpuid2 {
+       __u32 nent;
+       __u32 flags;
+       struct kvm_cpuid_entry2 entries[0];
+};
+
+The member 'flags' is used for passing flags from userspace.
+
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX                BIT(0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC           BIT(1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT         BIT(2)
+
+struct kvm_cpuid_entry2 {
+       __u32 function;
+       __u32 index;
+       __u32 flags;
+       __u32 eax;
+       __u32 ebx;
+       __u32 ecx;
+       __u32 edx;
+       __u32 padding[3];
+};
+
+This ioctl returns x86 cpuid features which are emulated by
+kvm. Userspace can use the information returned by this ioctl to query
+which features are emulated by kvm instead of being present natively.
+
+Userspace invokes KVM_GET_EMULATED_CPUID by passing a kvm_cpuid2
+structure with the 'nent' field indicating the number of entries in
+the variable-size array 'entries'. If the number of entries is too low
+to describe the cpu capabilities, an error (E2BIG) is returned. If the
+number is too high, the 'nent' field is adjusted and an error (ENOMEM)
+is returned. If the number is just right, the 'nent' field is adjusted
+to the number of valid entries in the 'entries' array, which is then
+filled.
+
+The entries returned are the set CPUID bits of the respective features
+which kvm emulates, as returned by the CPUID instruction, with unknown
+or unsupported feature bits cleared.
+
+Features like x2apic, for example, may not be present in the host cpu
+but are exposed by kvm in KVM_GET_SUPPORTED_CPUID because they can be
+emulated efficiently and thus not included here.
+
+The fields in each entry are defined as follows:
+
+  function: the eax value used to obtain the entry
+  index: the ecx value used to obtain the entry (for entries that are
+         affected by ecx)
+  flags: an OR of zero or more of the following:
+        KVM_CPUID_FLAG_SIGNIFCANT_INDEX:
+           if the index field is valid
+        KVM_CPUID_FLAG_STATEFUL_FUNC:
+           if cpuid for this function returns different values for successive
+           invocations; there will be several entries with the same function,
+           all with this flag set
+        KVM_CPUID_FLAG_STATE_READ_NEXT:
+           for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is
+           the first entry to be read by a cpu
+   eax, ebx, ecx, edx: the values returned by the cpuid instruction for
+         this function/index combination
+
+
 6. Capabilities that can be enabled
 -----------------------------------
 
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
new file mode 100644 (file)
index 0000000..df8b0c7
--- /dev/null
@@ -0,0 +1,83 @@
+ARM Virtual Generic Interrupt Controller (VGIC)
+===============================================
+
+Device types supported:
+  KVM_DEV_TYPE_ARM_VGIC_V2     ARM Generic Interrupt Controller v2.0
+
+Only one VGIC instance may be instantiated through either this API or the
+legacy KVM_CREATE_IRQCHIP api.  The created VGIC will act as the VM interrupt
+controller, requiring emulated user-space devices to inject interrupts to the
+VGIC instead of directly to CPUs.
+
+Groups:
+  KVM_DEV_ARM_VGIC_GRP_ADDR
+  Attributes:
+    KVM_VGIC_V2_ADDR_TYPE_DIST (rw, 64-bit)
+      Base address in the guest physical address space of the GIC distributor
+      register mappings.
+
+    KVM_VGIC_V2_ADDR_TYPE_CPU (rw, 64-bit)
+      Base address in the guest physical address space of the GIC virtual cpu
+      interface register mappings.
+
+  KVM_DEV_ARM_VGIC_GRP_DIST_REGS
+  Attributes:
+    The attr field of kvm_device_attr encodes two values:
+    bits:     | 63   ....  40 | 39 ..  32  |  31   ....    0 |
+    values:   |    reserved   |   cpu id   |      offset     |
+
+    All distributor regs are (rw, 32-bit)
+
+    The offset is relative to the "Distributor base address" as defined in the
+    GICv2 specs.  Getting or setting such a register has the same effect as
+    reading or writing the register on the actual hardware from the cpu
+    specified with cpu id field.  Note that most distributor fields are not
+    banked, but return the same value regardless of the cpu id used to access
+    the register.
+  Limitations:
+    - Priorities are not implemented, and registers are RAZ/WI
+  Errors:
+    -ENODEV: Getting or setting this register is not yet supported
+    -EBUSY: One or more VCPUs are running
+
+  KVM_DEV_ARM_VGIC_GRP_CPU_REGS
+  Attributes:
+    The attr field of kvm_device_attr encodes two values:
+    bits:     | 63   ....  40 | 39 ..  32  |  31   ....    0 |
+    values:   |    reserved   |   cpu id   |      offset     |
+
+    All CPU interface regs are (rw, 32-bit)
+
+    The offset specifies the offset from the "CPU interface base address" as
+    defined in the GICv2 specs.  Getting or setting such a register has the
+    same effect as reading or writing the register on the actual hardware.
+
+    The Active Priorities Registers APRn are implementation defined, so we set a
+    fixed format for our implementation that fits with the model of a "GICv2
+    implementation without the security extensions" which we present to the
+    guest.  This interface always exposes four register APR[0-3] describing the
+    maximum possible 128 preemption levels.  The semantics of the register
+    indicate if any interrupts in a given preemption level are in the active
+    state by setting the corresponding bit.
+
+    Thus, preemption level X has one or more active interrupts if and only if:
+
+      APRn[X mod 32] == 0b1,  where n = X / 32
+
+    Bits for undefined preemption levels are RAZ/WI.
+
+  Limitations:
+    - Priorities are not implemented, and registers are RAZ/WI
+  Errors:
+    -ENODEV: Getting or setting this register is not yet supported
+    -EBUSY: One or more VCPUs are running
+
+  KVM_DEV_ARM_VGIC_GRP_NR_IRQS
+  Attributes:
+    A value describing the number of interrupts (SGI, PPI and SPI) for
+    this GIC instance, ranging from 64 to 1024, in increments of 32.
+
+  Errors:
+    -EINVAL: Value set is out of the expected range
    -EBUSY: Value has already been set, or GIC has already been initialized
+            with default values.
diff --git a/Documentation/virtual/kvm/devices/vfio.txt b/Documentation/virtual/kvm/devices/vfio.txt
new file mode 100644 (file)
index 0000000..ef51740
--- /dev/null
@@ -0,0 +1,22 @@
+VFIO virtual device
+===================
+
+Device types supported:
+  KVM_DEV_TYPE_VFIO
+
+Only one VFIO instance may be created per VM.  The created device
+tracks VFIO groups in use by the VM and features of those groups
+important to the correctness and acceleration of the VM.  As groups
+are enabled and disabled for use by the VM, KVM should be updated
+about their presence.  When registered with KVM, a reference to the
+VFIO-group is held by KVM.
+
+Groups:
+  KVM_DEV_VFIO_GROUP
+
+KVM_DEV_VFIO_GROUP attributes:
+  KVM_DEV_VFIO_GROUP_ADD: Add a VFIO group to VFIO-KVM device tracking
+  KVM_DEV_VFIO_GROUP_DEL: Remove a VFIO group from VFIO-KVM device tracking
+
+For each, kvm_device_attr.addr points to an int32_t file descriptor
+for the VFIO group.
index 41b7ac9884b5ebdeaba602077bebb11efebcc648..ba035c33d01c0d323c156868b3b8d8432a9485c3 100644 (file)
@@ -132,10 +132,14 @@ See the comments in spte_has_volatile_bits() and mmu_spte_update().
 ------------
 
 Name:          kvm_lock
-Type:          raw_spinlock
+Type:          spinlock_t
 Arch:          any
 Protects:      - vm_list
-               - hardware virtualization enable/disable
+
+Name:          kvm_count_lock
+Type:          raw_spinlock_t
+Arch:          any
+Protects:      - hardware virtualization enable/disable
 Comment:       'raw' because hardware enabling/disabling must be atomic /wrt
                migration.
 
index 881582f75c9ceb14e6eaacc48f3edecef31b3e3a..bd4370487b07e2c24c857fc74c5a110905d0593a 100644 (file)
@@ -12,6 +12,8 @@ ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space
 ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
 ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
+ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
+... unused hole ...
 ffffffff80000000 - ffffffffa0000000 (=512 MB)  kernel text mapping, from phys 0
 ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
 ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
index b4f332475b7369c336066ff0035641fd5d0b1c45..7433b84439f3bde2cdb623b7d8a505cb49c8c0b1 100644 (file)
@@ -4719,6 +4719,15 @@ F:       arch/arm/include/uapi/asm/kvm*
 F:     arch/arm/include/asm/kvm*
 F:     arch/arm/kvm/
 
+KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
+M:     Marc Zyngier <marc.zyngier@arm.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     kvmarm@lists.cs.columbia.edu
+S:     Maintained
+F:     arch/arm64/include/uapi/asm/kvm*
+F:     arch/arm64/include/asm/kvm*
+F:     arch/arm64/kvm/
+
 KEXEC
 M:     Eric Biederman <ebiederm@xmission.com>
 W:     http://kernel.org/pub/linux/utils/kernel/kexec/
index 72174189a0bc84ec6b7c6deacae76a96c95edda6..6a56053e1b2a6c9acfa188a6401a4cf3db9ba5de 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 49
+SUBLEVEL = 61
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish
 
@@ -196,9 +196,16 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
 # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
 ARCH           ?= arm
 ARCH           ?= $(SUBARCH)
+ifeq ($(ARCH),arm64)
+ifneq ($(wildcard ../prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9),)
+CROSS_COMPILE  ?= ../prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/aarch64-linux-android-
+endif
+endif
+ifeq ($(ARCH),arm)
 ifneq ($(wildcard ../prebuilts/gcc/linux-x86/arm/arm-eabi-4.6),)
 CROSS_COMPILE  ?= ../prebuilts/gcc/linux-x86/arm/arm-eabi-4.6/bin/arm-eabi-
 endif
+endif
 CROSS_COMPILE  ?= $(CONFIG_CROSS_COMPILE:"%"=%)
 
 # Architecture as present in compile.h
@@ -626,6 +633,8 @@ KBUILD_CFLAGS       += -fomit-frame-pointer
 endif
 endif
 
+KBUILD_CFLAGS   += $(call cc-option, -fno-var-tracking-assignments)
+
 ifdef CONFIG_DEBUG_INFO
 KBUILD_CFLAGS  += -g
 KBUILD_AFLAGS  += -gdwarf-2
index 5b888487ede11836dbb9850bf2f6a949985210ea..d8503e450957b18b3c9971e3c2634b8841ef5fa1 100644 (file)
@@ -6,6 +6,7 @@ CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_ANDROID_INTF_ALARM_DEV=y
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ARMV7_COMPAT=y
 CONFIG_ASHMEM=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_BLK_DEV_INITRD=y
@@ -24,6 +25,7 @@ CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_INET=y
 CONFIG_INET_ESP=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
 CONFIG_IP6_NF_FILTER=y
 CONFIG_IP6_NF_IPTABLES=y
 CONFIG_IP6_NF_MANGLE=y
index 00e3702ec79b2f8affb37248f29a06a5807ecafd..4c0a1d03ae0d82940ac6328ebbd8fba3c32c3d67 100644 (file)
@@ -331,6 +331,7 @@ config HAVE_ARCH_SECCOMP_FILTER
          - secure_computing is called from a ptrace_event()-safe context
          - secure_computing return value is checked and a return value of -1
            results in the system call being skipped immediately.
+         - seccomp syscall wired up
 
 config SECCOMP_FILTER
        def_bool y
index 0c4132dd3507a0b62b3c40c2fc6fd4065a325262..98838a05ba6d89f0459742131010f57c38cbed05 100644 (file)
@@ -89,8 +89,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
        const struct exception_table_entry *fixup;
        int fault, si_code = SEGV_MAPERR;
        siginfo_t info;
-       unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                             (cause > 0 ? FAULT_FLAG_WRITE : 0));
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        /* As of EV6, a load into $31/$f31 is a prefetch, and never faults
           (or is suppressed by the PALcode).  Support that for older CPUs
@@ -115,7 +114,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
        if (address >= TASK_SIZE)
                goto vmalloc_fault;
 #endif
-
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
@@ -142,6 +142,7 @@ retry:
        } else {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        }
 
        /* If for any reason at all we couldn't handle the fault,
index 4f31b2eb5cdf680cb6b26dbae5aa0119d39bef33..398064cef746015563166c83e8a1d4db335f9eb9 100644 (file)
@@ -20,7 +20,7 @@
                /* this is for console on PGU */
                /* bootargs = "console=tty0 consoleblank=0"; */
                /* this is for console on serial */
-               bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug";
+               bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
        };
 
        aliases {
index 4930957ca3d38c4cb312aa60f21ffa9cbb413087..e897610c657a4dfecc458807cc7819fbf6b3ac13 100644 (file)
@@ -19,7 +19,7 @@
  * register API yet */
 #undef DBG_MAX_REG_NUM
 
-#define GDB_MAX_REGS           39
+#define GDB_MAX_REGS           87
 
 #define BREAK_INSTR_SIZE       2
 #define CACHE_FLUSH_IS_SAFE    1
@@ -33,23 +33,27 @@ static inline void arch_kgdb_breakpoint(void)
 
 extern void kgdb_trap(struct pt_regs *regs, int param);
 
-enum arc700_linux_regnums {
+/* This is the numbering of registers according to the GDB. See GDB's
+ * arc-tdep.h for details.
+ *
+ * Registers are ordered for GDB 7.5. It is incompatible with GDB 6.8. */
+enum arc_linux_regnums {
        _R0             = 0,
        _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
        _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
        _R25, _R26,
-       _BTA            = 27,
-       _LP_START       = 28,
-       _LP_END         = 29,
-       _LP_COUNT       = 30,
-       _STATUS32       = 31,
-       _BLINK          = 32,
-       _FP             = 33,
-       __SP            = 34,
-       _EFA            = 35,
-       _RET            = 36,
-       _ORIG_R8        = 37,
-       _STOP_PC        = 38
+       _FP             = 27,
+       __SP            = 28,
+       _R30            = 30,
+       _BLINK          = 31,
+       _LP_COUNT       = 60,
+       _STOP_PC        = 64,
+       _RET            = 64,
+       _LP_START       = 65,
+       _LP_END         = 66,
+       _STATUS32       = 67,
+       _ECR            = 76,
+       _BTA            = 82,
 };
 
 #else
index 30333cec0fef274365aeb559084d58a5ee023a9b..ef9d79a3db2550e4041b61f9dab4c3188952d202 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef _UAPI__ASM_ARC_PTRACE_H
 #define _UAPI__ASM_ARC_PTRACE_H
 
+#define PTRACE_GET_THREAD_AREA 25
 
 #ifndef __ASSEMBLY__
 /*
index 0851604bb9cd8929f4d98465d4ca8a646a255ec3..f8a36ed9e0d57807af574fea6d1a5c0230c5b547 100644 (file)
@@ -136,6 +136,10 @@ long arch_ptrace(struct task_struct *child, long request,
        pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
 
        switch (request) {
+       case PTRACE_GET_THREAD_AREA:
+               ret = put_user(task_thread_info(child)->thr_ptr,
+                              (unsigned long __user *)data);
+               break;
        default:
                ret = ptrace_request(child, request, addr, data);
                break;
index 331a0846628e05ffc1a8b37b0b6540dcd603809c..50533b750a99d88dfd7e0fa1d89deb87717f9095 100644 (file)
@@ -59,8 +59,7 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
        struct mm_struct *mm = tsk->mm;
        siginfo_t info;
        int fault, ret;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                               (write ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        /*
         * We fault-in kernel-space virtual memory on-demand. The
@@ -88,6 +87,8 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
@@ -115,12 +116,12 @@ good_area:
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
 
-survive:
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
@@ -200,14 +201,12 @@ no_context:
        die("Oops", regs, address, cause_code);
 
 out_of_memory:
-       if (is_global_init(tsk)) {
-               yield();
-               goto survive;
-       }
        up_read(&mm->mmap_sem);
 
-       if (user_mode(regs))
-               do_group_exit(SIGKILL); /* This will never return */
+       if (user_mode(regs)) {
+               pagefault_out_of_memory();
+               return;
+       }
 
        goto no_context;
 
index f5c385a93edebb06bc0d5c89c61a95f2cdb86b52..0569212edae566e2d521527bddfa1c8b6774c56d 100644 (file)
@@ -4,6 +4,7 @@ config ARM
        select ARCH_BINFMT_ELF_RANDOMIZE_PIE
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
        select ARCH_HAVE_CUSTOM_GPIO_H
+       select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_EXTABLE_SORT if MMU
@@ -1877,6 +1878,14 @@ config HW_PERF_EVENTS
          Enable hardware performance counter support for perf events. If
          disabled, perf events will use software events only.
 
+config SYS_SUPPORTS_HUGETLBFS
+       def_bool y
+       depends on ARM_LPAE
+
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       def_bool y
+       depends on ARM_LPAE
+
 source "mm/Kconfig"
 
 config FORCE_MAX_ZONEORDER
index 9ce8ba1a1433273a3e362b735d5631e0a13318b2..adb9aa5c88c7a09f305e234d9a49aa1fd1cf31a0 100644 (file)
@@ -48,6 +48,8 @@ CONFIG_SERIAL_SIRFSOC=y
 CONFIG_SERIAL_SIRFSOC_CONSOLE=y
 CONFIG_SERIAL_VT8500=y
 CONFIG_SERIAL_VT8500_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_IPMI_HANDLER=y
 CONFIG_IPMI_SI=y
 CONFIG_I2C=y
index e780afbcee545152a98bab26aeba053e2128e1a4..f0963bb79935f6930af67ddce5b3d620243bfcad 100644 (file)
@@ -30,8 +30,8 @@
  * Endian independent macros for shifting bytes within registers.
  */
 #ifndef __ARMEB__
-#define pull            lsr
-#define push            lsl
+#define lspull          lsr
+#define lspush          lsl
 #define get_byte_0      lsl #0
 #define get_byte_1     lsr #8
 #define get_byte_2     lsr #16
@@ -41,8 +41,8 @@
 #define put_byte_2     lsl #16
 #define put_byte_3     lsl #24
 #else
-#define pull            lsl
-#define push            lsr
+#define lspull          lsl
+#define lspush          lsr
 #define get_byte_0     lsr #24
 #define get_byte_1     lsr #16
 #define get_byte_2     lsr #8
 #ifdef CONFIG_SMP
 #if __LINUX_ARM_ARCH__ >= 7
        .ifeqs "\mode","arm"
-       ALT_SMP(dmb)
+       ALT_SMP(dmb     ish)
        .else
-       ALT_SMP(W(dmb))
+       ALT_SMP(W(dmb)  ish)
        .endif
 #elif __LINUX_ARM_ARCH__ == 6
        ALT_SMP(mcr     p15, 0, r0, c7, c10, 5) @ dmb
index 8dcd9c702d90c9c352d85595d0ea8f83a42f215e..2f59f74433964016003167007badbeffcbba465a 100644 (file)
 #endif
 
 #if __LINUX_ARM_ARCH__ >= 7
-#define isb() __asm__ __volatile__ ("isb" : : : "memory")
-#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
-#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
+#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
+#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
 #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
                                    : : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
                                    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
                                    : : "r" (0) : "memory")
 #elif defined(CONFIG_CPU_FA526)
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
                                    : : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
                                    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #else
-#define isb() __asm__ __volatile__ ("" : : : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define isb(x) __asm__ __volatile__ ("" : : : "memory")
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
                                    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #endif
 
 #ifdef CONFIG_ARCH_HAS_BARRIERS
@@ -42,7 +42,7 @@
 #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
 #define mb()           do { dsb(); outer_sync(); } while (0)
 #define rmb()          dsb()
-#define wmb()          mb()
+#define wmb()          do { dsb(st); outer_sync(); } while (0)
 #else
 #define mb()           barrier()
 #define rmb()          barrier()
 #define smp_rmb()      barrier()
 #define smp_wmb()      barrier()
 #else
-#define smp_mb()       dmb()
-#define smp_rmb()      dmb()
-#define smp_wmb()      dmb()
+#define smp_mb()       dmb(ish)
+#define smp_rmb()      smp_mb()
+#define smp_wmb()      dmb(ishst)
 #endif
 
+#define smp_store_release(p, v)                                                \
+do {                                                                   \
+       compiletime_assert_atomic_type(*p);                             \
+       smp_mb();                                                       \
+       ACCESS_ONCE(*p) = (v);                                          \
+} while (0)
+
+#define smp_load_acquire(p)                                            \
+({                                                                     \
+       typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
+       compiletime_assert_atomic_type(*p);                             \
+       smp_mb();                                                       \
+       ___p1;                                                          \
+})
+
 #define read_barrier_depends()         do { } while(0)
 #define smp_read_barrier_depends()     do { } while(0)
 
index 3b74a681a0086c5d48ab218d162b956e79ed0309..3392fe2d317493a0d92d43af34cd66abcc915f62 100644 (file)
 #define ARM_CPU_IMP_ARM                        0x41
 #define ARM_CPU_IMP_INTEL              0x69
 
-#define ARM_CPU_PART_ARM1136           0xB360
-#define ARM_CPU_PART_ARM1156           0xB560
-#define ARM_CPU_PART_ARM1176           0xB760
-#define ARM_CPU_PART_ARM11MPCORE       0xB020
-#define ARM_CPU_PART_CORTEX_A8         0xC080
-#define ARM_CPU_PART_CORTEX_A9         0xC090
-#define ARM_CPU_PART_CORTEX_A5         0xC050
-#define ARM_CPU_PART_CORTEX_A15                0xC0F0
-#define ARM_CPU_PART_CORTEX_A7         0xC070
-#define ARM_CPU_PART_CORTEX_A12                0xC0D0
+/* ARM implemented processors */
+#define ARM_CPU_PART_ARM1136           0x4100b360
+#define ARM_CPU_PART_ARM1156           0x4100b560
+#define ARM_CPU_PART_ARM1176           0x4100b760
+#define ARM_CPU_PART_ARM11MPCORE       0x4100b020
+#define ARM_CPU_PART_CORTEX_A8         0x4100c080
+#define ARM_CPU_PART_CORTEX_A9         0x4100c090
+#define ARM_CPU_PART_CORTEX_A5         0x4100c050
+#define ARM_CPU_PART_CORTEX_A7         0x4100c070
+#define ARM_CPU_PART_CORTEX_A12                0x4100c0d0
+#define ARM_CPU_PART_CORTEX_A17                0x4100c0e0
+#define ARM_CPU_PART_CORTEX_A15                0x4100c0f0
 
 #define ARM_CPU_XSCALE_ARCH_MASK       0xe000
 #define ARM_CPU_XSCALE_ARCH_V1         0x2000
@@ -123,14 +125,24 @@ static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
        return (read_cpuid_id() & 0xFF000000) >> 24;
 }
 
-static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
+/*
+ * The CPU part number is meaningless without referring to the CPU
+ * implementer: implementers are free to define their own part numbers
+ * which are permitted to clash with other implementer part numbers.
+ */
+static inline unsigned int __attribute_const__ read_cpuid_part(void)
+{
+       return read_cpuid_id() & 0xff00fff0;
+}
+
+static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void)
 {
        return read_cpuid_id() & 0xFFF0;
 }
 
 static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
 {
-       return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK;
+       return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK;
 }
 
 static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h
new file mode 100644 (file)
index 0000000..d4014fb
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/include/asm/hugetlb-3level.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_3LEVEL_H
+#define _ASM_ARM_HUGETLB_3LEVEL_H
+
+
+/*
+ * If our huge pte is non-zero then mark the valid bit.
+ * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero
+ * ptes.
+ * (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes).
+ */
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       pte_t retval = *ptep;
+       if (pte_val(retval))
+               pte_val(retval) |= L_PTE_VALID;
+       return retval;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                                  pte_t *ptep, pte_t pte)
+{
+       set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep)
+{
+       ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                                           unsigned long addr, pte_t *ptep)
+{
+       return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+#endif /* _ASM_ARM_HUGETLB_3LEVEL_H */
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
new file mode 100644 (file)
index 0000000..1f1b1cd
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * arch/arm/include/asm/hugetlb.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_H
+#define _ASM_ARM_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+#include <asm/hugetlb-3level.h>
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+                                         unsigned long addr, unsigned long end,
+                                         unsigned long floor,
+                                         unsigned long ceiling)
+{
+       free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+                                        unsigned long addr, unsigned long len)
+{
+       return 0;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+                                        unsigned long addr, unsigned long len)
+{
+       struct hstate *h = hstate_file(file);
+       if (len & ~huge_page_mask(h))
+               return -EINVAL;
+       if (addr & ~huge_page_mask(h))
+               return -EINVAL;
+       return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+       return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       return pte_wrprotect(pte);
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+       return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+       clear_bit(PG_dcache_clean, &page->flags);
+}
+
+#endif /* _ASM_ARM_HUGETLB_H */
diff --git a/arch/arm/include/asm/kvm_arch_timer.h b/arch/arm/include/asm/kvm_arch_timer.h
deleted file mode 100644 (file)
index 68cb9e1..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ASM_ARM_KVM_ARCH_TIMER_H
-#define __ASM_ARM_KVM_ARCH_TIMER_H
-
-#include <linux/clocksource.h>
-#include <linux/hrtimer.h>
-#include <linux/workqueue.h>
-
-struct arch_timer_kvm {
-#ifdef CONFIG_KVM_ARM_TIMER
-       /* Is the timer enabled */
-       bool                    enabled;
-
-       /* Virtual offset */
-       cycle_t                 cntvoff;
-#endif
-};
-
-struct arch_timer_cpu {
-#ifdef CONFIG_KVM_ARM_TIMER
-       /* Registers: control register, timer value */
-       u32                             cntv_ctl;       /* Saved/restored */
-       cycle_t                         cntv_cval;      /* Saved/restored */
-
-       /*
-        * Anything that is not used directly from assembly code goes
-        * here.
-        */
-
-       /* Background timer used when the guest is not running */
-       struct hrtimer                  timer;
-
-       /* Work queued with the above timer expires */
-       struct work_struct              expired;
-
-       /* Background timer active */
-       bool                            armed;
-
-       /* Timer IRQ */
-       const struct kvm_irq_level      *irq;
-#endif
-};
-
-#ifdef CONFIG_KVM_ARM_TIMER
-int kvm_timer_hyp_init(void);
-int kvm_timer_init(struct kvm *kvm);
-void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
-void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
-void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
-#else
-static inline int kvm_timer_hyp_init(void)
-{
-       return 0;
-};
-
-static inline int kvm_timer_init(struct kvm *kvm)
-{
-       return 0;
-}
-
-static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
-static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {}
-static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {}
-static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {}
-#endif
-
-#endif
index 124623e5ef14ab2bde6b6ab954901fa4d4b8c3a7..816db0bf2dd8addbd9844488b5a72d4495be72c7 100644 (file)
  * The bits we set in HCR:
  * TAC:                Trap ACTLR
  * TSC:                Trap SMC
+ * TVM:                Trap VM ops (until MMU and caches are on)
  * TSW:                Trap cache operations by set/way
  * TWI:                Trap WFI
+ * TWE:                Trap WFE
  * TIDCP:      Trap L2CTLR/L2ECTLR
  * BSU_IS:     Upgrade barriers to the inner shareable domain
  * FB:         Force broadcast of all maintainance operations
@@ -67,8 +69,7 @@
  */
 #define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
                        HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
-                       HCR_SWIO | HCR_TIDCP)
-#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+                       HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
 
 /* System Control Register (SCTLR) bits */
 #define SCTLR_TE       (1 << 30)
 #define TTBCR_IRGN1    (3 << 24)
 #define TTBCR_EPD1     (1 << 23)
 #define TTBCR_A1       (1 << 22)
-#define TTBCR_T1SZ     (3 << 16)
+#define TTBCR_T1SZ     (7 << 16)
 #define TTBCR_SH0      (3 << 12)
 #define TTBCR_ORGN0    (3 << 10)
 #define TTBCR_IRGN0    (3 << 8)
 #define TTBCR_EPD0     (1 << 7)
-#define TTBCR_T0SZ     3
+#define TTBCR_T0SZ     (7 << 0)
 #define HTCR_MASK      (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
 
 /* Hyp System Trap Register */
 #define KVM_PHYS_MASK  (KVM_PHYS_SIZE - 1ULL)
 #define PTRS_PER_S2_PGD        (1ULL << (KVM_PHYS_SHIFT - 30))
 #define S2_PGD_ORDER   get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
-#define S2_PGD_SIZE    (1 << S2_PGD_ORDER)
 
 /* Virtualization Translation Control Register (VTCR) bits */
 #define VTCR_SH0       (3 << 12)
 #define HSR_EC_DABT    (0x24)
 #define HSR_EC_DABT_HYP        (0x25)
 
+#define HSR_WFI_IS_WFE         (1U << 0)
+
 #define HSR_HVC_IMM_MASK       ((1UL << 16) - 1)
 
 #define HSR_DABT_S1PTW         (1U << 7)
index 4bb08e3e52bc0160d5951646aa356b59e88d12d6..3a67bec72d0cddd61d145a11afbddad5b9412a05 100644 (file)
@@ -39,7 +39,7 @@
 #define c6_IFAR                17      /* Instruction Fault Address Register */
 #define c7_PAR         18      /* Physical Address Register */
 #define c7_PAR_high    19      /* PAR top 32 bits */
-#define c9_L2CTLR      20      /* Cortex A15 L2 Control Register */
+#define c9_L2CTLR      20      /* Cortex A15/A7 L2 Control Register */
 #define c10_PRRR       21      /* Primary Region Remap Register */
 #define c10_NMRR       22      /* Normal Memory Remap Register */
 #define c12_VBAR       23      /* Vector Base Address Register */
@@ -48,7 +48,9 @@
 #define c13_TID_URO    26      /* Thread ID, User R/O */
 #define c13_TID_PRIV   27      /* Thread ID, Privileged */
 #define c14_CNTKCTL    28      /* Timer Control Register (PL1) */
-#define NR_CP15_REGS   29      /* Number of regs (incl. invalid) */
+#define c10_AMAIR0     29      /* Auxilary Memory Attribute Indirection Reg0 */
+#define c10_AMAIR1     30      /* Auxilary Memory Attribute Indirection Reg1 */
+#define NR_CP15_REGS   31      /* Number of regs (incl. invalid) */
 
 #define ARM_EXCEPTION_RESET      0
 #define ARM_EXCEPTION_UNDEFINED   1
 #define ARM_EXCEPTION_FIQ        6
 #define ARM_EXCEPTION_HVC        7
 
+/*
+ * The rr_lo_hi macro swaps a pair of registers depending on
+ * current endianness. It is used in conjunction with ldrd and strd
+ * instructions that load/store a 64-bit value from/to memory to/from
+ * a pair of registers which are used with the mrrc and mcrr instructions.
+ * If used with the ldrd/strd instructions, the a1 parameter is the first
+ * source/destination register and the a2 parameter is the second
+ * source/destination register. Note that the ldrd/strd instructions
+ * already swap the bytes within the words correctly according to the
+ * endianness setting, but the order of the registers need to be effectively
+ * swapped when used with the mrrc/mcrr instructions.
+ */
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define rr_lo_hi(a1, a2) a2, a1
+#else
+#define rr_lo_hi(a1, a2) a1, a2
+#endif
+
 #ifndef __ASSEMBLY__
 struct kvm;
 struct kvm_vcpu;
@@ -74,8 +94,6 @@ extern char __kvm_hyp_vector[];
 extern char __kvm_hyp_code_start[];
 extern char __kvm_hyp_code_end[];
 
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 
index 82b4babead2c8b7bff97a2c9b0e3f3fc37ba63f7..b9db269c6e6155bbd1e60f2d7f8bad077b503913 100644 (file)
@@ -65,11 +65,6 @@ static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
        return cpsr_mode > USR_MODE;;
 }
 
-static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
-{
-       return reg == 15;
-}
-
 static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.fault.hsr;
@@ -153,6 +148,11 @@ static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
 }
 
 static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
 {
        return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
 }
@@ -162,4 +162,69 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
        return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
 }
 
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.cp15[c0_MPIDR];
+}
+
+static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
+{
+       *vcpu_cpsr(vcpu) |= PSR_E_BIT;
+}
+
+static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
+{
+       return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
+}
+
+static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
+                                                   unsigned long data,
+                                                   unsigned int len)
+{
+       if (kvm_vcpu_is_be(vcpu)) {
+               switch (len) {
+               case 1:
+                       return data & 0xff;
+               case 2:
+                       return be16_to_cpu(data & 0xffff);
+               default:
+                       return be32_to_cpu(data);
+               }
+       } else {
+               switch (len) {
+               case 1:
+                       return data & 0xff;
+               case 2:
+                       return le16_to_cpu(data & 0xffff);
+               default:
+                       return le32_to_cpu(data);
+               }
+       }
+}
+
+static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
+                                                   unsigned long data,
+                                                   unsigned int len)
+{
+       if (kvm_vcpu_is_be(vcpu)) {
+               switch (len) {
+               case 1:
+                       return data & 0xff;
+               case 2:
+                       return cpu_to_be16(data & 0xffff);
+               default:
+                       return cpu_to_be32(data);
+               }
+       } else {
+               switch (len) {
+               case 1:
+                       return data & 0xff;
+               case 2:
+                       return cpu_to_le16(data & 0xffff);
+               default:
+                       return cpu_to_le32(data);
+               }
+       }
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
index 57cb786a6203de0d6444d1f265344a59abc0de09..46e5d4da1989c2b52dfa229a68f287700e989277 100644 (file)
 #ifndef __ARM_KVM_HOST_H__
 #define __ARM_KVM_HOST_H__
 
+#include <linux/types.h>
+#include <linux/kvm_types.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 #include <asm/fpstate.h>
-#include <asm/kvm_arch_timer.h>
+#include <kvm/arm_arch_timer.h>
 
+#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
 #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
+#else
+#define KVM_MAX_VCPUS 0
+#endif
+
 #define KVM_USER_MEM_SLOTS 32
 #define KVM_PRIVATE_MEM_SLOTS 4
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HAVE_ONE_REG
 
-#define KVM_VCPU_MAX_FEATURES 1
-
-/* We don't currently support large pages. */
-#define KVM_HPAGE_GFN_SHIFT(x) 0
-#define KVM_NR_PAGE_SIZES      1
-#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
+#define KVM_VCPU_MAX_FEATURES 2
 
-#include <asm/kvm_vgic.h>
+#include <kvm/arm_vgic.h>
 
-struct kvm_vcpu;
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
-int kvm_target_cpu(void);
+int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
 
@@ -101,6 +102,12 @@ struct kvm_vcpu_arch {
        /* The CPU type we expose to the VM */
        u32 midr;
 
+       /* HYP trapping configuration */
+       u32 hcr;
+
+       /* Interrupt related fields */
+       u32 irq_lines;          /* IRQ and FIQ levels */
+
        /* Exception Information */
        struct kvm_vcpu_fault_info fault;
 
@@ -128,9 +135,6 @@ struct kvm_vcpu_arch {
        /* IO related fields */
        struct kvm_decode mmio_decode;
 
-       /* Interrupt related fields */
-       u32 irq_lines;          /* IRQ and FIQ levels */
-
        /* Cache some mmu pages needed inside spinlock regions */
        struct kvm_mmu_memory_cache mmu_page_cache;
 
@@ -146,19 +150,17 @@ struct kvm_vcpu_stat {
        u32 halt_wakeup;
 };
 
-struct kvm_vcpu_init;
 int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
                        const struct kvm_vcpu_init *init);
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-struct kvm_one_reg;
 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 u64 kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-struct kvm;
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
@@ -183,15 +185,14 @@ struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
 
 int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
 unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
-struct kvm_one_reg;
 int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 
 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                int exception_index);
 
-static inline void __cpu_init_hyp_mode(unsigned long long boot_pgd_ptr,
-                                      unsigned long long pgd_ptr,
+static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
+                                      phys_addr_t pgd_ptr,
                                       unsigned long hyp_stack_ptr,
                                       unsigned long vector_ptr)
 {
@@ -221,7 +222,18 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)
        return 0;
 }
 
+static inline void vgic_arch_setup(const struct vgic_params *vgic)
+{
+       BUG_ON(vgic->type != VGIC_V2);
+}
+
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+
 #endif /* __ARM_KVM_HOST_H__ */
index 472ac7091003ac0cbae073ec1794413d80a04262..3f688b458143503f3f369a2c89d3f2eabd3b0934 100644 (file)
@@ -62,9 +62,15 @@ phys_addr_t kvm_get_idmap_vector(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
+static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
+{
+       *pmd = new_pmd;
+       flush_pmd_entry(pmd);
+}
+
 static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
 {
-       pte_val(*pte) = new_pte;
+       *pte = new_pte;
        /*
         * flush_pmd_entry just takes a void pointer and cleans the necessary
         * cache entries, so we can reuse the function for ptes.
@@ -72,17 +78,6 @@ static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
        flush_pmd_entry(pte);
 }
 
-static inline bool kvm_is_write_fault(unsigned long hsr)
-{
-       unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
-       if (hsr_ec == HSR_EC_IABT)
-               return false;
-       else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
-               return false;
-       else
-               return true;
-}
-
 static inline void kvm_clean_pgd(pgd_t *pgd)
 {
        clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
@@ -103,10 +98,51 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
        pte_val(*pte) |= L_PTE_S2_RDWR;
 }
 
+static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+{
+       pmd_val(*pmd) |= L_PMD_S2_RDWR;
+}
+
+/* Open coded p*d_addr_end that can deal with 64bit addresses */
+#define kvm_pgd_addr_end(addr, end)                                    \
+({     u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;            \
+       (__boundary - 1 < (end) - 1)? __boundary: (end);                \
+})
+
+#define kvm_pud_addr_end(addr,end)             (end)
+
+#define kvm_pmd_addr_end(addr, end)                                    \
+({     u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;                \
+       (__boundary - 1 < (end) - 1)? __boundary: (end);                \
+})
+
+static inline bool kvm_page_empty(void *ptr)
+{
+       struct page *ptr_page = virt_to_page(ptr);
+       return page_count(ptr_page) == 1;
+}
+
+
+#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
+#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+#define kvm_pud_table_empty(pudp) (0)
+
+
 struct kvm;
 
-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+#define kvm_flush_dcache_to_poc(a,l)   __cpuc_flush_dcache_area((a), (l))
+
+static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+{
+       return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
+}
+
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+                                            unsigned long size)
 {
+       if (!vcpu_has_cache_enabled(vcpu))
+               kvm_flush_dcache_to_poc((void *)hva, size);
+       
        /*
         * If we are going to insert an instruction page and the icache is
         * either VIPT or PIPT, there is a potential problem where the host
@@ -120,15 +156,16 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
         * need any kind of flushing (DDI 0406C.b - Page B3-1392).
         */
        if (icache_is_pipt()) {
-               unsigned long hva = gfn_to_hva(kvm, gfn);
-               __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+               __cpuc_coherent_user_range(hva, hva + size);
        } else if (!icache_is_vivt_asid_tagged()) {
                /* any kind of VIPT cache */
                __flush_icache_all();
        }
 }
 
-#define kvm_flush_dcache_to_poc(a,l)   __cpuc_flush_dcache_area((a), (l))
+#define kvm_virt_to_phys(x)            virt_to_idmap((unsigned long)(x))
+
+void stage2_flush_vm(struct kvm *kvm);
 
 #endif /* !__ASSEMBLY__ */
 
index 9a83d98bf170c2c158c050ff275337c6a59537cc..6bda945d31fa8effe1b9d51c589733c5f9f66183 100644 (file)
 #ifndef __ARM_KVM_PSCI_H__
 #define __ARM_KVM_PSCI_H__
 
-bool kvm_psci_call(struct kvm_vcpu *vcpu);
+#define KVM_ARM_PSCI_0_1       1
+#define KVM_ARM_PSCI_0_2       2
+
+int kvm_psci_version(struct kvm_vcpu *vcpu);
+int kvm_psci_call(struct kvm_vcpu *vcpu);
 
 #endif /* __ARM_KVM_PSCI_H__ */
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
deleted file mode 100644 (file)
index 343744e..0000000
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ASM_ARM_KVM_VGIC_H
-#define __ASM_ARM_KVM_VGIC_H
-
-#include <linux/kernel.h>
-#include <linux/kvm.h>
-#include <linux/irqreturn.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/irqchip/arm-gic.h>
-
-#define VGIC_NR_IRQS           128
-#define VGIC_NR_SGIS           16
-#define VGIC_NR_PPIS           16
-#define VGIC_NR_PRIVATE_IRQS   (VGIC_NR_SGIS + VGIC_NR_PPIS)
-#define VGIC_NR_SHARED_IRQS    (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS)
-#define VGIC_MAX_CPUS          KVM_MAX_VCPUS
-#define VGIC_MAX_LRS           (1 << 6)
-
-/* Sanity checks... */
-#if (VGIC_MAX_CPUS > 8)
-#error Invalid number of CPU interfaces
-#endif
-
-#if (VGIC_NR_IRQS & 31)
-#error "VGIC_NR_IRQS must be a multiple of 32"
-#endif
-
-#if (VGIC_NR_IRQS > 1024)
-#error "VGIC_NR_IRQS must be <= 1024"
-#endif
-
-/*
- * The GIC distributor registers describing interrupts have two parts:
- * - 32 per-CPU interrupts (SGI + PPI)
- * - a bunch of shared interrupts (SPI)
- */
-struct vgic_bitmap {
-       union {
-               u32 reg[VGIC_NR_PRIVATE_IRQS / 32];
-               DECLARE_BITMAP(reg_ul, VGIC_NR_PRIVATE_IRQS);
-       } percpu[VGIC_MAX_CPUS];
-       union {
-               u32 reg[VGIC_NR_SHARED_IRQS / 32];
-               DECLARE_BITMAP(reg_ul, VGIC_NR_SHARED_IRQS);
-       } shared;
-};
-
-struct vgic_bytemap {
-       u32 percpu[VGIC_MAX_CPUS][VGIC_NR_PRIVATE_IRQS / 4];
-       u32 shared[VGIC_NR_SHARED_IRQS  / 4];
-};
-
-struct vgic_dist {
-#ifdef CONFIG_KVM_ARM_VGIC
-       spinlock_t              lock;
-       bool                    ready;
-
-       /* Virtual control interface mapping */
-       void __iomem            *vctrl_base;
-
-       /* Distributor and vcpu interface mapping in the guest */
-       phys_addr_t             vgic_dist_base;
-       phys_addr_t             vgic_cpu_base;
-
-       /* Distributor enabled */
-       u32                     enabled;
-
-       /* Interrupt enabled (one bit per IRQ) */
-       struct vgic_bitmap      irq_enabled;
-
-       /* Interrupt 'pin' level */
-       struct vgic_bitmap      irq_state;
-
-       /* Level-triggered interrupt in progress */
-       struct vgic_bitmap      irq_active;
-
-       /* Interrupt priority. Not used yet. */
-       struct vgic_bytemap     irq_priority;
-
-       /* Level/edge triggered */
-       struct vgic_bitmap      irq_cfg;
-
-       /* Source CPU per SGI and target CPU */
-       u8                      irq_sgi_sources[VGIC_MAX_CPUS][VGIC_NR_SGIS];
-
-       /* Target CPU for each IRQ */
-       u8                      irq_spi_cpu[VGIC_NR_SHARED_IRQS];
-       struct vgic_bitmap      irq_spi_target[VGIC_MAX_CPUS];
-
-       /* Bitmap indicating which CPU has something pending */
-       unsigned long           irq_pending_on_cpu;
-#endif
-};
-
-struct vgic_cpu {
-#ifdef CONFIG_KVM_ARM_VGIC
-       /* per IRQ to LR mapping */
-       u8              vgic_irq_lr_map[VGIC_NR_IRQS];
-
-       /* Pending interrupts on this VCPU */
-       DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS);
-       DECLARE_BITMAP( pending_shared, VGIC_NR_SHARED_IRQS);
-
-       /* Bitmap of used/free list registers */
-       DECLARE_BITMAP( lr_used, VGIC_MAX_LRS);
-
-       /* Number of list registers on this CPU */
-       int             nr_lr;
-
-       /* CPU vif control registers for world switch */
-       u32             vgic_hcr;
-       u32             vgic_vmcr;
-       u32             vgic_misr;      /* Saved only */
-       u32             vgic_eisr[2];   /* Saved only */
-       u32             vgic_elrsr[2];  /* Saved only */
-       u32             vgic_apr;
-       u32             vgic_lr[VGIC_MAX_LRS];
-#endif
-};
-
-#define LR_EMPTY       0xff
-
-struct kvm;
-struct kvm_vcpu;
-struct kvm_run;
-struct kvm_exit_mmio;
-
-#ifdef CONFIG_KVM_ARM_VGIC
-int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr);
-int kvm_vgic_hyp_init(void);
-int kvm_vgic_init(struct kvm *kvm);
-int kvm_vgic_create(struct kvm *kvm);
-int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
-void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
-                       bool level);
-int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
-bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
-                     struct kvm_exit_mmio *mmio);
-
-#define irqchip_in_kernel(k)   (!!((k)->arch.vgic.vctrl_base))
-#define vgic_initialized(k)    ((k)->arch.vgic.ready)
-
-#else
-static inline int kvm_vgic_hyp_init(void)
-{
-       return 0;
-}
-
-static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
-{
-       return 0;
-}
-
-static inline int kvm_vgic_init(struct kvm *kvm)
-{
-       return 0;
-}
-
-static inline int kvm_vgic_create(struct kvm *kvm)
-{
-       return 0;
-}
-
-static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
-{
-       return 0;
-}
-
-static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
-static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}
-
-static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
-                                     unsigned int irq_num, bool level)
-{
-       return 0;
-}
-
-static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
-{
-       return 0;
-}
-
-static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
-                                   struct kvm_exit_mmio *mmio)
-{
-       return false;
-}
-
-static inline int irqchip_in_kernel(struct kvm *kvm)
-{
-       return 0;
-}
-
-static inline bool vgic_initialized(struct kvm *kvm)
-{
-       return true;
-}
-#endif
-
-#endif
index 57870ab313c52cd103b327363b0191e76efad9f6..21b458e6b0b8690bdd54fca1234b5173f0f4af98 100644 (file)
  */
 #define __PV_BITS_31_24        0x81000000
 
+extern phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
 extern unsigned long __pv_phys_offset;
 #define PHYS_OFFSET __pv_phys_offset
 
@@ -232,6 +233,21 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __va(x)                        ((void *)__phys_to_virt((unsigned long)(x)))
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
 
+/*
+ * These are for systems that have a hardware interconnect supported alias of
+ * physical memory for idmap purposes.  Most cases should leave these
+ * untouched.
+ */
+static inline phys_addr_t __virt_to_idmap(unsigned long x)
+{
+       if (arch_virt_to_idmap)
+               return arch_virt_to_idmap(x);
+       else
+               return __virt_to_phys(x);
+}
+
+#define virt_to_idmap(x)       __virt_to_idmap((unsigned long)(x))
+
 /*
  * Virtual <-> DMA view memory address translations
  * Again, these are *only* valid on the kernel direct mapped RAM
index 18f5cef82ad58988e1c8d9b77ab95eb995330bbf..f088c864c9926723f615feb125ddb3b207e50c21 100644 (file)
@@ -30,6 +30,7 @@
 #define PMD_TYPE_FAULT         (_AT(pmdval_t, 0) << 0)
 #define PMD_TYPE_TABLE         (_AT(pmdval_t, 3) << 0)
 #define PMD_TYPE_SECT          (_AT(pmdval_t, 1) << 0)
+#define PMD_TABLE_BIT          (_AT(pmdval_t, 1) << 1)
 #define PMD_BIT4               (_AT(pmdval_t, 0))
 #define PMD_DOMAIN(x)          (_AT(pmdval_t, 0))
 #define PMD_APTABLE_SHIFT      (61)
@@ -41,6 +42,8 @@
  */
 #define PMD_SECT_BUFFERABLE    (_AT(pmdval_t, 1) << 2)
 #define PMD_SECT_CACHEABLE     (_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_USER          (_AT(pmdval_t, 1) << 6)         /* AP[1] */
+#define PMD_SECT_RDONLY                (_AT(pmdval_t, 1) << 7)         /* AP[2] */
 #define PMD_SECT_S             (_AT(pmdval_t, 3) << 8)
 #define PMD_SECT_AF            (_AT(pmdval_t, 1) << 10)
 #define PMD_SECT_nG            (_AT(pmdval_t, 1) << 11)
@@ -66,6 +69,7 @@
 #define PTE_TYPE_MASK          (_AT(pteval_t, 3) << 0)
 #define PTE_TYPE_FAULT         (_AT(pteval_t, 0) << 0)
 #define PTE_TYPE_PAGE          (_AT(pteval_t, 3) << 0)
+#define PTE_TABLE_BIT          (_AT(pteval_t, 1) << 1)
 #define PTE_BUFFERABLE         (_AT(pteval_t, 1) << 2)         /* AttrIndx[0] */
 #define PTE_CACHEABLE          (_AT(pteval_t, 1) << 3)         /* AttrIndx[1] */
 #define PTE_EXT_SHARED         (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
index 86b8fe398b9514d89a9032658f6bb3ad221b069e..c5d94b31bde43757adf163cb346ab0610989a894 100644 (file)
 
 #define USER_PTRS_PER_PGD      (PAGE_OFFSET / PGDIR_SIZE)
 
+/*
+ * Hugetlb definitions.
+ */
+#define HPAGE_SHIFT            PMD_SHIFT
+#define HPAGE_SIZE             (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK             (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
+
 /*
  * "Linux" PTE definitions for LPAE.
  *
 #define L_PTE_SPECIAL          (_AT(pteval_t, 1) << 56)        /* unused */
 #define L_PTE_NONE             (_AT(pteval_t, 1) << 57)        /* PROT_NONE */
 
+#define PMD_SECT_VALID         (_AT(pmdval_t, 1) << 0)
+#define PMD_SECT_DIRTY         (_AT(pmdval_t, 1) << 55)
+#define PMD_SECT_SPLITTING     (_AT(pmdval_t, 1) << 56)
+#define PMD_SECT_NONE          (_AT(pmdval_t, 1) << 57)
+
 /*
  * To be used in assembly code with the upper page attributes.
  */
 #define L_PTE_S2_RDONLY                 (_AT(pteval_t, 1) << 6)   /* HAP[1]   */
 #define L_PTE_S2_RDWR           (_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 
+#define L_PMD_S2_RDWR           (_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
+
 /*
  * Hyp-mode PL2 PTE definitions for LPAE.
  */
@@ -166,8 +181,83 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
                clean_pmd_entry(pmdp);  \
        } while (0)
 
+/*
+ * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
+ * that are written to a page table but not for ptes created with mk_pte.
+ *
+ * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
+ * hugetlb_cow, where it is compared with an entry in a page table.
+ * This comparison test fails erroneously leading ultimately to a memory leak.
+ *
+ * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
+ * present before running the comparison.
+ */
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(pte_a,pte_b)  ((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG     \
+                                       : pte_val(pte_a))                               \
+                               == (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG   \
+                                       : pte_val(pte_b)))
+
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
 
+#define pte_huge(pte)          (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
+#define pte_mkhuge(pte)                (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
+
+#define pmd_young(pmd)         (pmd_val(pmd) & PMD_SECT_AF)
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd)         (!(pmd_val(pmd) & PMD_SECT_RDONLY))
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_trans_huge(pmd)    (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
+#endif
+
+#define PMD_BIT_FUNC(fn,op) \
+static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+PMD_BIT_FUNC(wrprotect,        |= PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkold,    &= ~PMD_SECT_AF);
+PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
+PMD_BIT_FUNC(mkwrite,   &= ~PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkdirty,   |= PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
+
+#define pmd_mkhuge(pmd)                (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+
+#define pmd_pfn(pmd)           (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot)      (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define mk_pmd(page,prot)      pfn_pmd(page_to_pfn(page),prot)
+
+/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
+#define pmd_mknotpresent(pmd)  (__pmd(0))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+       const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY |
+                               PMD_SECT_VALID | PMD_SECT_NONE;
+       pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+       return pmd;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+                             pmd_t *pmdp, pmd_t pmd)
+{
+       BUG_ON(addr >= TASK_SIZE);
+
+       /* create a faulting entry if PROT_NONE protected */
+       if (pmd_val(pmd) & PMD_SECT_NONE)
+               pmd_val(pmd) &= ~PMD_SECT_VALID;
+
+       *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
+       flush_pmd_entry(pmdp);
+}
+
+static inline int has_transparent_hugepage(void)
+{
+       return 1;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_PGTABLE_3LEVEL_H */
index 5aac06fcc97e43e80ad8ac568a04f41f3198a5a6..b1b7a49074891a6caf4932a85b0f18e248149472 100644 (file)
@@ -24,6 +24,9 @@
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
 
+
+#include <asm/tlbflush.h>
+
 #ifdef CONFIG_ARM_LPAE
 #include <asm/pgtable-3level.h>
 #else
@@ -97,7 +100,7 @@ extern pgprot_t              pgprot_s2_device;
 #define PAGE_HYP               _MOD_PROT(pgprot_kernel, L_PTE_HYP)
 #define PAGE_HYP_DEVICE                _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
 #define PAGE_S2                        _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE         _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE         _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDWR)
 
 #define __PAGE_NONE            __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED          __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
index 18d1693736124a75205c765b5e5f588663f3b95f..1a292d8be98819b44a2883a273aa153ca07cfb69 100644 (file)
@@ -11,7 +11,7 @@
 
 static inline bool scu_a9_has_base(void)
 {
-       return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
+       return read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
 }
 
 static inline unsigned long scu_a9_get_base(void)
index 73ddd7239b33aa77d178ae1341c0c46c736a08e5..ed805f1d3785691fdb79eb36fbaa56b355766a21 100644 (file)
@@ -103,8 +103,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
        memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-                                  struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
        /* ARM tasks don't change audit architectures on the fly. */
        return AUDIT_ARCH_ARM;
index aa9b4ac3fdf6dba0137f12daf725a5067137acf6..0baf7f0d939484264b089c772112657cb9f15c75 100644 (file)
@@ -207,6 +207,12 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 #endif
 }
 
+static inline void
+tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
+{
+       tlb_add_flush(tlb, addr);
+}
+
 #define pte_free_tlb(tlb, ptep, addr)  __pte_free_tlb(tlb, ptep, addr)
 #define pmd_free_tlb(tlb, pmdp, addr)  __pmd_free_tlb(tlb, pmdp, addr)
 #define pud_free_tlb(tlb, pudp, addr)  pud_free((tlb)->mm, pudp)
index a3625d141c1d417389e14a176d175ce359ebf081..c37459299fc9cff7196eceb8a42a29ca7b22c22d 100644 (file)
@@ -535,6 +535,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 }
 #endif
 
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
 #endif
 
 #endif /* CONFIG_MMU */
index cbd61977c996cb13c55762bc881584f519ec5396..43876245fc5707780115c36d67d1843fce0cf6bf 100644 (file)
@@ -15,7 +15,7 @@
 
 #include <uapi/asm/unistd.h>
 
-#define __NR_syscalls  (380)
+#define __NR_syscalls  (384)
 #define __ARM_NR_cmpxchg               (__ARM_NR_BASE+0x00fff0)
 
 #define __ARCH_WANT_STAT64
index c1ee007523d78dd25b1dd21661af605da4aa7ef3..09ee408c1a67621e9c052e53492b61ef308de6f8 100644 (file)
 #define __ARM_KVM_H__
 
 #include <linux/types.h>
+#include <linux/psci.h>
 #include <asm/ptrace.h>
 
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_READONLY_MEM
 
 #define KVM_REG_SIZE(id)                                               \
        (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
@@ -63,7 +65,8 @@ struct kvm_regs {
 
 /* Supported Processor Types */
 #define KVM_ARM_TARGET_CORTEX_A15      0
-#define KVM_ARM_NUM_TARGETS            1
+#define KVM_ARM_TARGET_CORTEX_A7       1
+#define KVM_ARM_NUM_TARGETS            2
 
 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
 #define KVM_ARM_DEVICE_TYPE_SHIFT      0
@@ -82,6 +85,7 @@ struct kvm_regs {
 #define KVM_VGIC_V2_CPU_SIZE           0x2000
 
 #define KVM_ARM_VCPU_POWER_OFF         0 /* CPU is started in OFF state */
+#define KVM_ARM_VCPU_PSCI_0_2          1 /* CPU uses PSCI v0.2 */
 
 struct kvm_vcpu_init {
        __u32 target;
@@ -118,6 +122,26 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_32_CRN_MASK                0x0000000000007800
 #define KVM_REG_ARM_32_CRN_SHIFT       11
 
+#define ARM_CP15_REG_SHIFT_MASK(x,n) \
+       (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
+
+#define __ARM_CP15_REG(op1,crn,crm,op2) \
+       (KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \
+       ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \
+       ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \
+       ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \
+       ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2))
+
+#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32)
+
+#define __ARM_CP15_REG64(op1,crm) \
+       (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
+#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
+
+#define KVM_REG_ARM_TIMER_CTL          ARM_CP15_REG32(0, 14, 3, 1)
+#define KVM_REG_ARM_TIMER_CNT          ARM_CP15_REG64(1, 14) 
+#define KVM_REG_ARM_TIMER_CVAL         ARM_CP15_REG64(3, 14) 
+
 /* Normal registers are mapped as coprocessor 16. */
 #define KVM_REG_ARM_CORE               (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
 #define KVM_REG_ARM_CORE_REG(name)     (offsetof(struct kvm_regs, name) / 4)
@@ -142,6 +166,15 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_VFP_FPINST         0x1009
 #define KVM_REG_ARM_VFP_FPINST2                0x100A
 
+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR      0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS  2
+#define   KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
+#define   KVM_DEV_ARM_VGIC_CPUID_MASK  (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT        0
+#define   KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS   3
 
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT         24
@@ -172,9 +205,9 @@ struct kvm_arch_memory_slot {
 #define KVM_PSCI_FN_CPU_ON             KVM_PSCI_FN(2)
 #define KVM_PSCI_FN_MIGRATE            KVM_PSCI_FN(3)
 
-#define KVM_PSCI_RET_SUCCESS           0
-#define KVM_PSCI_RET_NI                        ((unsigned long)-1)
-#define KVM_PSCI_RET_INVAL             ((unsigned long)-2)
-#define KVM_PSCI_RET_DENIED            ((unsigned long)-3)
+#define KVM_PSCI_RET_SUCCESS           PSCI_RET_SUCCESS
+#define KVM_PSCI_RET_NI                        PSCI_RET_NOT_SUPPORTED
+#define KVM_PSCI_RET_INVAL             PSCI_RET_INVALID_PARAMS
+#define KVM_PSCI_RET_DENIED            PSCI_RET_DENIED
 
 #endif /* __ARM_KVM_H__ */
index af33b44990ed4a395662f0c5e7021049e5755a78..17407c92c0dabd746d538657d929f29380d1b600 100644 (file)
 #define __NR_process_vm_writev         (__NR_SYSCALL_BASE+377)
 #define __NR_kcmp                      (__NR_SYSCALL_BASE+378)
 #define __NR_finit_module              (__NR_SYSCALL_BASE+379)
+/* Reserve for later
+#define __NR_sched_setattr             (__NR_SYSCALL_BASE+380)
+#define __NR_sched_getattr             (__NR_SYSCALL_BASE+381)
+#define __NR_renameat2                 (__NR_SYSCALL_BASE+382)
+*/
+#define __NR_seccomp                   (__NR_SYSCALL_BASE+383)
 
 /*
  * This may need to be greater than __NR_last_syscall+1 in order to
index ee68cce6b48e4cfc5a65609b199b55ed6642594d..776d9186e9c11987c49ccbb314365fc14a5ea6b9 100644 (file)
@@ -168,6 +168,7 @@ int main(void)
   DEFINE(VCPU_FIQ_REGS,                offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
   DEFINE(VCPU_PC,              offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
   DEFINE(VCPU_CPSR,            offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
+  DEFINE(VCPU_HCR,             offsetof(struct kvm_vcpu, arch.hcr));
   DEFINE(VCPU_IRQ_LINES,       offsetof(struct kvm_vcpu, arch.irq_lines));
   DEFINE(VCPU_HSR,             offsetof(struct kvm_vcpu, arch.fault.hsr));
   DEFINE(VCPU_HxFAR,           offsetof(struct kvm_vcpu, arch.fault.hxfar));
@@ -175,13 +176,13 @@ int main(void)
   DEFINE(VCPU_HYP_PC,          offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
 #ifdef CONFIG_KVM_ARM_VGIC
   DEFINE(VCPU_VGIC_CPU,                offsetof(struct kvm_vcpu, arch.vgic_cpu));
-  DEFINE(VGIC_CPU_HCR,         offsetof(struct vgic_cpu, vgic_hcr));
-  DEFINE(VGIC_CPU_VMCR,                offsetof(struct vgic_cpu, vgic_vmcr));
-  DEFINE(VGIC_CPU_MISR,                offsetof(struct vgic_cpu, vgic_misr));
-  DEFINE(VGIC_CPU_EISR,                offsetof(struct vgic_cpu, vgic_eisr));
-  DEFINE(VGIC_CPU_ELRSR,       offsetof(struct vgic_cpu, vgic_elrsr));
-  DEFINE(VGIC_CPU_APR,         offsetof(struct vgic_cpu, vgic_apr));
-  DEFINE(VGIC_CPU_LR,          offsetof(struct vgic_cpu, vgic_lr));
+  DEFINE(VGIC_V2_CPU_HCR,      offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
+  DEFINE(VGIC_V2_CPU_VMCR,     offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
+  DEFINE(VGIC_V2_CPU_MISR,     offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
+  DEFINE(VGIC_V2_CPU_EISR,     offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
+  DEFINE(VGIC_V2_CPU_ELRSR,    offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
+  DEFINE(VGIC_V2_CPU_APR,      offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
+  DEFINE(VGIC_V2_CPU_LR,       offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
   DEFINE(VGIC_CPU_NR_LR,       offsetof(struct vgic_cpu, nr_lr));
 #ifdef CONFIG_KVM_ARM_TIMER
   DEFINE(VCPU_TIMER_CNTV_CTL,  offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
index c6ca7e376773fcc73ef619e1c5a97793d97b41b4..725f844926eaac3ab58468eda87ae3a57e6fddcd 100644 (file)
                CALL(sys_process_vm_writev)
                CALL(sys_kcmp)
                CALL(sys_finit_module)
+/* 380 */      CALL(sys_ni_syscall)            /* reserved sys_sched_setattr */
+               CALL(sys_ni_syscall)            /* reserved sys_sched_getattr */
+               CALL(sys_ni_syscall)            /* reserved sys_renameat2     */
+               CALL(sys_seccomp)
+
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index 8c79344552d5f22ff1a39d07074fa1188ec267ad..11d68917d3b1ec23fd6f6156c2030f325b04d2b6 100644 (file)
@@ -362,6 +362,16 @@ ENTRY(vector_swi)
        str     r0, [sp, #S_OLD_R0]             @ Save OLD_R0
        zero_fp
 
+#ifdef CONFIG_ALIGNMENT_TRAP
+       ldr     ip, __cr_alignment
+       ldr     ip, [ip]
+       mcr     p15, 0, ip, c1, c0              @ update control register
+#endif
+
+       enable_irq
+       ct_user_exit
+       get_thread_info tsk
+
        /*
         * Get the system call number.
         */
@@ -375,9 +385,9 @@ ENTRY(vector_swi)
 #ifdef CONFIG_ARM_THUMB
        tst     r8, #PSR_T_BIT
        movne   r10, #0                         @ no thumb OABI emulation
      ldreq   r10, [lr, #-4]                  @ get SWI instruction
USER( ldreq   r10, [lr, #-4]          )       @ get SWI instruction
 #else
      ldr     r10, [lr, #-4]                  @ get SWI instruction
USER( ldr     r10, [lr, #-4]          )       @ get SWI instruction
 #endif
  ARM_BE8(rev   r10, r10)                       @ little endian instruction
 
@@ -390,22 +400,13 @@ ENTRY(vector_swi)
        /* Legacy ABI only, possibly thumb mode. */
        tst     r8, #PSR_T_BIT                  @ this is SPSR from save_user_regs
        addne   scno, r7, #__NR_SYSCALL_BASE    @ put OS number in
-       ldreq   scno, [lr, #-4]
+ USER( ldreq   scno, [lr, #-4]         )
 
 #else
        /* Legacy ABI only. */
      ldr     scno, [lr, #-4]                 @ get SWI instruction
USER( ldr     scno, [lr, #-4]         )       @ get SWI instruction
 #endif
 
-#ifdef CONFIG_ALIGNMENT_TRAP
-       ldr     ip, __cr_alignment
-       ldr     ip, [ip]
-       mcr     p15, 0, ip, c1, c0              @ update control register
-#endif
-       enable_irq
-       ct_user_exit
-
-       get_thread_info tsk
        adr     tbl, sys_call_table             @ load syscall table pointer
 
 #if defined(CONFIG_OABI_COMPAT)
@@ -440,6 +441,21 @@ local_restart:
        eor     r0, scno, #__NR_SYSCALL_BASE    @ put OS number back
        bcs     arm_syscall     
        b       sys_ni_syscall                  @ not private func
+
+#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
+       /*
+        * We failed to handle a fault trying to access the page
+        * containing the swi instruction, but we're not really in a
+        * position to return -EFAULT. Instead, return back to the
+        * instruction and re-enter the user fault handling path trying
+        * to page it in. This will likely result in sending SEGV to the
+        * current task.
+        */
+9001:
+       sub     lr, lr, #4
+       str     lr, [sp, #S_PC]
+       b       ret_fast_syscall
+#endif
 ENDPROC(vector_swi)
 
        /*
index 9723d17b8f38552212b8540aaadc10dd4442fc2b..1e782bdeee490dc69f1b9a43688ae7116f52edbc 100644 (file)
@@ -163,7 +163,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
        c = irq_data_get_irq_chip(d);
        if (!c->irq_set_affinity)
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-       else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+       else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
                cpumask_copy(d->affinity, affinity);
 
        return ret;
index 18a76282970e6dfaa414b6f9ff266553258bd27d..380c20fb9c85a459ecbb1ee7868df1c7f28a1064 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
 #include <asm/system_info.h>
+#include <asm/opcodes.h>
 
 #include "kprobes.h"
 
@@ -305,7 +306,8 @@ kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 
        if (handler) {
                /* We can emulate the instruction in (possibly) modified form */
-               asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist;
+               asi->insn[0] = __opcode_to_mem_arm((insn & 0xfff00000) |
+                                                  (rn << 16) | reglist);
                asi->insn_handler = handler;
                return INSN_GOOD;
        }
@@ -334,13 +336,14 @@ prepare_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
 #ifdef CONFIG_THUMB2_KERNEL
        if (thumb) {
                u16 *thumb_insn = (u16 *)asi->insn;
-               thumb_insn[1] = 0x4770; /* Thumb bx lr */
-               thumb_insn[2] = 0x4770; /* Thumb bx lr */
+               /* Thumb bx lr */
+               thumb_insn[1] = __opcode_to_mem_thumb16(0x4770);
+               thumb_insn[2] = __opcode_to_mem_thumb16(0x4770);
                return insn;
        }
-       asi->insn[1] = 0xe12fff1e; /* ARM bx lr */
+       asi->insn[1] = __opcode_to_mem_arm(0xe12fff1e); /* ARM bx lr */
 #else
-       asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */
+       asi->insn[1] = __opcode_to_mem_arm(0xe1a0f00e); /* mov pc, lr */
 #endif
        /* Make an ARM instruction unconditional */
        if (insn < 0xe0000000)
@@ -360,12 +363,12 @@ set_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
        if (thumb) {
                u16 *ip = (u16 *)asi->insn;
                if (is_wide_instruction(insn))
-                       *ip++ = insn >> 16;
-               *ip++ = insn;
+                       *ip++ = __opcode_to_mem_thumb16(insn >> 16);
+               *ip++ = __opcode_to_mem_thumb16(insn);
                return;
        }
 #endif
-       asi->insn[0] = insn;
+       asi->insn[0] = __opcode_to_mem_arm(insn);
 }
 
 /*
index 6123daf397a7bbb7ffe161075165ddf57f175d10..b82e798983c4fdf280b77b104cbafc68184b5e72 100644 (file)
@@ -163,9 +163,9 @@ t32_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
        enum kprobe_insn ret = kprobe_decode_ldmstm(insn, asi);
 
        /* Fixup modified instruction to have halfwords in correct order...*/
-       insn = asi->insn[0];
-       ((u16 *)asi->insn)[0] = insn >> 16;
-       ((u16 *)asi->insn)[1] = insn & 0xffff;
+       insn = __mem_to_opcode_arm(asi->insn[0]);
+       ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn >> 16);
+       ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0xffff);
 
        return ret;
 }
@@ -1153,7 +1153,7 @@ t16_decode_hiregs(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
        insn &= ~0x00ff;
        insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */
-       ((u16 *)asi->insn)[0] = insn;
+       ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn);
        asi->insn_handler = t16_emulate_hiregs;
        return INSN_GOOD;
 }
@@ -1182,8 +1182,10 @@ t16_decode_push(kprobe_opcode_t insn, struct arch_specific_insn *asi)
         * and call it with R9=SP and LR in the register list represented
         * by R8.
         */
-       ((u16 *)asi->insn)[0] = 0xe929;         /* 1st half STMDB R9!,{} */
-       ((u16 *)asi->insn)[1] = insn & 0x1ff;   /* 2nd half (register list) */
+       /* 1st half STMDB R9!,{} */
+       ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe929);
+       /* 2nd half (register list) */
+       ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
        asi->insn_handler = t16_emulate_push;
        return INSN_GOOD;
 }
@@ -1232,8 +1234,10 @@ t16_decode_pop(kprobe_opcode_t insn, struct arch_specific_insn *asi)
         * and call it with R9=SP and PC in the register list represented
         * by R8.
         */
-       ((u16 *)asi->insn)[0] = 0xe8b9;         /* 1st half LDMIA R9!,{} */
-       ((u16 *)asi->insn)[1] = insn & 0x1ff;   /* 2nd half (register list) */
+       /* 1st half LDMIA R9!,{} */
+       ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe8b9);
+       /* 2nd half (register list) */
+       ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
        asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc
                                         : t16_emulate_pop_nopc;
        return INSN_GOOD;
index 170e9f34003f414030c6070e8c913df21c5091cd..1c6ece51781c9d0a84ee25ff5095bc0edb59241a 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/stop_machine.h>
 #include <linux/stringify.h>
 #include <asm/traps.h>
+#include <asm/opcodes.h>
 #include <asm/cacheflush.h>
 
 #include "kprobes.h"
@@ -62,10 +63,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 #ifdef CONFIG_THUMB2_KERNEL
        thumb = true;
        addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
-       insn = ((u16 *)addr)[0];
+       insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]);
        if (is_wide_instruction(insn)) {
-               insn <<= 16;
-               insn |= ((u16 *)addr)[1];
+               u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]);
+               insn = __opcode_thumb32_compose(insn, inst2);
                decode_insn = thumb32_kprobe_decode_insn;
        } else
                decode_insn = thumb16_kprobe_decode_insn;
@@ -73,7 +74,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
        thumb = false;
        if (addr & 0x3)
                return -EINVAL;
-       insn = *p->addr;
+       insn = __mem_to_opcode_arm(*p->addr);
        decode_insn = arm_kprobe_decode_insn;
 #endif
 
index c3ef920823b6051a7926edf691e35d7e282afd0c..70ae735dec53fe51b019a18d465b483f794f272c 100644 (file)
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
+#include <asm/fncpy.h>
 #include <asm/mach-types.h>
 #include <asm/system_misc.h>
 
-extern const unsigned char relocate_new_kernel[];
+extern void relocate_new_kernel(void);
 extern const unsigned int relocate_new_kernel_size;
 
 extern unsigned long kexec_start_address;
@@ -133,6 +134,8 @@ void machine_kexec(struct kimage *image)
 {
        unsigned long page_list;
        unsigned long reboot_code_buffer_phys;
+       unsigned long reboot_entry = (unsigned long)relocate_new_kernel;
+       unsigned long reboot_entry_phys;
        void *reboot_code_buffer;
 
        if (num_online_cpus() > 1) {
@@ -156,18 +159,18 @@ void machine_kexec(struct kimage *image)
 
 
        /* copy our kernel relocation code to the control code page */
-       memcpy(reboot_code_buffer,
-              relocate_new_kernel, relocate_new_kernel_size);
+       reboot_entry = fncpy(reboot_code_buffer,
+                            reboot_entry,
+                            relocate_new_kernel_size);
+       reboot_entry_phys = (unsigned long)reboot_entry +
+               (reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);
 
-
-       flush_icache_range((unsigned long) reboot_code_buffer,
-                          (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
        printk(KERN_INFO "Bye!\n");
 
        if (kexec_reinit)
                kexec_reinit();
 
-       soft_restart(reboot_code_buffer_phys);
+       soft_restart(reboot_entry_phys);
 }
 
 void arch_crash_save_vmcoreinfo(void)
index 00f6337f5c79b564d4794b5a7cd48565dd656f1b..684c5b14e28d0fe0c3c5ca4a8c77381514e88d7b 100644 (file)
@@ -235,49 +235,39 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
 static int probe_current_pmu(struct arm_pmu *pmu)
 {
        int cpu = get_cpu();
-       unsigned long implementor = read_cpuid_implementor();
-       unsigned long part_number = read_cpuid_part_number();
        int ret = -ENODEV;
 
        pr_info("probing PMU on CPU %d\n", cpu);
 
+       switch (read_cpuid_part()) {
        /* ARM Ltd CPUs. */
-       if (implementor == ARM_CPU_IMP_ARM) {
-               switch (part_number) {
-               case ARM_CPU_PART_ARM1136:
-               case ARM_CPU_PART_ARM1156:
-               case ARM_CPU_PART_ARM1176:
-                       ret = armv6pmu_init(pmu);
-                       break;
-               case ARM_CPU_PART_ARM11MPCORE:
-                       ret = armv6mpcore_pmu_init(pmu);
-                       break;
-               case ARM_CPU_PART_CORTEX_A8:
-                       ret = armv7_a8_pmu_init(pmu);
-                       break;
-               case ARM_CPU_PART_CORTEX_A9:
-                       ret = armv7_a9_pmu_init(pmu);
-                       break;
-               case ARM_CPU_PART_CORTEX_A5:
-                       ret = armv7_a5_pmu_init(pmu);
-                       break;
-               case ARM_CPU_PART_CORTEX_A15:
-                       ret = armv7_a15_pmu_init(pmu);
-                       break;
-               case ARM_CPU_PART_CORTEX_A7:
-                       ret = armv7_a7_pmu_init(pmu);
-                       break;
-               }
-       /* Intel CPUs [xscale]. */
-       } else if (implementor == ARM_CPU_IMP_INTEL) {
-               switch (xscale_cpu_arch_version()) {
-               case ARM_CPU_XSCALE_ARCH_V1:
-                       ret = xscale1pmu_init(pmu);
-                       break;
-               case ARM_CPU_XSCALE_ARCH_V2:
-                       ret = xscale2pmu_init(pmu);
-                       break;
+       case ARM_CPU_PART_ARM1136:
+       case ARM_CPU_PART_ARM1156:
+       case ARM_CPU_PART_ARM1176:
+               ret = armv6pmu_init(pmu);
+               break;
+       case ARM_CPU_PART_ARM11MPCORE:
+               ret = armv6mpcore_pmu_init(pmu);
+               break;
+       case ARM_CPU_PART_CORTEX_A8:
+               ret = armv7_a8_pmu_init(pmu);
+               break;
+       case ARM_CPU_PART_CORTEX_A9:
+               ret = armv7_a9_pmu_init(pmu);
+               break;
+
+       default:
+               if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL) {
+                       switch (xscale_cpu_arch_version()) {
+                       case ARM_CPU_XSCALE_ARCH_V1:
+                               ret = xscale1pmu_init(pmu);
+                               break;
+                       case ARM_CPU_XSCALE_ARCH_V2:
+                               ret = xscale2pmu_init(pmu);
+                               break;
+                       }
                }
+               break;
        }
 
        /* assume PMU support all the CPUs in this case */
index 03deeffd9f6d06e6ff380126592e10dbf7bf1a25..394424b2525422bc83fd295a8eccb69c2fa199bf 100644 (file)
@@ -916,7 +916,7 @@ enum ptrace_syscall_dir {
        PTRACE_SYSCALL_EXIT,
 };
 
-static int tracehook_report_syscall(struct pt_regs *regs,
+static void tracehook_report_syscall(struct pt_regs *regs,
                                    enum ptrace_syscall_dir dir)
 {
        unsigned long ip;
@@ -934,7 +934,6 @@ static int tracehook_report_syscall(struct pt_regs *regs,
                current_thread_info()->syscall = -1;
 
        regs->ARM_ip = ip;
-       return current_thread_info()->syscall;
 }
 
 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
@@ -946,7 +945,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
                return -1;
 
        if (test_thread_flag(TIF_SYSCALL_TRACE))
-               scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+               tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
+       scno = current_thread_info()->syscall;
 
        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_enter(regs, scno);
index d0cdedf4864dc52092355e105f3ba04bba5c5704..95858966d84ec0f10d392a3356cb0367fcee0b85 100644 (file)
@@ -2,10 +2,12 @@
  * relocate_kernel.S - put the kernel image in place to boot
  */
 
+#include <linux/linkage.h>
 #include <asm/kexec.h>
 
-       .globl relocate_new_kernel
-relocate_new_kernel:
+       .align  3       /* not needed for this code, but keeps fncpy() happy */
+
+ENTRY(relocate_new_kernel)
 
        ldr     r0,kexec_indirection_page
        ldr     r1,kexec_start_address
@@ -79,6 +81,8 @@ kexec_mach_type:
 kexec_boot_atags:
        .long   0x0
 
+ENDPROC(relocate_new_kernel)
+
 relocate_new_kernel_end:
 
        .globl relocate_new_kernel_size
index ed3243bb6c07436545912fb40c94c146415b8730..f2724e475b96708ddcf829263952a56d4b73067a 100644 (file)
@@ -92,8 +92,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
-       secondary_data.pgdir = virt_to_phys(idmap_pgd);
-       secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
+       secondary_data.pgdir = virt_to_idmap(idmap_pgd);
+       secondary_data.swapper_pg_dir = virt_to_idmap(swapper_pg_dir);
        __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
        outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
 
index 370e1a8af6ac0663974b2fdc186dd05c7f3b996a..466bd299b1a8aad54949364d976d9c5430c2375e 100644 (file)
@@ -20,6 +20,7 @@ config KVM
        bool "Kernel-based Virtual Machine (KVM) support"
        select PREEMPT_NOTIFIERS
        select ANON_INODES
+       select HAVE_KVM_CPU_RELAX_INTERCEPT
        select KVM_MMIO
        select KVM_ARM_HOST
        depends on ARM_VIRT_EXT && ARM_LPAE
@@ -41,9 +42,9 @@ config KVM_ARM_HOST
          Provides host support for ARM processors.
 
 config KVM_ARM_MAX_VCPUS
-       int "Number maximum supported virtual CPUs per VM" if KVM_ARM_HOST
-       default 4 if KVM_ARM_HOST
-       default 0
+       int "Number maximum supported virtual CPUs per VM"
+       depends on KVM_ARM_HOST
+       default 4
        help
          Static number of max supported virtual CPUs per VM.
 
@@ -67,6 +68,4 @@ config KVM_ARM_TIMER
        ---help---
          Adds support for the Architected Timers in virtual machines
 
-source drivers/virtio/Kconfig
-
 endif # VIRTUALIZATION
index 53c5ed83d16fc47455073d96355e3ac787ee5190..f7057ed045b63bc0c42a240095f1a5927ad59de0 100644 (file)
@@ -14,10 +14,12 @@ CFLAGS_mmu.o := -I.
 AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
 AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
 
-kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+KVM := ../../../virt/kvm
+kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
 
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
-obj-y += coproc.o coproc_a15.o mmio.o psci.o perf.o
-obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o
-obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o
+obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
+obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
+obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
+obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arch_timer.c b/arch/arm/kvm/arch_timer.c
deleted file mode 100644 (file)
index c55b608..0000000
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/cpu.h>
-#include <linux/of_irq.h>
-#include <linux/kvm.h>
-#include <linux/kvm_host.h>
-#include <linux/interrupt.h>
-
-#include <clocksource/arm_arch_timer.h>
-#include <asm/arch_timer.h>
-
-#include <asm/kvm_vgic.h>
-#include <asm/kvm_arch_timer.h>
-
-static struct timecounter *timecounter;
-static struct workqueue_struct *wqueue;
-static struct kvm_irq_level timer_irq = {
-       .level  = 1,
-};
-
-static cycle_t kvm_phys_timer_read(void)
-{
-       return timecounter->cc->read(timecounter->cc);
-}
-
-static bool timer_is_armed(struct arch_timer_cpu *timer)
-{
-       return timer->armed;
-}
-
-/* timer_arm: as in "arm the timer", not as in ARM the company */
-static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
-{
-       timer->armed = true;
-       hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
-                     HRTIMER_MODE_ABS);
-}
-
-static void timer_disarm(struct arch_timer_cpu *timer)
-{
-       if (timer_is_armed(timer)) {
-               hrtimer_cancel(&timer->timer);
-               cancel_work_sync(&timer->expired);
-               timer->armed = false;
-       }
-}
-
-static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
-{
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-
-       timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
-       kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-                           vcpu->arch.timer_cpu.irq->irq,
-                           vcpu->arch.timer_cpu.irq->level);
-}
-
-static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
-{
-       struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
-
-       /*
-        * We disable the timer in the world switch and let it be
-        * handled by kvm_timer_sync_hwstate(). Getting a timer
-        * interrupt at this point is a sure sign of some major
-        * breakage.
-        */
-       pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
-       return IRQ_HANDLED;
-}
-
-static void kvm_timer_inject_irq_work(struct work_struct *work)
-{
-       struct kvm_vcpu *vcpu;
-
-       vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
-       vcpu->arch.timer_cpu.armed = false;
-       kvm_timer_inject_irq(vcpu);
-}
-
-static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
-{
-       struct arch_timer_cpu *timer;
-       timer = container_of(hrt, struct arch_timer_cpu, timer);
-       queue_work(wqueue, &timer->expired);
-       return HRTIMER_NORESTART;
-}
-
-/**
- * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
- * @vcpu: The vcpu pointer
- *
- * Disarm any pending soft timers, since the world-switch code will write the
- * virtual timer state back to the physical CPU.
- */
-void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
-{
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-
-       /*
-        * We're about to run this vcpu again, so there is no need to
-        * keep the background timer running, as we're about to
-        * populate the CPU timer again.
-        */
-       timer_disarm(timer);
-}
-
-/**
- * kvm_timer_sync_hwstate - sync timer state from cpu
- * @vcpu: The vcpu pointer
- *
- * Check if the virtual timer was armed and either schedule a corresponding
- * soft timer or inject directly if already expired.
- */
-void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
-{
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-       cycle_t cval, now;
-       u64 ns;
-
-       if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
-               !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
-               return;
-
-       cval = timer->cntv_cval;
-       now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
-       BUG_ON(timer_is_armed(timer));
-
-       if (cval <= now) {
-               /*
-                * Timer has already expired while we were not
-                * looking. Inject the interrupt and carry on.
-                */
-               kvm_timer_inject_irq(vcpu);
-               return;
-       }
-
-       ns = cyclecounter_cyc2ns(timecounter->cc, cval - now);
-       timer_arm(timer, ns);
-}
-
-void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
-{
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-
-       INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
-       hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-       timer->timer.function = kvm_timer_expire;
-       timer->irq = &timer_irq;
-}
-
-static void kvm_timer_init_interrupt(void *info)
-{
-       enable_percpu_irq(timer_irq.irq, 0);
-}
-
-
-static int kvm_timer_cpu_notify(struct notifier_block *self,
-                               unsigned long action, void *cpu)
-{
-       switch (action) {
-       case CPU_STARTING:
-       case CPU_STARTING_FROZEN:
-               kvm_timer_init_interrupt(NULL);
-               break;
-       case CPU_DYING:
-       case CPU_DYING_FROZEN:
-               disable_percpu_irq(timer_irq.irq);
-               break;
-       }
-
-       return NOTIFY_OK;
-}
-
-static struct notifier_block kvm_timer_cpu_nb = {
-       .notifier_call = kvm_timer_cpu_notify,
-};
-
-static const struct of_device_id arch_timer_of_match[] = {
-       { .compatible   = "arm,armv7-timer",    },
-       {},
-};
-
-int kvm_timer_hyp_init(void)
-{
-       struct device_node *np;
-       unsigned int ppi;
-       int err;
-
-       timecounter = arch_timer_get_timecounter();
-       if (!timecounter)
-               return -ENODEV;
-
-       np = of_find_matching_node(NULL, arch_timer_of_match);
-       if (!np) {
-               kvm_err("kvm_arch_timer: can't find DT node\n");
-               return -ENODEV;
-       }
-
-       ppi = irq_of_parse_and_map(np, 2);
-       if (!ppi) {
-               kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
-               err = -EINVAL;
-               goto out;
-       }
-
-       err = request_percpu_irq(ppi, kvm_arch_timer_handler,
-                                "kvm guest timer", kvm_get_running_vcpus());
-       if (err) {
-               kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
-                       ppi, err);
-               goto out;
-       }
-
-       timer_irq.irq = ppi;
-
-       err = register_cpu_notifier(&kvm_timer_cpu_nb);
-       if (err) {
-               kvm_err("Cannot register timer CPU notifier\n");
-               goto out_free;
-       }
-
-       wqueue = create_singlethread_workqueue("kvm_arch_timer");
-       if (!wqueue) {
-               err = -ENOMEM;
-               goto out_free;
-       }
-
-       kvm_info("%s IRQ%d\n", np->name, ppi);
-       on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
-
-       goto out;
-out_free:
-       free_percpu_irq(ppi, kvm_get_running_vcpus());
-out:
-       of_node_put(np);
-       return err;
-}
-
-void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
-{
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-
-       timer_disarm(timer);
-}
-
-int kvm_timer_init(struct kvm *kvm)
-{
-       if (timecounter && wqueue) {
-               kvm->arch.timer.cntvoff = kvm_phys_timer_read();
-               kvm->arch.timer.enabled = 1;
-       }
-
-       return 0;
-}
index 1d55afe7fd4bd0a8802e1e2efd6c891fa5631c94..d0c8ee654bbf5c580c128f498c4f13d02797f5e5 100644 (file)
@@ -82,12 +82,12 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
 /**
  * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus.
  */
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void)
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
 {
        return &kvm_arm_running_vcpu;
 }
 
-int kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void)
 {
        return 0;
 }
@@ -97,27 +97,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
 }
 
-void kvm_arch_hardware_disable(void *garbage)
-{
-}
-
 int kvm_arch_hardware_setup(void)
 {
        return 0;
 }
 
-void kvm_arch_hardware_unsetup(void)
-{
-}
-
 void kvm_arch_check_processor_compat(void *rtn)
 {
        *(int *)rtn = 0;
 }
 
-void kvm_arch_sync_events(struct kvm *kvm)
-{
-}
 
 /**
  * kvm_arch_init_vm - initializes a VM data structure
@@ -138,6 +127,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        if (ret)
                goto out_free_stage2_pgd;
 
+       kvm_timer_init(kvm);
+
        /* Mark the initial VMID generation invalid */
        kvm->arch.vmid_gen = 0;
 
@@ -153,15 +144,6 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
        return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
-                          struct kvm_memory_slot *dont)
-{
-}
-
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
-{
-       return 0;
-}
 
 /**
  * kvm_arch_destroy_vm - destroy the VM data structure
@@ -179,20 +161,25 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                        kvm->vcpus[i] = NULL;
                }
        }
+
+       kvm_vgic_destroy(kvm);
 }
 
-int kvm_dev_ioctl_check_extension(long ext)
+int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 {
        int r;
        switch (ext) {
        case KVM_CAP_IRQCHIP:
                r = vgic_present;
                break;
+       case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ARM_PSCI:
+       case KVM_CAP_ARM_PSCI_0_2:
+       case KVM_CAP_READONLY_MEM:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
@@ -220,29 +207,6 @@ long kvm_arch_dev_ioctl(struct file *filp,
        return -EINVAL;
 }
 
-int kvm_arch_prepare_memory_region(struct kvm *kvm,
-                                  struct kvm_memory_slot *memslot,
-                                  struct kvm_userspace_memory_region *mem,
-                                  enum kvm_mr_change change)
-{
-       return 0;
-}
-
-void kvm_arch_commit_memory_region(struct kvm *kvm,
-                                  struct kvm_userspace_memory_region *mem,
-                                  const struct kvm_memory_slot *old,
-                                  enum kvm_mr_change change)
-{
-}
-
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
-                                  struct kvm_memory_slot *slot)
-{
-}
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
@@ -281,6 +245,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
        kvm_mmu_free_memory_caches(vcpu);
        kvm_timer_vcpu_terminate(vcpu);
+       kvm_vgic_vcpu_destroy(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
@@ -296,26 +261,15 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
-       int ret;
-
        /* Force users to call KVM_ARM_VCPU_INIT */
        vcpu->arch.target = -1;
 
-       /* Set up VGIC */
-       ret = kvm_vgic_vcpu_init(vcpu);
-       if (ret)
-               return ret;
-
        /* Set up the timer */
        kvm_timer_vcpu_init(vcpu);
 
        return 0;
 }
 
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        vcpu->cpu = cpu;
@@ -335,6 +289,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+       /*
+        * The arch-generic KVM code expects the cpu field of a vcpu to be -1
+        * if the vcpu is no longer assigned to a cpu.  This is used for the
+        * optimized make_all_cpus_request path.
+        */
+       vcpu->cpu = -1;
+
        kvm_arm_set_running_vcpu(NULL);
 }
 
@@ -449,15 +410,17 @@ static void update_vttbr(struct kvm *kvm)
 
        /* update vttbr to be used with the new vmid */
        pgd_phys = virt_to_phys(kvm->arch.pgd);
+       BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
        vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
-       kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
-       kvm->arch.vttbr |= vmid;
+       kvm->arch.vttbr = pgd_phys | vmid;
 
        spin_unlock(&kvm_vmid_lock);
 }
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
+       int ret;
+
        if (likely(vcpu->arch.has_run_once))
                return 0;
 
@@ -467,22 +430,12 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
         * Initialize the VGIC before running a vcpu the first time on
         * this VM.
         */
-       if (irqchip_in_kernel(vcpu->kvm) &&
-           unlikely(!vgic_initialized(vcpu->kvm))) {
-               int ret = kvm_vgic_init(vcpu->kvm);
+       if (unlikely(!vgic_initialized(vcpu->kvm))) {
+               ret = kvm_vgic_init(vcpu->kvm);
                if (ret)
                        return ret;
        }
 
-       /*
-        * Handle the "start in power-off" case by calling into the
-        * PSCI code.
-        */
-       if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
-               *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
-               kvm_psci_call(vcpu);
-       }
-
        return 0;
 }
 
@@ -696,6 +649,24 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
        return -EINVAL;
 }
 
+static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
+                                        struct kvm_vcpu_init *init)
+{
+       int ret;
+
+       ret = kvm_vcpu_set_target(vcpu, init);
+       if (ret)
+               return ret;
+
+       /*
+        * Handle the "start in power-off" case by marking the VCPU as paused.
+        */
+       if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+               vcpu->arch.pause = true;
+
+       return 0;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
 {
@@ -709,8 +680,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                if (copy_from_user(&init, argp, sizeof(init)))
                        return -EFAULT;
 
-               return kvm_vcpu_set_target(vcpu, &init);
-
+               return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
        }
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
@@ -768,7 +738,7 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
        case KVM_ARM_DEVICE_VGIC_V2:
                if (!vgic_present)
                        return -ENXIO;
-               return kvm_vgic_set_addr(kvm, type, dev_addr->addr);
+               return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
        default:
                return -ENODEV;
        }
@@ -794,6 +764,19 @@ long kvm_arch_vm_ioctl(struct file *filp,
                        return -EFAULT;
                return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
        }
+       case KVM_ARM_PREFERRED_TARGET: {
+               int err;
+               struct kvm_vcpu_init init;
+
+               err = kvm_vcpu_preferred_target(&init);
+               if (err)
+                       return err;
+
+               if (copy_to_user(argp, &init, sizeof(init)))
+                       return -EFAULT;
+
+               return 0;
+       }
        default:
                return -EINVAL;
        }
@@ -801,8 +784,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
 
 static void cpu_init_hyp_mode(void *dummy)
 {
-       unsigned long long boot_pgd_ptr;
-       unsigned long long pgd_ptr;
+       phys_addr_t boot_pgd_ptr;
+       phys_addr_t pgd_ptr;
        unsigned long hyp_stack_ptr;
        unsigned long stack_page;
        unsigned long vector_ptr;
@@ -810,8 +793,8 @@ static void cpu_init_hyp_mode(void *dummy)
        /* Switch from the HYP stub to our own HYP init vector */
        __hyp_set_vectors(kvm_get_idmap_vector());
 
-       boot_pgd_ptr = (unsigned long long)kvm_mmu_get_boot_httbr();
-       pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
+       boot_pgd_ptr = kvm_mmu_get_boot_httbr();
+       pgd_ptr = kvm_mmu_get_httbr();
        stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
        hyp_stack_ptr = stack_page + PAGE_SIZE;
        vector_ptr = (unsigned long)__kvm_hyp_vector;
@@ -825,7 +808,8 @@ static int hyp_init_cpu_notify(struct notifier_block *self,
        switch (action) {
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
-               cpu_init_hyp_mode(NULL);
+               if (__hyp_get_vectors() == hyp_default_vectors)
+                       cpu_init_hyp_mode(NULL);
                break;
        }
 
@@ -841,7 +825,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
                                    unsigned long cmd,
                                    void *v)
 {
-       if (cmd == CPU_PM_EXIT) {
+       if (cmd == CPU_PM_EXIT &&
+           __hyp_get_vectors() == hyp_default_vectors) {
                cpu_init_hyp_mode(NULL);
                return NOTIFY_OK;
        }
index db9cf692d4dded3e2a6cc7e5622ba90ee5bef2e8..7928dbdf210239a71f4f35e8ef289e9c2e8f0375 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/kvm_host.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
 #include <trace/events/kvm.h>
@@ -43,6 +44,31 @@ static u32 cache_levels;
 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
 #define CSSELR_MAX 12
 
+/*
+ * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
+ * of cp15 registers can be viewed either as couple of two u32 registers
+ * or one u64 register. Current u64 register encoding is that least
+ * significant u32 word is followed by most significant u32 word.
+ */
+static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
+                                      const struct coproc_reg *r,
+                                      u64 val)
+{
+       vcpu->arch.cp15[r->reg] = val & 0xffffffff;
+       vcpu->arch.cp15[r->reg + 1] = val >> 32;
+}
+
+static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
+                                     const struct coproc_reg *r)
+{
+       u64 val;
+
+       val = vcpu->arch.cp15[r->reg + 1];
+       val = val << 32;
+       val = val | vcpu->arch.cp15[r->reg];
+       return val;
+}
+
 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        kvm_inject_undefined(vcpu);
@@ -71,6 +97,98 @@ int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
        return 1;
 }
 
+static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+       /*
+        * Compute guest MPIDR. We build a virtual cluster out of the
+        * vcpu_id, but we read the 'U' bit from the underlying
+        * hardware directly.
+        */
+       vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
+                                    ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
+                                    (vcpu->vcpu_id & 3));
+}
+
+/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
+static bool access_actlr(struct kvm_vcpu *vcpu,
+                        const struct coproc_params *p,
+                        const struct coproc_reg *r)
+{
+       if (p->is_write)
+               return ignore_write(vcpu, p);
+
+       *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
+       return true;
+}
+
+/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
+static bool access_cbar(struct kvm_vcpu *vcpu,
+                       const struct coproc_params *p,
+                       const struct coproc_reg *r)
+{
+       if (p->is_write)
+               return write_to_read_only(vcpu, p);
+       return read_zero(vcpu, p);
+}
+
+/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
+static bool access_l2ctlr(struct kvm_vcpu *vcpu,
+                         const struct coproc_params *p,
+                         const struct coproc_reg *r)
+{
+       if (p->is_write)
+               return ignore_write(vcpu, p);
+
+       *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
+       return true;
+}
+
+static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+       u32 l2ctlr, ncores;
+
+       asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
+       l2ctlr &= ~(3 << 24);
+       ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
+       /* How many cores in the current cluster and the next ones */
+       ncores -= (vcpu->vcpu_id & ~3);
+       /* Cap it to the maximum number of cores in a single cluster */
+       ncores = min(ncores, 3U);
+       l2ctlr |= (ncores & 3) << 24;
+
+       vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
+}
+
+static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+       u32 actlr;
+
+       /* ACTLR contains SMP bit: make sure you create all cpus first! */
+       asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
+       /* Make the SMP bit consistent with the guest configuration */
+       if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
+               actlr |= 1U << 6;
+       else
+               actlr &= ~(1U << 6);
+
+       vcpu->arch.cp15[c1_ACTLR] = actlr;
+}
+
+/*
+ * TRM entries: A7:4.3.50, A15:4.3.49
+ * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
+ */
+static bool access_l2ectlr(struct kvm_vcpu *vcpu,
+                          const struct coproc_params *p,
+                          const struct coproc_reg *r)
+{
+       if (p->is_write)
+               return ignore_write(vcpu, p);
+
+       *vcpu_reg(vcpu, p->Rt1) = 0;
+       return true;
+}
+
 /* See note at ARM ARM B1.14.4 */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
@@ -112,6 +230,44 @@ done:
        return true;
 }
 
+/*
+ * Generic accessor for VM registers. Only called as long as HCR_TVM
+ * is set.
+ */
+static bool access_vm_reg(struct kvm_vcpu *vcpu,
+                         const struct coproc_params *p,
+                         const struct coproc_reg *r)
+{
+       BUG_ON(!p->is_write);
+
+       vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
+       if (p->is_64bit)
+               vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
+
+       return true;
+}
+
+/*
+ * SCTLR accessor. Only called as long as HCR_TVM is set.  If the
+ * guest enables the MMU, we stop trapping the VM sys_regs and leave
+ * it in complete control of the caches.
+ *
+ * Used by the cpu-specific code.
+ */
+bool access_sctlr(struct kvm_vcpu *vcpu,
+                 const struct coproc_params *p,
+                 const struct coproc_reg *r)
+{
+       access_vm_reg(vcpu, p, r);
+
+       if (vcpu_has_cache_enabled(vcpu)) {     /* MMU+Caches enabled? */
+               vcpu->arch.hcr &= ~HCR_TVM;
+               stage2_flush_vm(vcpu->kvm);
+       }
+
+       return true;
+}
+
 /*
  * We could trap ID_DFR0 and tell the guest we don't support performance
  * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
@@ -153,37 +309,52 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
  *            registers preceding 32-bit ones.
  */
 static const struct coproc_reg cp15_regs[] = {
+       /* MPIDR: we use VMPIDR for guest access. */
+       { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
+                       NULL, reset_mpidr, c0_MPIDR },
+
        /* CSSELR: swapped by interrupt.S. */
        { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
                        NULL, reset_unknown, c0_CSSELR },
 
-       /* TTBR0/TTBR1: swapped by interrupt.S. */
-       { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
-       { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+       /* ACTLR: trapped by HCR.TAC bit. */
+       { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
+                       access_actlr, reset_actlr, c1_ACTLR },
 
-       /* TTBCR: swapped by interrupt.S. */
+       /* CPACR: swapped by interrupt.S. */
+       { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
+                       NULL, reset_val, c1_CPACR, 0x00000000 },
+
+       /* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
+       { CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
+       { CRn(2), CRm( 0), Op1( 0), Op2( 0), is32,
+                       access_vm_reg, reset_unknown, c2_TTBR0 },
+       { CRn(2), CRm( 0), Op1( 0), Op2( 1), is32,
+                       access_vm_reg, reset_unknown, c2_TTBR1 },
        { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
-                       NULL, reset_val, c2_TTBCR, 0x00000000 },
+                       access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
+       { CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },
+
 
        /* DACR: swapped by interrupt.S. */
        { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
-                       NULL, reset_unknown, c3_DACR },
+                       access_vm_reg, reset_unknown, c3_DACR },
 
        /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
        { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
-                       NULL, reset_unknown, c5_DFSR },
+                       access_vm_reg, reset_unknown, c5_DFSR },
        { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
-                       NULL, reset_unknown, c5_IFSR },
+                       access_vm_reg, reset_unknown, c5_IFSR },
        { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
-                       NULL, reset_unknown, c5_ADFSR },
+                       access_vm_reg, reset_unknown, c5_ADFSR },
        { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
-                       NULL, reset_unknown, c5_AIFSR },
+                       access_vm_reg, reset_unknown, c5_AIFSR },
 
        /* DFAR/IFAR: swapped by interrupt.S. */
        { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
-                       NULL, reset_unknown, c6_DFAR },
+                       access_vm_reg, reset_unknown, c6_DFAR },
        { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
-                       NULL, reset_unknown, c6_IFAR },
+                       access_vm_reg, reset_unknown, c6_IFAR },
 
        /* PAR swapped by interrupt.S */
        { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
@@ -194,6 +365,13 @@ static const struct coproc_reg cp15_regs[] = {
        { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
        { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
        { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
+       /*
+        * L2CTLR access (guest wants to know #CPUs).
+        */
+       { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
+                       access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
+       { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
+
        /*
         * Dummy performance monitor implementation.
         */
@@ -213,9 +391,15 @@ static const struct coproc_reg cp15_regs[] = {
 
        /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
        { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
-                       NULL, reset_unknown, c10_PRRR},
+                       access_vm_reg, reset_unknown, c10_PRRR},
        { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
-                       NULL, reset_unknown, c10_NMRR},
+                       access_vm_reg, reset_unknown, c10_NMRR},
+
+       /* AMAIR0/AMAIR1: swapped by interrupt.S. */
+       { CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
+                       access_vm_reg, reset_unknown, c10_AMAIR0},
+       { CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
+                       access_vm_reg, reset_unknown, c10_AMAIR1},
 
        /* VBAR: swapped by interrupt.S. */
        { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
@@ -223,7 +407,7 @@ static const struct coproc_reg cp15_regs[] = {
 
        /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
        { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
-                       NULL, reset_val, c13_CID, 0x00000000 },
+                       access_vm_reg, reset_val, c13_CID, 0x00000000 },
        { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_unknown, c13_TID_URW },
        { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
@@ -234,6 +418,9 @@ static const struct coproc_reg cp15_regs[] = {
        /* CNTKCTL: swapped by interrupt.S. */
        { CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
                        NULL, reset_val, c14_CNTKCTL, 0x00000000 },
+
+       /* The Configuration Base Address Register. */
+       { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
 };
 
 /* Target specific emulation tables */
@@ -241,6 +428,12 @@ static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
 
 void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
 {
+       unsigned int i;
+
+       for (i = 1; i < table->num; i++)
+               BUG_ON(cmp_reg(&table->table[i-1],
+                              &table->table[i]) >= 0);
+
        target_tables[table->target] = table;
 }
 
@@ -323,7 +516,7 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        struct coproc_params params;
 
-       params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+       params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
        params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
        params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
        params.is_64bit = true;
@@ -331,7 +524,7 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
        params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
        params.Op2 = 0;
        params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
-       params.CRn = 0;
+       params.CRm = 0;
 
        return emulate_cp15(vcpu, &params);
 }
@@ -514,17 +707,23 @@ static struct coproc_reg invariant_cp15[] = {
        { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
 };
 
+/*
+ * Reads a register value from a userspace address to a kernel
+ * variable. Make sure that register size matches sizeof(*__val).
+ */
 static int reg_from_user(void *val, const void __user *uaddr, u64 id)
 {
-       /* This Just Works because we are little endian. */
        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
 }
 
+/*
+ * Writes a register value to a userspace address from a kernel variable.
+ * Make sure that register size matches sizeof(*__val).
+ */
 static int reg_to_user(void __user *uaddr, const void *val, u64 id)
 {
-       /* This Just Works because we are little endian. */
        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
@@ -534,6 +733,7 @@ static int get_invariant_cp15(u64 id, void __user *uaddr)
 {
        struct coproc_params params;
        const struct coproc_reg *r;
+       int ret;
 
        if (!index_to_params(id, &params))
                return -ENOENT;
@@ -542,7 +742,15 @@ static int get_invariant_cp15(u64 id, void __user *uaddr)
        if (!r)
                return -ENOENT;
 
-       return reg_to_user(uaddr, &r->val, id);
+       ret = -ENOENT;
+       if (KVM_REG_SIZE(id) == 4) {
+               u32 val = r->val;
+
+               ret = reg_to_user(uaddr, &val, id);
+       } else if (KVM_REG_SIZE(id) == 8) {
+               ret = reg_to_user(uaddr, &r->val, id);
+       }
+       return ret;
 }
 
 static int set_invariant_cp15(u64 id, void __user *uaddr)
@@ -550,7 +758,7 @@ static int set_invariant_cp15(u64 id, void __user *uaddr)
        struct coproc_params params;
        const struct coproc_reg *r;
        int err;
-       u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
+       u64 val;
 
        if (!index_to_params(id, &params))
                return -ENOENT;
@@ -558,7 +766,16 @@ static int set_invariant_cp15(u64 id, void __user *uaddr)
        if (!r)
                return -ENOENT;
 
-       err = reg_from_user(&val, uaddr, id);
+       err = -ENOENT;
+       if (KVM_REG_SIZE(id) == 4) {
+               u32 val32;
+
+               err = reg_from_user(&val32, uaddr, id);
+               if (!err)
+                       val = val32;
+       } else if (KVM_REG_SIZE(id) == 8) {
+               err = reg_from_user(&val, uaddr, id);
+       }
        if (err)
                return err;
 
@@ -574,7 +791,7 @@ static bool is_valid_cache(u32 val)
        u32 level, ctype;
 
        if (val >= CSSELR_MAX)
-               return -ENOENT;
+               return false;
 
        /* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
         level = (val >> 1);
@@ -836,6 +1053,7 @@ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
        const struct coproc_reg *r;
        void __user *uaddr = (void __user *)(long)reg->addr;
+       int ret;
 
        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_get(reg->id, uaddr);
@@ -847,14 +1065,24 @@ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        if (!r)
                return get_invariant_cp15(reg->id, uaddr);
 
-       /* Note: copies two regs if size is 64 bit. */
-       return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
+       ret = -ENOENT;
+       if (KVM_REG_SIZE(reg->id) == 8) {
+               u64 val;
+
+               val = vcpu_cp15_reg64_get(vcpu, r);
+               ret = reg_to_user(uaddr, &val, reg->id);
+       } else if (KVM_REG_SIZE(reg->id) == 4) {
+               ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
+       }
+
+       return ret;
 }
 
 int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
        const struct coproc_reg *r;
        void __user *uaddr = (void __user *)(long)reg->addr;
+       int ret;
 
        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_set(reg->id, uaddr);
@@ -866,8 +1094,18 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        if (!r)
                return set_invariant_cp15(reg->id, uaddr);
 
-       /* Note: copies two regs if size is 64 bit */
-       return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
+       ret = -ENOENT;
+       if (KVM_REG_SIZE(reg->id) == 8) {
+               u64 val;
+
+               ret = reg_from_user(&val, uaddr, reg->id);
+               if (!ret)
+                       vcpu_cp15_reg64_set(vcpu, r, val);
+       } else if (KVM_REG_SIZE(reg->id) == 4) {
+               ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
+       }
+
+       return ret;
 }
 
 static unsigned int num_demux_regs(void)
index 0461d5c8d3de4f99c3ecfef669340ec6fa8e0411..1a44bbe39643f519ec986d43dcd3e416881d13a9 100644 (file)
@@ -58,8 +58,8 @@ static inline void print_cp_instr(const struct coproc_params *p)
 {
        /* Look, we even formatted it for you to paste into the table! */
        if (p->is_64bit) {
-               kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n",
-                             p->CRm, p->Op1, p->is_write ? "write" : "read");
+               kvm_pr_unimpl(" { CRm64(%2lu), Op1(%2lu), is64, func_%s },\n",
+                             p->CRn, p->Op1, p->is_write ? "write" : "read");
        } else {
                kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
                              " func_%s },\n",
@@ -135,13 +135,13 @@ static inline int cmp_reg(const struct coproc_reg *i1,
                return -1;
        if (i1->CRn != i2->CRn)
                return i1->CRn - i2->CRn;
-       if (i1->is_64 != i2->is_64)
-               return i2->is_64 - i1->is_64;
        if (i1->CRm != i2->CRm)
                return i1->CRm - i2->CRm;
        if (i1->Op1 != i2->Op1)
                return i1->Op1 - i2->Op1;
-       return i1->Op2 - i2->Op2;
+       if (i1->Op2 != i2->Op2)
+               return i1->Op2 - i2->Op2;
+       return i2->is_64 - i1->is_64;
 }
 
 
@@ -153,4 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 #define is64           .is_64 = true
 #define is32           .is_64 = false
 
+bool access_sctlr(struct kvm_vcpu *vcpu,
+                 const struct coproc_params *p,
+                 const struct coproc_reg *r);
+
 #endif /* __ARM_KVM_COPROC_LOCAL_H__ */
index cf93472b9dd60daf3da620cf3a44a9ff65a6eac6..e6f4ae48bda968f8cac7caf6c94ffd1413631436 100644 (file)
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  */
 #include <linux/kvm_host.h>
-#include <asm/cputype.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_host.h>
-#include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
 #include <linux/init.h>
 
-static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
-       /*
-        * Compute guest MPIDR:
-        * (Even if we present only one VCPU to the guest on an SMP
-        * host we don't set the U bit in the MPIDR, or vice versa, as
-        * revealing the underlying hardware properties is likely to
-        * be the best choice).
-        */
-       vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK)
-               | (vcpu->vcpu_id & MPIDR_LEVEL_MASK);
-}
-
 #include "coproc.h"
 
-/* A15 TRM 4.3.28: RO WI */
-static bool access_actlr(struct kvm_vcpu *vcpu,
-                        const struct coproc_params *p,
-                        const struct coproc_reg *r)
-{
-       if (p->is_write)
-               return ignore_write(vcpu, p);
-
-       *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
-       return true;
-}
-
-/* A15 TRM 4.3.60: R/O. */
-static bool access_cbar(struct kvm_vcpu *vcpu,
-                       const struct coproc_params *p,
-                       const struct coproc_reg *r)
-{
-       if (p->is_write)
-               return write_to_read_only(vcpu, p);
-       return read_zero(vcpu, p);
-}
-
-/* A15 TRM 4.3.48: R/O WI. */
-static bool access_l2ctlr(struct kvm_vcpu *vcpu,
-                         const struct coproc_params *p,
-                         const struct coproc_reg *r)
-{
-       if (p->is_write)
-               return ignore_write(vcpu, p);
-
-       *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
-       return true;
-}
-
-static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
-       u32 l2ctlr, ncores;
-
-       asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
-       l2ctlr &= ~(3 << 24);
-       ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
-       l2ctlr |= (ncores & 3) << 24;
-
-       vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
-}
-
-static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
-       u32 actlr;
-
-       /* ACTLR contains SMP bit: make sure you create all cpus first! */
-       asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
-       /* Make the SMP bit consistent with the guest configuration */
-       if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
-               actlr |= 1U << 6;
-       else
-               actlr &= ~(1U << 6);
-
-       vcpu->arch.cp15[c1_ACTLR] = actlr;
-}
-
-/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */
-static bool access_l2ectlr(struct kvm_vcpu *vcpu,
-                          const struct coproc_params *p,
-                          const struct coproc_reg *r)
-{
-       if (p->is_write)
-               return ignore_write(vcpu, p);
-
-       *vcpu_reg(vcpu, p->Rt1) = 0;
-       return true;
-}
-
 /*
  * A15-specific CP15 registers.
  * CRn denotes the primary register number, but is copied to the CRm in the
@@ -121,29 +32,9 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
  *            registers preceding 32-bit ones.
  */
 static const struct coproc_reg a15_regs[] = {
-       /* MPIDR: we use VMPIDR for guest access. */
-       { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
-                       NULL, reset_mpidr, c0_MPIDR },
-
        /* SCTLR: swapped by interrupt.S. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-                       NULL, reset_val, c1_SCTLR, 0x00C50078 },
-       /* ACTLR: trapped by HCR.TAC bit. */
-       { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
-                       access_actlr, reset_actlr, c1_ACTLR },
-       /* CPACR: swapped by interrupt.S. */
-       { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
-                       NULL, reset_val, c1_CPACR, 0x00000000 },
-
-       /*
-        * L2CTLR access (guest wants to know #CPUs).
-        */
-       { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
-                       access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
-       { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
-
-       /* The Configuration Base Address Register. */
-       { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
+                       access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
 };
 
 static struct kvm_coproc_target_table a15_target_table = {
@@ -154,12 +45,6 @@ static struct kvm_coproc_target_table a15_target_table = {
 
 static int __init coproc_a15_init(void)
 {
-       unsigned int i;
-
-       for (i = 1; i < ARRAY_SIZE(a15_regs); i++)
-               BUG_ON(cmp_reg(&a15_regs[i-1],
-                              &a15_regs[i]) >= 0);
-
        kvm_register_target_coproc_table(&a15_target_table);
        return 0;
 }
diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c
new file mode 100644 (file)
index 0000000..17fc7cd
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Copyright (C) 2013 - ARM Ltd
+ *
+ * Authors: Rusty Russell <rusty@rustcorp.au>
+ *          Christoffer Dall <c.dall@virtualopensystems.com>
+ *          Jonathan Austin <jonathan.austin@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/kvm_host.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
+#include <linux/init.h>
+
+#include "coproc.h"
+
+/*
+ * Cortex-A7 specific CP15 registers.
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ *            registers preceding 32-bit ones.
+ */
+static const struct coproc_reg a7_regs[] = {
+       /* SCTLR: swapped by interrupt.S. */
+       { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
+                       access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
+};
+
+static struct kvm_coproc_target_table a7_target_table = {
+       .target = KVM_ARM_TARGET_CORTEX_A7,
+       .table = a7_regs,
+       .num = ARRAY_SIZE(a7_regs),
+};
+
+static int __init coproc_a7_init(void)
+{
+       kvm_register_target_coproc_table(&a7_target_table);
+       return 0;
+}
+late_initcall(coproc_a7_init);
index bdede9e7da516a43b5a3d681850727860f0534ab..d6c005283678fe5061a50cc8f5efd1febcc0f27b 100644 (file)
@@ -354,7 +354,7 @@ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
        *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
 
        if (is_pabt) {
-               /* Set DFAR and DFSR */
+               /* Set IFAR and IFSR */
                vcpu->arch.cp15[c6_IFAR] = addr;
                is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
                /* Always give debug fault for now - should give guest a clue */
index 152d03612181d16d5fef5e1e84da8d2c178fbf58..cc0b78769bd8ab40e5237b88aaedb14068f767e0 100644 (file)
@@ -38,6 +38,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
+       vcpu->arch.hcr = HCR_GUEST_MASK;
        return 0;
 }
 
@@ -109,6 +110,73 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        return -EINVAL;
 }
 
+#ifndef CONFIG_KVM_ARM_TIMER
+
+#define NUM_TIMER_REGS 0
+
+static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+       return 0;
+}
+
+static bool is_timer_reg(u64 index)
+{
+       return false;
+}
+
+#else
+
+#define NUM_TIMER_REGS 3
+
+static bool is_timer_reg(u64 index)
+{
+       switch (index) {
+       case KVM_REG_ARM_TIMER_CTL:
+       case KVM_REG_ARM_TIMER_CNT:
+       case KVM_REG_ARM_TIMER_CVAL:
+               return true;
+       }
+       return false;
+}
+
+static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+       if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
+               return -EFAULT;
+       uindices++;
+       if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
+               return -EFAULT;
+       uindices++;
+       if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
+               return -EFAULT;
+
+       return 0;
+}
+
+#endif
+
+static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       void __user *uaddr = (void __user *)(long)reg->addr;
+       u64 val;
+       int ret;
+
+       ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
+       if (ret != 0)
+               return -EFAULT;
+
+       return kvm_arm_timer_set_reg(vcpu, reg->id, val);
+}
+
+static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       void __user *uaddr = (void __user *)(long)reg->addr;
+       u64 val;
+
+       val = kvm_arm_timer_get_reg(vcpu, reg->id);
+       return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+}
+
 static unsigned long num_core_regs(void)
 {
        return sizeof(struct kvm_regs) / sizeof(u32);
@@ -121,7 +189,8 @@ static unsigned long num_core_regs(void)
  */
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
-       return num_core_regs() + kvm_arm_num_coproc_regs(vcpu);
+       return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
+               + NUM_TIMER_REGS;
 }
 
 /**
@@ -133,6 +202,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
        unsigned int i;
        const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
+       int ret;
 
        for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
                if (put_user(core_reg | i, uindices))
@@ -140,6 +210,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
                uindices++;
        }
 
+       ret = copy_timer_indices(vcpu, uindices);
+       if (ret)
+               return ret;
+       uindices += NUM_TIMER_REGS;
+
        return kvm_arm_copy_coproc_indices(vcpu, uindices);
 }
 
@@ -153,6 +228,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
                return get_core_reg(vcpu, reg);
 
+       if (is_timer_reg(reg->id))
+               return get_timer_reg(vcpu, reg);
+
        return kvm_arm_coproc_get_reg(vcpu, reg);
 }
 
@@ -166,6 +244,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
                return set_core_reg(vcpu, reg);
 
+       if (is_timer_reg(reg->id))
+               return set_timer_reg(vcpu, reg);
+
        return kvm_arm_coproc_set_reg(vcpu, reg);
 }
 
@@ -183,13 +264,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 int __attribute_const__ kvm_target_cpu(void)
 {
-       unsigned long implementor = read_cpuid_implementor();
-       unsigned long part_number = read_cpuid_part_number();
-
-       if (implementor != ARM_CPU_IMP_ARM)
-               return -EINVAL;
-
-       switch (part_number) {
+       switch (read_cpuid_part()) {
+       case ARM_CPU_PART_CORTEX_A7:
+               return KVM_ARM_TARGET_CORTEX_A7;
        case ARM_CPU_PART_CORTEX_A15:
                return KVM_ARM_TARGET_CORTEX_A15;
        default:
@@ -202,7 +279,7 @@ int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 {
        unsigned int i;
 
-       /* We can only do a cortex A15 for now. */
+       /* We can only cope with guest==host and only on A15/A7 (for now). */
        if (init->target != kvm_target_cpu())
                return -EINVAL;
 
@@ -222,6 +299,26 @@ int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
        return kvm_reset_vcpu(vcpu);
 }
 
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
+{
+       int target = kvm_target_cpu();
+
+       if (target < 0)
+               return -ENODEV;
+
+       memset(init, 0, sizeof(*init));
+
+       /*
+        * For now, we don't return any features.
+        * In future, we might use features to return target
+        * specific features available for the preferred
+        * target type.
+        */
+       init->target = (__u32)target;
+
+       return 0;
+}
+
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
        return -EINVAL;
index 3d74a0be47dbfd81bf3950ba4bad75ed1b1b0f76..4c979d466cc1681c4b3efc70623345eee5974b78 100644 (file)
@@ -26,8 +26,6 @@
 
 #include "trace.h"
 
-#include "trace.h"
-
 typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
 
 static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
@@ -40,21 +38,22 @@ static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
+       int ret;
+
        trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
                      kvm_vcpu_hvc_get_imm(vcpu));
 
-       if (kvm_psci_call(vcpu))
+       ret = kvm_psci_call(vcpu);
+       if (ret < 0) {
+               kvm_inject_undefined(vcpu);
                return 1;
+       }
 
-       kvm_inject_undefined(vcpu);
-       return 1;
+       return ret;
 }
 
 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-       if (kvm_psci_call(vcpu))
-               return 1;
-
        kvm_inject_undefined(vcpu);
        return 1;
 }
@@ -76,23 +75,29 @@ static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 /**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * kvm_handle_wfx - handle a WFI or WFE instructions trapped in guests
  * @vcpu:      the vcpu pointer
  * @run:       the kvm_run structure pointer
  *
- * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
- * halt execution of world-switches and schedule other host processes until
- * there is an incoming IRQ or FIQ to the VM.
+ * WFE: Yield the CPU and come back to this vcpu when the scheduler
+ * decides to.
+ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
+ * world-switches and schedule other host processes until there is an
+ * incoming IRQ or FIQ to the VM.
  */
-static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        trace_kvm_wfi(*vcpu_pc(vcpu));
-       kvm_vcpu_block(vcpu);
+       if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE)
+               kvm_vcpu_on_spin(vcpu);
+       else
+               kvm_vcpu_block(vcpu);
+
        return 1;
 }
 
 static exit_handle_fn arm_exit_handlers[] = {
-       [HSR_EC_WFI]            = kvm_handle_wfi,
+       [HSR_EC_WFI]            = kvm_handle_wfx,
        [HSR_EC_CP15_32]        = kvm_handle_cp15_32,
        [HSR_EC_CP15_64]        = kvm_handle_cp15_64,
        [HSR_EC_CP14_MR]        = kvm_handle_cp14_access,
index f048338135f7a5b20bf52371d03a3af0b922b68f..2cc14dfad04991fb2670e0c9469462b2951eacc9 100644 (file)
@@ -71,7 +71,7 @@ __do_hyp_init:
        bne     phase2                  @ Yes, second stage init
 
        @ Set the HTTBR to point to the hypervisor PGD pointer passed
-       mcrr    p15, 4, r2, r3, c2
+       mcrr    p15, 4, rr_lo_hi(r2, r3), c2
 
        @ Set the HTCR and VTCR to the same shareability and cacheability
        @ settings as the non-secure TTBCR and with T0SZ == 0.
@@ -137,12 +137,12 @@ phase2:
        mov     pc, r0
 
 target:        @ We're now in the trampoline code, switch page tables
-       mcrr    p15, 4, r2, r3, c2
+       mcrr    p15, 4, rr_lo_hi(r2, r3), c2
        isb
 
        @ Invalidate the old TLBs
        mcr     p15, 4, r0, c8, c7, 0   @ TLBIALLH
-       dsb
+       dsb     ish
 
        eret
 
index 16cd4ba5d7fd6d0ff0781da759712167cbc17508..01dcb0e752d9f04cbb1492378cc6382397c9c8e8 100644 (file)
@@ -52,10 +52,10 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
        dsb     ishst
        add     r0, r0, #KVM_VTTBR
        ldrd    r2, r3, [r0]
-       mcrr    p15, 6, r2, r3, c2      @ Write VTTBR
+       mcrr    p15, 6, rr_lo_hi(r2, r3), c2    @ Write VTTBR
        isb
        mcr     p15, 0, r0, c8, c3, 0   @ TLBIALLIS (rt ignored)
-       dsb
+       dsb     ish
        isb
        mov     r2, #0
        mov     r3, #0
@@ -79,7 +79,7 @@ ENTRY(__kvm_flush_vm_context)
        mcr     p15, 4, r0, c8, c3, 4
        /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
        mcr     p15, 0, r0, c7, c1, 0
-       dsb
+       dsb     ish
        isb                             @ Not necessary if followed by eret
 
        bx      lr
@@ -135,7 +135,7 @@ ENTRY(__kvm_vcpu_run)
        ldr     r1, [vcpu, #VCPU_KVM]
        add     r1, r1, #KVM_VTTBR
        ldrd    r2, r3, [r1]
-       mcrr    p15, 6, r2, r3, c2      @ Write VTTBR
+       mcrr    p15, 6, rr_lo_hi(r2, r3), c2    @ Write VTTBR
 
        @ We're all done, just restore the GPRs and go to the guest
        restore_guest_regs
@@ -199,8 +199,13 @@ after_vfp_restore:
 
        restore_host_regs
        clrex                           @ Clear exclusive monitor
+#ifndef CONFIG_CPU_ENDIAN_BE8
        mov     r0, r1                  @ Return the return code
        mov     r1, #0                  @ Clear upper bits in return value
+#else
+       @ r1 already has return code
+       mov     r0, #0                  @ Clear upper bits in return value
+#endif /* CONFIG_CPU_ENDIAN_BE8 */
        bx      lr                      @ return to IOCTL
 
 /********************************************************************
@@ -220,6 +225,10 @@ after_vfp_restore:
  * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
  * passed in r0 and r1.
  *
+ * A function pointer with a value of 0xffffffff has a special meaning,
+ * and is used to implement __hyp_get_vectors in the same way as in
+ * arch/arm/kernel/hyp_stub.S.
+ *
  * The calling convention follows the standard AAPCS:
  *   r0 - r3: caller save
  *   r12:     caller save
@@ -363,6 +372,11 @@ hyp_hvc:
 host_switch_to_hyp:
        pop     {r0, r1, r2}
 
+       /* Check for __hyp_get_vectors */
+       cmp     r0, #-1
+       mrceq   p15, 4, r0, c12, c0, 0  @ get HVBAR
+       beq     1f
+
        push    {lr}
        mrs     lr, SPSR
        push    {lr}
@@ -378,7 +392,7 @@ THUMB(      orr     lr, #1)
        pop     {lr}
        msr     SPSR_csxf, lr
        pop     {lr}
-       eret
+1:     eret
 
 guest_trap:
        load_vcpu                       @ Load VCPU pointer to r0
@@ -492,10 +506,10 @@ __kvm_hyp_code_end:
        .section ".rodata"
 
 und_die_str:
-       .ascii  "unexpected undefined exception in Hyp mode at: %#08x"
+       .ascii  "unexpected undefined exception in Hyp mode at: %#08x\n"
 pabt_die_str:
-       .ascii  "unexpected prefetch abort in Hyp mode at: %#08x"
+       .ascii  "unexpected prefetch abort in Hyp mode at: %#08x\n"
 dabt_die_str:
-       .ascii  "unexpected data abort in Hyp mode at: %#08x"
+       .ascii  "unexpected data abort in Hyp mode at: %#08x\n"
 svc_die_str:
-       .ascii  "unexpected HVC/SVC trap in Hyp mode at: %#08x"
+       .ascii  "unexpected HVC/SVC trap in Hyp mode at: %#08x\n"
index 6f18695a09cb5b50d8d849c9f2b486e8a01529c9..98c8c5b9a87f392a0410a1ca51481e93172bad8f 100644 (file)
@@ -1,4 +1,5 @@
 #include <linux/irqchip/arm-gic.h>
+#include <asm/assembler.h>
 
 #define VCPU_USR_REG(_reg_nr)  (VCPU_USR_REGS + (_reg_nr * 4))
 #define VCPU_USR_SP            (VCPU_USR_REG(13))
@@ -303,13 +304,17 @@ vcpu      .req    r0              @ vcpu pointer always in r0
 
        mrc     p15, 0, r2, c14, c1, 0  @ CNTKCTL
        mrrc    p15, 0, r4, r5, c7      @ PAR
+       mrc     p15, 0, r6, c10, c3, 0  @ AMAIR0
+       mrc     p15, 0, r7, c10, c3, 1  @ AMAIR1
 
        .if \store_to_vcpu == 0
-       push    {r2,r4-r5}
+       push    {r2,r4-r7}
        .else
        str     r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
        add     r12, vcpu, #CP15_OFFSET(c7_PAR)
        strd    r4, r5, [r12]
+       str     r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
+       str     r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
        .endif
 .endm
 
@@ -322,15 +327,19 @@ vcpu      .req    r0              @ vcpu pointer always in r0
  */
 .macro write_cp15_state read_from_vcpu
        .if \read_from_vcpu == 0
-       pop     {r2,r4-r5}
+       pop     {r2,r4-r7}
        .else
        ldr     r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
        add     r12, vcpu, #CP15_OFFSET(c7_PAR)
        ldrd    r4, r5, [r12]
+       ldr     r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
+       ldr     r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
        .endif
 
        mcr     p15, 0, r2, c14, c1, 0  @ CNTKCTL
        mcrr    p15, 0, r4, r5, c7      @ PAR
+       mcr     p15, 0, r6, c10, c3, 0  @ AMAIR0
+       mcr     p15, 0, r7, c10, c3, 1  @ AMAIR1
 
        .if \read_from_vcpu == 0
        pop     {r2-r12}
@@ -412,15 +421,23 @@ vcpu      .req    r0              @ vcpu pointer always in r0
        ldr     r8, [r2, #GICH_ELRSR0]
        ldr     r9, [r2, #GICH_ELRSR1]
        ldr     r10, [r2, #GICH_APR]
-
-       str     r3, [r11, #VGIC_CPU_HCR]
-       str     r4, [r11, #VGIC_CPU_VMCR]
-       str     r5, [r11, #VGIC_CPU_MISR]
-       str     r6, [r11, #VGIC_CPU_EISR]
-       str     r7, [r11, #(VGIC_CPU_EISR + 4)]
-       str     r8, [r11, #VGIC_CPU_ELRSR]
-       str     r9, [r11, #(VGIC_CPU_ELRSR + 4)]
-       str     r10, [r11, #VGIC_CPU_APR]
+ARM_BE8(rev    r3, r3  )
+ARM_BE8(rev    r4, r4  )
+ARM_BE8(rev    r5, r5  )
+ARM_BE8(rev    r6, r6  )
+ARM_BE8(rev    r7, r7  )
+ARM_BE8(rev    r8, r8  )
+ARM_BE8(rev    r9, r9  )
+ARM_BE8(rev    r10, r10        )
+
+       str     r3, [r11, #VGIC_V2_CPU_HCR]
+       str     r4, [r11, #VGIC_V2_CPU_VMCR]
+       str     r5, [r11, #VGIC_V2_CPU_MISR]
+       str     r6, [r11, #VGIC_V2_CPU_EISR]
+       str     r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
+       str     r8, [r11, #VGIC_V2_CPU_ELRSR]
+       str     r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
+       str     r10, [r11, #VGIC_V2_CPU_APR]
 
        /* Clear GICH_HCR */
        mov     r5, #0
@@ -428,9 +445,10 @@ vcpu       .req    r0              @ vcpu pointer always in r0
 
        /* Save list registers */
        add     r2, r2, #GICH_LR0
-       add     r3, r11, #VGIC_CPU_LR
+       add     r3, r11, #VGIC_V2_CPU_LR
        ldr     r4, [r11, #VGIC_CPU_NR_LR]
 1:     ldr     r6, [r2], #4
+ARM_BE8(rev    r6, r6  )
        str     r6, [r3], #4
        subs    r4, r4, #1
        bne     1b
@@ -455,9 +473,12 @@ vcpu       .req    r0              @ vcpu pointer always in r0
        add     r11, vcpu, #VCPU_VGIC_CPU
 
        /* We only restore a minimal set of registers */
-       ldr     r3, [r11, #VGIC_CPU_HCR]
-       ldr     r4, [r11, #VGIC_CPU_VMCR]
-       ldr     r8, [r11, #VGIC_CPU_APR]
+       ldr     r3, [r11, #VGIC_V2_CPU_HCR]
+       ldr     r4, [r11, #VGIC_V2_CPU_VMCR]
+       ldr     r8, [r11, #VGIC_V2_CPU_APR]
+ARM_BE8(rev    r3, r3  )
+ARM_BE8(rev    r4, r4  )
+ARM_BE8(rev    r8, r8  )
 
        str     r3, [r2, #GICH_HCR]
        str     r4, [r2, #GICH_VMCR]
@@ -465,9 +486,10 @@ vcpu       .req    r0              @ vcpu pointer always in r0
 
        /* Restore list registers */
        add     r2, r2, #GICH_LR0
-       add     r3, r11, #VGIC_CPU_LR
+       add     r3, r11, #VGIC_V2_CPU_LR
        ldr     r4, [r11, #VGIC_CPU_NR_LR]
 1:     ldr     r6, [r3], #4
+ARM_BE8(rev    r6, r6  )
        str     r6, [r2], #4
        subs    r4, r4, #1
        bne     1b
@@ -498,7 +520,7 @@ vcpu        .req    r0              @ vcpu pointer always in r0
        mcr     p15, 0, r2, c14, c3, 1  @ CNTV_CTL
        isb
 
-       mrrc    p15, 3, r2, r3, c14     @ CNTV_CVAL
+       mrrc    p15, 3, rr_lo_hi(r2, r3), c14   @ CNTV_CVAL
        ldr     r4, =VCPU_TIMER_CNTV_CVAL
        add     r5, vcpu, r4
        strd    r2, r3, [r5]
@@ -538,12 +560,12 @@ vcpu      .req    r0              @ vcpu pointer always in r0
 
        ldr     r2, [r4, #KVM_TIMER_CNTVOFF]
        ldr     r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
-       mcrr    p15, 4, r2, r3, c14     @ CNTVOFF
+       mcrr    p15, 4, rr_lo_hi(r2, r3), c14   @ CNTVOFF
 
        ldr     r4, =VCPU_TIMER_CNTV_CVAL
        add     r5, vcpu, r4
        ldrd    r2, r3, [r5]
-       mcrr    p15, 3, r2, r3, c14     @ CNTV_CVAL
+       mcrr    p15, 3, rr_lo_hi(r2, r3), c14   @ CNTV_CVAL
        isb
 
        ldr     r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
@@ -597,17 +619,14 @@ vcpu      .req    r0              @ vcpu pointer always in r0
 
 /* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
 .macro configure_hyp_role operation
-       mrc     p15, 4, r2, c1, c1, 0   @ HCR
-       bic     r2, r2, #HCR_VIRT_EXCP_MASK
-       ldr     r3, =HCR_GUEST_MASK
        .if \operation == vmentry
-       orr     r2, r2, r3
+       ldr     r2, [vcpu, #VCPU_HCR]
        ldr     r3, [vcpu, #VCPU_IRQ_LINES]
        orr     r2, r2, r3
        .else
-       bic     r2, r2, r3
+       mov     r2, #0
        .endif
-       mcr     p15, 4, r2, c1, c1, 0
+       mcr     p15, 4, r2, c1, c1, 0   @ HCR
 .endm
 
 .macro load_vcpu
index 72a12f2171b26bba2937e3c288f4bd51f1b5540b..4cb5a93182e9283f78f5ddd7c6b51c38a51c783b 100644 (file)
 
 #include "trace.h"
 
+static void mmio_write_buf(char *buf, unsigned int len, unsigned long data)
+{
+       void *datap = NULL;
+       union {
+               u8      byte;
+               u16     hword;
+               u32     word;
+               u64     dword;
+       } tmp;
+
+       switch (len) {
+       case 1:
+               tmp.byte        = data;
+               datap           = &tmp.byte;
+               break;
+       case 2:
+               tmp.hword       = data;
+               datap           = &tmp.hword;
+               break;
+       case 4:
+               tmp.word        = data;
+               datap           = &tmp.word;
+               break;
+       case 8:
+               tmp.dword       = data;
+               datap           = &tmp.dword;
+               break;
+       }
+
+       memcpy(buf, datap, len);
+}
+
+static unsigned long mmio_read_buf(char *buf, unsigned int len)
+{
+       unsigned long data = 0;
+       union {
+               u16     hword;
+               u32     word;
+               u64     dword;
+       } tmp;
+
+       switch (len) {
+       case 1:
+               data = buf[0];
+               break;
+       case 2:
+               memcpy(&tmp.hword, buf, len);
+               data = tmp.hword;
+               break;
+       case 4:
+               memcpy(&tmp.word, buf, len);
+               data = tmp.word;
+               break;
+       case 8:
+               memcpy(&tmp.dword, buf, len);
+               data = tmp.dword;
+               break;
+       }
+
+       return data;
+}
+
 /**
  * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
  * @vcpu: The VCPU pointer
  */
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-       unsigned long *dest;
+       unsigned long data;
        unsigned int len;
        int mask;
 
        if (!run->mmio.is_write) {
-               dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
-               *dest = 0;
-
                len = run->mmio.len;
                if (len > sizeof(unsigned long))
                        return -EINVAL;
 
-               memcpy(dest, run->mmio.data, len);
-
-               trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
-                               *((u64 *)run->mmio.data));
+               data = mmio_read_buf(run->mmio.data, len);
 
                if (vcpu->arch.mmio_decode.sign_extend &&
                    len < sizeof(unsigned long)) {
                        mask = 1U << ((len * 8) - 1);
-                       *dest = (*dest ^ mask) - mask;
+                       data = (data ^ mask) - mask;
                }
+
+               trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
+                              data);
+               data = vcpu_data_host_to_guest(vcpu, data, len);
+               *vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
        }
 
        return 0;
@@ -63,7 +124,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                      struct kvm_exit_mmio *mmio)
 {
-       unsigned long rt, len;
+       unsigned long rt;
+       int len;
        bool is_write, sign_extend;
 
        if (kvm_vcpu_dabt_isextabt(vcpu)) {
@@ -86,12 +148,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        sign_extend = kvm_vcpu_dabt_issext(vcpu);
        rt = kvm_vcpu_dabt_get_rd(vcpu);
 
-       if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
-               /* IO memory trying to read/write pc */
-               kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-               return 1;
-       }
-
        mmio->is_write = is_write;
        mmio->phys_addr = fault_ipa;
        mmio->len = len;
@@ -110,6 +166,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                 phys_addr_t fault_ipa)
 {
        struct kvm_exit_mmio mmio;
+       unsigned long data;
        unsigned long rt;
        int ret;
 
@@ -130,13 +187,15 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
        }
 
        rt = vcpu->arch.mmio_decode.rt;
+       data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len);
+
        trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
                                         KVM_TRACE_MMIO_READ_UNSATISFIED,
                        mmio.len, fault_ipa,
-                       (mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);
+                       (mmio.is_write) ? data : 0);
 
        if (mmio.is_write)
-               memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);
+               mmio_write_buf(mmio.data, mmio.len, data);
 
        if (vgic_handle_mmio(vcpu, run, &mmio))
                return 1;
index e04613906f1bbcee598feaab440da1e7e0452ed7..eea03069161b4b4c2824bd0908edac065f0959a8 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/mman.h>
 #include <linux/kvm_host.h>
 #include <linux/io.h>
+#include <linux/hugetlb.h>
 #include <trace/events/kvm.h>
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
@@ -41,6 +42,10 @@ static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
 
+#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
+
+#define kvm_pmd_huge(_x)       (pmd_huge(_x) || pmd_trans_huge(_x))
+
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
        /*
@@ -85,9 +90,19 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
        return p;
 }
 
+static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
+{
+       pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
+       pgd_clear(pgd);
+       kvm_tlb_flush_vmid_ipa(kvm, addr);
+       pud_free(NULL, pud_table);
+       put_page(virt_to_page(pgd));
+}
+
 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
        pmd_t *pmd_table = pmd_offset(pud, 0);
+       VM_BUG_ON(pud_huge(*pud));
        pud_clear(pud);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        pmd_free(NULL, pmd_table);
@@ -97,73 +112,186 @@ static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 {
        pte_t *pte_table = pte_offset_kernel(pmd, 0);
+       VM_BUG_ON(kvm_pmd_huge(*pmd));
        pmd_clear(pmd);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        pte_free_kernel(NULL, pte_table);
        put_page(virt_to_page(pmd));
 }
 
-static bool pmd_empty(pmd_t *pmd)
+static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
+                      phys_addr_t addr, phys_addr_t end)
 {
-       struct page *pmd_page = virt_to_page(pmd);
-       return page_count(pmd_page) == 1;
+       phys_addr_t start_addr = addr;
+       pte_t *pte, *start_pte;
+
+       start_pte = pte = pte_offset_kernel(pmd, addr);
+       do {
+               if (!pte_none(*pte)) {
+                       kvm_set_pte(pte, __pte(0));
+                       put_page(virt_to_page(pte));
+                       kvm_tlb_flush_vmid_ipa(kvm, addr);
+               }
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+
+       if (kvm_pte_table_empty(start_pte))
+               clear_pmd_entry(kvm, pmd, start_addr);
 }
 
-static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
+static void unmap_pmds(struct kvm *kvm, pud_t *pud,
+                      phys_addr_t addr, phys_addr_t end)
 {
-       if (pte_present(*pte)) {
-               kvm_set_pte(pte, __pte(0));
-               put_page(virt_to_page(pte));
-               kvm_tlb_flush_vmid_ipa(kvm, addr);
-       }
+       phys_addr_t next, start_addr = addr;
+       pmd_t *pmd, *start_pmd;
+
+       start_pmd = pmd = pmd_offset(pud, addr);
+       do {
+               next = kvm_pmd_addr_end(addr, end);
+               if (!pmd_none(*pmd)) {
+                       if (kvm_pmd_huge(*pmd)) {
+                               pmd_clear(pmd);
+                               kvm_tlb_flush_vmid_ipa(kvm, addr);
+                               put_page(virt_to_page(pmd));
+                       } else {
+                               unmap_ptes(kvm, pmd, addr, next);
+                       }
+               }
+       } while (pmd++, addr = next, addr != end);
+
+       if (kvm_pmd_table_empty(start_pmd))
+               clear_pud_entry(kvm, pud, start_addr);
 }
 
-static bool pte_empty(pte_t *pte)
+static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
+                      phys_addr_t addr, phys_addr_t end)
 {
-       struct page *pte_page = virt_to_page(pte);
-       return page_count(pte_page) == 1;
+       phys_addr_t next, start_addr = addr;
+       pud_t *pud, *start_pud;
+
+       start_pud = pud = pud_offset(pgd, addr);
+       do {
+               next = kvm_pud_addr_end(addr, end);
+               if (!pud_none(*pud)) {
+                       if (pud_huge(*pud)) {
+                               pud_clear(pud);
+                               kvm_tlb_flush_vmid_ipa(kvm, addr);
+                               put_page(virt_to_page(pud));
+                       } else {
+                               unmap_pmds(kvm, pud, addr, next);
+                       }
+               }
+       } while (pud++, addr = next, addr != end);
+
+       if (kvm_pud_table_empty(start_pud))
+               clear_pgd_entry(kvm, pgd, start_addr);
 }
 
+
 static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
-                       unsigned long long start, u64 size)
+                       phys_addr_t start, u64 size)
 {
        pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
+       phys_addr_t addr = start, end = start + size;
+       phys_addr_t next;
+
+       pgd = pgdp + pgd_index(addr);
+       do {
+               next = kvm_pgd_addr_end(addr, end);
+               unmap_puds(kvm, pgd, addr, next);
+       } while (pgd++, addr = next, addr != end);
+}
+
+static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
+                             phys_addr_t addr, phys_addr_t end)
+{
        pte_t *pte;
-       unsigned long long addr = start, end = start + size;
-       u64 range;
 
-       while (addr < end) {
-               pgd = pgdp + pgd_index(addr);
-               pud = pud_offset(pgd, addr);
-               if (pud_none(*pud)) {
-                       addr += PUD_SIZE;
-                       continue;
+       pte = pte_offset_kernel(pmd, addr);
+       do {
+               if (!pte_none(*pte)) {
+                       hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
+                       kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
                }
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+}
 
-               pmd = pmd_offset(pud, addr);
-               if (pmd_none(*pmd)) {
-                       addr += PMD_SIZE;
-                       continue;
+static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
+                             phys_addr_t addr, phys_addr_t end)
+{
+       pmd_t *pmd;
+       phys_addr_t next;
+
+       pmd = pmd_offset(pud, addr);
+       do {
+               next = kvm_pmd_addr_end(addr, end);
+               if (!pmd_none(*pmd)) {
+                       if (kvm_pmd_huge(*pmd)) {
+                               hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
+                               kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
+                       } else {
+                               stage2_flush_ptes(kvm, pmd, addr, next);
+                       }
                }
+       } while (pmd++, addr = next, addr != end);
+}
 
-               pte = pte_offset_kernel(pmd, addr);
-               clear_pte_entry(kvm, pte, addr);
-               range = PAGE_SIZE;
-
-               /* If we emptied the pte, walk back up the ladder */
-               if (pte_empty(pte)) {
-                       clear_pmd_entry(kvm, pmd, addr);
-                       range = PMD_SIZE;
-                       if (pmd_empty(pmd)) {
-                               clear_pud_entry(kvm, pud, addr);
-                               range = PUD_SIZE;
+static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
+                             phys_addr_t addr, phys_addr_t end)
+{
+       pud_t *pud;
+       phys_addr_t next;
+
+       pud = pud_offset(pgd, addr);
+       do {
+               next = kvm_pud_addr_end(addr, end);
+               if (!pud_none(*pud)) {
+                       if (pud_huge(*pud)) {
+                               hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
+                               kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
+                       } else {
+                               stage2_flush_pmds(kvm, pud, addr, next);
                        }
                }
+       } while (pud++, addr = next, addr != end);
+}
 
-               addr += range;
-       }
+static void stage2_flush_memslot(struct kvm *kvm,
+                                struct kvm_memory_slot *memslot)
+{
+       phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+       phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
+       phys_addr_t next;
+       pgd_t *pgd;
+
+       pgd = kvm->arch.pgd + pgd_index(addr);
+       do {
+               next = kvm_pgd_addr_end(addr, end);
+               stage2_flush_puds(kvm, pgd, addr, next);
+       } while (pgd++, addr = next, addr != end);
+}
+
+/**
+ * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
+ * @kvm: The struct kvm pointer
+ *
+ * Go through the stage 2 page tables and invalidate any cache lines
+ * backing memory already mapped to the VM.
+ */
+void stage2_flush_vm(struct kvm *kvm)
+{
+       struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
+       int idx;
+
+       idx = srcu_read_lock(&kvm->srcu);
+       spin_lock(&kvm->mmu_lock);
+
+       slots = kvm_memslots(kvm);
+       kvm_for_each_memslot(memslot, slots)
+               stage2_flush_memslot(kvm, memslot);
+
+       spin_unlock(&kvm->mmu_lock);
+       srcu_read_unlock(&kvm->srcu, idx);
 }
 
 /**
@@ -178,14 +306,14 @@ void free_boot_hyp_pgd(void)
        if (boot_hyp_pgd) {
                unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
                unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
-               kfree(boot_hyp_pgd);
+               free_pages((unsigned long)boot_hyp_pgd, pgd_order);
                boot_hyp_pgd = NULL;
        }
 
        if (hyp_pgd)
                unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 
-       kfree(init_bounce_page);
+       free_page((unsigned long)init_bounce_page);
        init_bounce_page = NULL;
 
        mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -215,7 +343,7 @@ void free_hyp_pgds(void)
                for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
                        unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
 
-               kfree(hyp_pgd);
+               free_pages((unsigned long)hyp_pgd, pgd_order);
                hyp_pgd = NULL;
        }
 
@@ -404,9 +532,6 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
        if (!pgd)
                return -ENOMEM;
 
-       /* stage-2 pgd must be aligned to its size */
-       VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
-
        memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
        kvm_clean_pgd(pgd);
        kvm->arch.pgd = pgd;
@@ -451,29 +576,71 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
        kvm->arch.pgd = NULL;
 }
 
-
-static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
-                         phys_addr_t addr, const pte_t *new_pte, bool iomap)
+static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+                            phys_addr_t addr)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
-       pte_t *pte, old_pte;
 
-       /* Create 2nd stage page table mapping - Level 1 */
        pgd = kvm->arch.pgd + pgd_index(addr);
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud)) {
                if (!cache)
-                       return 0; /* ignore calls from kvm_set_spte_hva */
+                       return NULL;
                pmd = mmu_memory_cache_alloc(cache);
                pud_populate(NULL, pud, pmd);
                get_page(virt_to_page(pud));
        }
 
-       pmd = pmd_offset(pud, addr);
+       return pmd_offset(pud, addr);
+}
+
+static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
+                              *cache, phys_addr_t addr, const pmd_t *new_pmd)
+{
+       pmd_t *pmd, old_pmd;
 
-       /* Create 2nd stage page table mapping - Level 2 */
+       pmd = stage2_get_pmd(kvm, cache, addr);
+       VM_BUG_ON(!pmd);
+
+       /*
+        * Mapping in huge pages should only happen through a fault.  If a
+        * page is merged into a transparent huge page, the individual
+        * subpages of that huge page should be unmapped through MMU
+        * notifiers before we get here.
+        *
+        * Merging of CompoundPages is not supported; they should become
+        * splitting first, unmapped, merged, and mapped back in on-demand.
+        */
+       VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
+
+       old_pmd = *pmd;
+       kvm_set_pmd(pmd, *new_pmd);
+       if (pmd_present(old_pmd))
+               kvm_tlb_flush_vmid_ipa(kvm, addr);
+       else
+               get_page(virt_to_page(pmd));
+       return 0;
+}
+
+static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+                         phys_addr_t addr, const pte_t *new_pte, bool iomap)
+{
+       pmd_t *pmd;
+       pte_t *pte, old_pte;
+
+       /* Create stage-2 page table mapping - Level 1 */
+       pmd = stage2_get_pmd(kvm, cache, addr);
+       if (!pmd) {
+               /*
+                * Ignore calls from kvm_set_spte_hva for unallocated
+                * address ranges.
+                */
+               return 0;
+       }
+
+       /* Create stage-2 page mappings - Level 2 */
        if (pmd_none(*pmd)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
@@ -520,7 +687,6 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 
        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
                pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
-               kvm_set_s2pte_writable(&pte);
 
                ret = mmu_topup_memory_cache(&cache, 2, 2);
                if (ret)
@@ -539,23 +705,97 @@ out:
        return ret;
 }
 
+static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
+{
+       pfn_t pfn = *pfnp;
+       gfn_t gfn = *ipap >> PAGE_SHIFT;
+
+       if (PageTransCompound(pfn_to_page(pfn))) {
+               unsigned long mask;
+               /*
+                * The address we faulted on is backed by a transparent huge
+                * page.  However, because we map the compound huge page and
+                * not the individual tail page, we need to transfer the
+                * refcount to the head page.  We have to be careful that the
+                * THP doesn't start to split while we are adjusting the
+                * refcounts.
+                *
+                * We are sure this doesn't happen, because mmu_notifier_retry
+                * was successful and we are holding the mmu_lock, so if this
+                * THP is trying to split, it will be blocked in the mmu
+                * notifier before touching any of the pages, specifically
+                * before being able to call __split_huge_page_refcount().
+                *
+                * We can therefore safely transfer the refcount from PG_tail
+                * to PG_head and switch the pfn from a tail page to the head
+                * page accordingly.
+                */
+               mask = PTRS_PER_PMD - 1;
+               VM_BUG_ON((gfn & mask) != (pfn & mask));
+               if (pfn & mask) {
+                       *ipap &= PMD_MASK;
+                       kvm_release_pfn_clean(pfn);
+                       pfn &= ~mask;
+                       kvm_get_pfn(pfn);
+                       *pfnp = pfn;
+               }
+
+               return true;
+       }
+
+       return false;
+}
+
+static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+{
+       if (kvm_vcpu_trap_is_iabt(vcpu))
+               return false;
+
+       return kvm_vcpu_dabt_iswrite(vcpu);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-                         gfn_t gfn, struct kvm_memory_slot *memslot,
+                         struct kvm_memory_slot *memslot, unsigned long hva,
                          unsigned long fault_status)
 {
-       pte_t new_pte;
-       pfn_t pfn;
        int ret;
-       bool write_fault, writable;
+       bool write_fault, writable, hugetlb = false, force_pte = false;
        unsigned long mmu_seq;
+       gfn_t gfn = fault_ipa >> PAGE_SHIFT;
+       struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+       struct vm_area_struct *vma;
+       pfn_t pfn;
+       pgprot_t mem_type = PAGE_S2;
 
-       write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
+       write_fault = kvm_is_write_fault(vcpu);
        if (fault_status == FSC_PERM && !write_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }
 
+       /* Let's check if we will get back a huge page backed by hugetlbfs */
+       down_read(&current->mm->mmap_sem);
+       vma = find_vma_intersection(current->mm, hva, hva + 1);
+       if (is_vm_hugetlb_page(vma)) {
+               hugetlb = true;
+               gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
+       } else {
+               /*
+                * Pages belonging to memslots that don't have the same
+                * alignment for userspace and IPA cannot be mapped using
+                * block descriptors even if the pages belong to a THP for
+                * the process, because the stage-2 block descriptor will
+                * cover more than a single THP and we loose atomicity for
+                * unmapping, updates, and splits of the THP or other pages
+                * in the stage-2 block range.
+                */
+               if ((memslot->userspace_addr & ~PMD_MASK) !=
+                   ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
+                       force_pte = true;
+       }
+       up_read(&current->mm->mmap_sem);
+
        /* We need minimum second+third level pages */
        ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
        if (ret)
@@ -573,26 +813,44 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         */
        smp_rmb();
 
-       pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
+       pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
        if (is_error_pfn(pfn))
                return -EFAULT;
 
-       new_pte = pfn_pte(pfn, PAGE_S2);
-       coherent_icache_guest_page(vcpu->kvm, gfn);
+       if (kvm_is_mmio_pfn(pfn))
+               mem_type = PAGE_S2_DEVICE;
 
-       spin_lock(&vcpu->kvm->mmu_lock);
-       if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
+       spin_lock(&kvm->mmu_lock);
+       if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;
-       if (writable) {
-               kvm_set_s2pte_writable(&new_pte);
-               kvm_set_pfn_dirty(pfn);
+       if (!hugetlb && !force_pte)
+               hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
+
+       if (hugetlb) {
+               pmd_t new_pmd = pfn_pmd(pfn, mem_type);
+               new_pmd = pmd_mkhuge(new_pmd);
+               if (writable) {
+                       kvm_set_s2pmd_writable(&new_pmd);
+                       kvm_set_pfn_dirty(pfn);
+               }
+               coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
+               ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
+       } else {
+               pte_t new_pte = pfn_pte(pfn, mem_type);
+               if (writable) {
+                       kvm_set_s2pte_writable(&new_pte);
+                       kvm_set_pfn_dirty(pfn);
+               }
+               coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
+               ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
+                                    mem_type == PAGE_S2_DEVICE);
        }
-       stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
+
 
 out_unlock:
-       spin_unlock(&vcpu->kvm->mmu_lock);
+       spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
-       return 0;
+       return ret;
 }
 
 /**
@@ -612,7 +870,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
        unsigned long fault_status;
        phys_addr_t fault_ipa;
        struct kvm_memory_slot *memslot;
-       bool is_iabt;
+       unsigned long hva;
+       bool is_iabt, write_fault, writable;
        gfn_t gfn;
        int ret, idx;
 
@@ -623,17 +882,22 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
                              kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
        /* Check the stage-2 fault is trans. fault or write fault */
-       fault_status = kvm_vcpu_trap_get_fault(vcpu);
+       fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
        if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
-               kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
-                       kvm_vcpu_trap_get_class(vcpu), fault_status);
+               kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
+                       kvm_vcpu_trap_get_class(vcpu),
+                       (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
+                       (unsigned long)kvm_vcpu_get_hsr(vcpu));
                return -EFAULT;
        }
 
        idx = srcu_read_lock(&vcpu->kvm->srcu);
 
        gfn = fault_ipa >> PAGE_SHIFT;
-       if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+       memslot = gfn_to_memslot(vcpu->kvm, gfn);
+       hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
+       write_fault = kvm_is_write_fault(vcpu);
+       if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
                if (is_iabt) {
                        /* Prefetch Abort on I/O address */
                        kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
@@ -641,13 +905,6 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
                        goto out_unlock;
                }
 
-               if (fault_status != FSC_FAULT) {
-                       kvm_err("Unsupported fault status on io memory: %#lx\n",
-                               fault_status);
-                       ret = -EFAULT;
-                       goto out_unlock;
-               }
-
                /*
                 * The IPA is reported as [MAX:12], so we need to
                 * complement it with the bottom 12 bits from the
@@ -659,9 +916,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
                goto out_unlock;
        }
 
-       memslot = gfn_to_memslot(vcpu->kvm, gfn);
-
-       ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
+       ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
        if (ret == 0)
                ret = 1;
 out_unlock:
@@ -779,9 +1034,9 @@ int kvm_mmu_init(void)
 {
        int err;
 
-       hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
-       hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
-       hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);
+       hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
+       hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
+       hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
 
        if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
                /*
@@ -791,7 +1046,7 @@ int kvm_mmu_init(void)
                size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
                phys_addr_t phys_base;
 
-               init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+               init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
                if (!init_bounce_page) {
                        kvm_err("Couldn't allocate HYP init bounce page\n");
                        err = -ENOMEM;
@@ -808,7 +1063,7 @@ int kvm_mmu_init(void)
                 */
                kvm_flush_dcache_to_poc(init_bounce_page, len);
 
-               phys_base = virt_to_phys(init_bounce_page);
+               phys_base = kvm_virt_to_phys(init_bounce_page);
                hyp_idmap_vector += phys_base - hyp_idmap_start;
                hyp_idmap_start = phys_base;
                hyp_idmap_end = phys_base + len;
@@ -817,8 +1072,9 @@ int kvm_mmu_init(void)
                         (unsigned long)phys_base);
        }
 
-       hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
-       boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+       hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+       boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+
        if (!hyp_pgd || !boot_hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                err = -ENOMEM;
@@ -864,3 +1120,49 @@ out:
        free_hyp_pgds();
        return err;
 }
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                                  struct kvm_userspace_memory_region *mem,
+                                  const struct kvm_memory_slot *old,
+                                  enum kvm_mr_change change)
+{
+       gpa_t gpa = old->base_gfn << PAGE_SHIFT;
+       phys_addr_t size = old->npages << PAGE_SHIFT;
+       if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
+               spin_lock(&kvm->mmu_lock);
+               unmap_stage2_range(kvm, gpa, size);
+               spin_unlock(&kvm->mmu_lock);
+       }
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                  struct kvm_memory_slot *memslot,
+                                  struct kvm_userspace_memory_region *mem,
+                                  enum kvm_mr_change change)
+{
+       return 0;
+}
+
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+                           unsigned long npages)
+{
+       return 0;
+}
+
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                  struct kvm_memory_slot *slot)
+{
+}
index 7ee5bb7a3667d8227a2d87b3e62239a4854cd933..09cf37737ee2ad24bda1251541689ea2f8bdb535 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kvm_host.h>
 #include <linux/wait.h>
 
+#include <asm/cputype.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_psci.h>
 
  * as described in ARM document number ARM DEN 0022A.
  */
 
+#define AFFINITY_MASK(level)   ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
+
+static unsigned long psci_affinity_mask(unsigned long affinity_level)
+{
+       if (affinity_level <= 3)
+               return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);
+
+       return 0;
+}
+
+static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
+{
+       /*
+        * NOTE: For simplicity, we make VCPU suspend emulation to be
+        * same-as WFI (Wait-for-interrupt) emulation.
+        *
+        * This means for KVM the wakeup events are interrupts and
+        * this is consistent with intended use of StateID as described
+        * in section 5.4.1 of PSCI v0.2 specification (ARM DEN 0022A).
+        *
+        * Further, we also treat power-down request to be same as
+        * stand-by request as-per section 5.4.2 clause 3 of PSCI v0.2
+        * specification (ARM DEN 0022A). This means all suspend states
+        * for KVM will preserve the register state.
+        */
+       kvm_vcpu_block(vcpu);
+
+       return PSCI_RET_SUCCESS;
+}
+
 static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.pause = true;
@@ -34,25 +65,41 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
 static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 {
        struct kvm *kvm = source_vcpu->kvm;
-       struct kvm_vcpu *vcpu;
+       struct kvm_vcpu *vcpu = NULL, *tmp;
        wait_queue_head_t *wq;
        unsigned long cpu_id;
+       unsigned long context_id;
+       unsigned long mpidr;
        phys_addr_t target_pc;
+       int i;
 
        cpu_id = *vcpu_reg(source_vcpu, 1);
        if (vcpu_mode_is_32bit(source_vcpu))
                cpu_id &= ~((u32) 0);
 
-       if (cpu_id >= atomic_read(&kvm->online_vcpus))
-               return KVM_PSCI_RET_INVAL;
-
-       target_pc = *vcpu_reg(source_vcpu, 2);
+       kvm_for_each_vcpu(i, tmp, kvm) {
+               mpidr = kvm_vcpu_get_mpidr(tmp);
+               if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
+                       vcpu = tmp;
+                       break;
+               }
+       }
 
-       vcpu = kvm_get_vcpu(kvm, cpu_id);
+       /*
+        * Make sure the caller requested a valid CPU and that the CPU is
+        * turned off.
+        */
+       if (!vcpu)
+               return PSCI_RET_INVALID_PARAMS;
+       if (!vcpu->arch.pause) {
+               if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
+                       return PSCI_RET_ALREADY_ON;
+               else
+                       return PSCI_RET_INVALID_PARAMS;
+       }
 
-       wq = kvm_arch_vcpu_wq(vcpu);
-       if (!waitqueue_active(wq))
-               return KVM_PSCI_RET_INVAL;
+       target_pc = *vcpu_reg(source_vcpu, 2);
+       context_id = *vcpu_reg(source_vcpu, 3);
 
        kvm_reset_vcpu(vcpu);
 
@@ -62,26 +109,165 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
                vcpu_set_thumb(vcpu);
        }
 
+       /* Propagate caller endianness */
+       if (kvm_vcpu_is_be(source_vcpu))
+               kvm_vcpu_set_be(vcpu);
+
        *vcpu_pc(vcpu) = target_pc;
+       /*
+        * NOTE: We always update r0 (or x0) because for PSCI v0.1
+        * the general purpose registers are undefined upon CPU_ON.
+        */
+       *vcpu_reg(vcpu, 0) = context_id;
        vcpu->arch.pause = false;
        smp_mb();               /* Make sure the above is visible */
 
+       wq = kvm_arch_vcpu_wq(vcpu);
        wake_up_interruptible(wq);
 
-       return KVM_PSCI_RET_SUCCESS;
+       return PSCI_RET_SUCCESS;
 }
 
-/**
- * kvm_psci_call - handle PSCI call if r0 value is in range
- * @vcpu: Pointer to the VCPU struct
- *
- * Handle PSCI calls from guests through traps from HVC or SMC instructions.
- * The calling convention is similar to SMC calls to the secure world where
- * the function number is placed in r0 and this function returns true if the
- * function number specified in r0 is withing the PSCI range, and false
- * otherwise.
- */
-bool kvm_psci_call(struct kvm_vcpu *vcpu)
+static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
+{
+       int i;
+       unsigned long mpidr;
+       unsigned long target_affinity;
+       unsigned long target_affinity_mask;
+       unsigned long lowest_affinity_level;
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_vcpu *tmp;
+
+       target_affinity = *vcpu_reg(vcpu, 1);
+       lowest_affinity_level = *vcpu_reg(vcpu, 2);
+
+       /* Determine target affinity mask */
+       target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
+       if (!target_affinity_mask)
+               return PSCI_RET_INVALID_PARAMS;
+
+       /* Ignore other bits of target affinity */
+       target_affinity &= target_affinity_mask;
+
+       /*
+        * If one or more VCPU matching target affinity are running
+        * then ON else OFF
+        */
+       kvm_for_each_vcpu(i, tmp, kvm) {
+               mpidr = kvm_vcpu_get_mpidr(tmp);
+               if (((mpidr & target_affinity_mask) == target_affinity) &&
+                   !tmp->arch.pause) {
+                       return PSCI_0_2_AFFINITY_LEVEL_ON;
+               }
+       }
+
+       return PSCI_0_2_AFFINITY_LEVEL_OFF;
+}
+
+static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
+{
+       memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
+       vcpu->run->system_event.type = type;
+       vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+}
+
+static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
+{
+       kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
+}
+
+static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
+{
+       kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
+}
+
+int kvm_psci_version(struct kvm_vcpu *vcpu)
+{
+       if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+               return KVM_ARM_PSCI_0_2;
+
+       return KVM_ARM_PSCI_0_1;
+}
+
+static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+{
+       int ret = 1;
+       unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+       unsigned long val;
+
+       switch (psci_fn) {
+       case PSCI_0_2_FN_PSCI_VERSION:
+               /*
+                * Bits[31:16] = Major Version = 0
+                * Bits[15:0] = Minor Version = 2
+                */
+               val = 2;
+               break;
+       case PSCI_0_2_FN_CPU_SUSPEND:
+       case PSCI_0_2_FN64_CPU_SUSPEND:
+               val = kvm_psci_vcpu_suspend(vcpu);
+               break;
+       case PSCI_0_2_FN_CPU_OFF:
+               kvm_psci_vcpu_off(vcpu);
+               val = PSCI_RET_SUCCESS;
+               break;
+       case PSCI_0_2_FN_CPU_ON:
+       case PSCI_0_2_FN64_CPU_ON:
+               val = kvm_psci_vcpu_on(vcpu);
+               break;
+       case PSCI_0_2_FN_AFFINITY_INFO:
+       case PSCI_0_2_FN64_AFFINITY_INFO:
+               val = kvm_psci_vcpu_affinity_info(vcpu);
+               break;
+       case PSCI_0_2_FN_MIGRATE:
+       case PSCI_0_2_FN64_MIGRATE:
+               val = PSCI_RET_NOT_SUPPORTED;
+               break;
+       case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+               /*
+                * Trusted OS is MP hence does not require migration
+                * or
+                * Trusted OS is not present
+                */
+               val = PSCI_0_2_TOS_MP;
+               break;
+       case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
+       case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
+               val = PSCI_RET_NOT_SUPPORTED;
+               break;
+       case PSCI_0_2_FN_SYSTEM_OFF:
+               kvm_psci_system_off(vcpu);
+               /*
+                * We shouldn't be going back to guest VCPU after
+                * receiving SYSTEM_OFF request.
+                *
+                * If user space accidentally/deliberately resumes
+                * guest VCPU after SYSTEM_OFF request then guest
+                * VCPU should see internal failure from PSCI return
+                * value. To achieve this, we preload r0 (or x0) with
+                * PSCI return value INTERNAL_FAILURE.
+                */
+               val = PSCI_RET_INTERNAL_FAILURE;
+               ret = 0;
+               break;
+       case PSCI_0_2_FN_SYSTEM_RESET:
+               kvm_psci_system_reset(vcpu);
+               /*
+                * Same reason as SYSTEM_OFF for preloading r0 (or x0)
+                * with PSCI return value INTERNAL_FAILURE.
+                */
+               val = PSCI_RET_INTERNAL_FAILURE;
+               ret = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       *vcpu_reg(vcpu, 0) = val;
+       return ret;
+}
+
+static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
        unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
        unsigned long val;
@@ -89,20 +275,45 @@ bool kvm_psci_call(struct kvm_vcpu *vcpu)
        switch (psci_fn) {
        case KVM_PSCI_FN_CPU_OFF:
                kvm_psci_vcpu_off(vcpu);
-               val = KVM_PSCI_RET_SUCCESS;
+               val = PSCI_RET_SUCCESS;
                break;
        case KVM_PSCI_FN_CPU_ON:
                val = kvm_psci_vcpu_on(vcpu);
                break;
        case KVM_PSCI_FN_CPU_SUSPEND:
        case KVM_PSCI_FN_MIGRATE:
-               val = KVM_PSCI_RET_NI;
+               val = PSCI_RET_NOT_SUPPORTED;
                break;
-
        default:
-               return false;
+               return -EINVAL;
        }
 
        *vcpu_reg(vcpu, 0) = val;
-       return true;
+       return 1;
+}
+
+/**
+ * kvm_psci_call - handle PSCI call if r0 value is in range
+ * @vcpu: Pointer to the VCPU struct
+ *
+ * Handle PSCI calls from guests through traps from HVC instructions.
+ * The calling convention is similar to SMC calls to the secure world
+ * where the function number is placed in r0.
+ *
+ * This function returns: > 0 (success), 0 (success but exit to user
+ * space), and < 0 (errors)
+ *
+ * Errors:
+ * -EINVAL: Unrecognized PSCI function
+ */
+int kvm_psci_call(struct kvm_vcpu *vcpu)
+{
+       switch (kvm_psci_version(vcpu)) {
+       case KVM_ARM_PSCI_0_2:
+               return kvm_psci_0_2_call(vcpu);
+       case KVM_ARM_PSCI_0_1:
+               return kvm_psci_0_1_call(vcpu);
+       default:
+               return -EINVAL;
+       };
 }
index b80256b554cd428d1c4f8e83e453fd33b6a1edcc..f558c073c02378a449a05d337d47a8161ae5c51d 100644 (file)
 #include <asm/kvm_arm.h>
 #include <asm/kvm_coproc.h>
 
+#include <kvm/arm_arch_timer.h>
+
 /******************************************************************************
- * Cortex-A15 Reset Values
+ * Cortex-A15 and Cortex-A7 Reset Values
  */
 
-static const int a15_max_cpu_idx = 3;
-
-static struct kvm_regs a15_regs_reset = {
+static struct kvm_regs cortexa_regs_reset = {
        .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
 };
 
+static const struct kvm_irq_level cortexa_vtimer_irq = {
+       { .irq = 27 },
+       .level = 1,
+};
+
 
 /*******************************************************************************
  * Exported reset function
@@ -51,24 +56,28 @@ static struct kvm_regs a15_regs_reset = {
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
-       struct kvm_regs *cpu_reset;
+       struct kvm_regs *reset_regs;
+       const struct kvm_irq_level *cpu_vtimer_irq;
 
        switch (vcpu->arch.target) {
+       case KVM_ARM_TARGET_CORTEX_A7:
        case KVM_ARM_TARGET_CORTEX_A15:
-               if (vcpu->vcpu_id > a15_max_cpu_idx)
-                       return -EINVAL;
-               cpu_reset = &a15_regs_reset;
+               reset_regs = &cortexa_regs_reset;
                vcpu->arch.midr = read_cpuid_id();
+               cpu_vtimer_irq = &cortexa_vtimer_irq;
                break;
        default:
                return -ENODEV;
        }
 
        /* Reset core registers */
-       memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs));
+       memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs));
 
        /* Reset CP15 registers */
        kvm_reset_coprocs(vcpu);
 
+       /* Reset arch_timer context */
+       kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
+
        return 0;
 }
index a8e73ed5ad5b710fc73de0006255bd1dfd6713ef..b1d640f78623971337ad08072efdcc981c124c0f 100644 (file)
@@ -59,10 +59,9 @@ TRACE_EVENT(kvm_guest_fault,
                __entry->ipa                    = ipa;
        ),
 
-       TP_printk("guest fault at PC %#08lx (hxfar %#08lx, "
-                 "ipa %#16llx, hsr %#08lx",
-                 __entry->vcpu_pc, __entry->hxfar,
-                 __entry->ipa, __entry->hsr)
+       TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
+                 __entry->ipa, __entry->hsr,
+                 __entry->hxfar, __entry->vcpu_pc)
 );
 
 TRACE_EVENT(kvm_irq_line,
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
deleted file mode 100644 (file)
index 17c5ac7..0000000
+++ /dev/null
@@ -1,1499 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/cpu.h>
-#include <linux/kvm.h>
-#include <linux/kvm_host.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-
-#include <linux/irqchip/arm-gic.h>
-
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_mmu.h>
-
-/*
- * How the whole thing works (courtesy of Christoffer Dall):
- *
- * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
- *   something is pending
- * - VGIC pending interrupts are stored on the vgic.irq_state vgic
- *   bitmap (this bitmap is updated by both user land ioctls and guest
- *   mmio ops, and other in-kernel peripherals such as the
- *   arch. timers) and indicate the 'wire' state.
- * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
- *   recalculated
- * - To calculate the oracle, we need info for each cpu from
- *   compute_pending_for_cpu, which considers:
- *   - PPI: dist->irq_state & dist->irq_enable
- *   - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target
- *   - irq_spi_target is a 'formatted' version of the GICD_ICFGR
- *     registers, stored on each vcpu. We only keep one bit of
- *     information per interrupt, making sure that only one vcpu can
- *     accept the interrupt.
- * - The same is true when injecting an interrupt, except that we only
- *   consider a single interrupt at a time. The irq_spi_cpu array
- *   contains the target CPU for each SPI.
- *
- * The handling of level interrupts adds some extra complexity. We
- * need to track when the interrupt has been EOIed, so we can sample
- * the 'line' again. This is achieved as such:
- *
- * - When a level interrupt is moved onto a vcpu, the corresponding
- *   bit in irq_active is set. As long as this bit is set, the line
- *   will be ignored for further interrupts. The interrupt is injected
- *   into the vcpu with the GICH_LR_EOI bit set (generate a
- *   maintenance interrupt on EOI).
- * - When the interrupt is EOIed, the maintenance interrupt fires,
- *   and clears the corresponding bit in irq_active. This allow the
- *   interrupt line to be sampled again.
- */
-
-#define VGIC_ADDR_UNDEF                (-1)
-#define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)
-
-/* Physical address of vgic virtual cpu interface */
-static phys_addr_t vgic_vcpu_base;
-
-/* Virtual control interface base address */
-static void __iomem *vgic_vctrl_base;
-
-static struct device_node *vgic_node;
-
-#define ACCESS_READ_VALUE      (1 << 0)
-#define ACCESS_READ_RAZ                (0 << 0)
-#define ACCESS_READ_MASK(x)    ((x) & (1 << 0))
-#define ACCESS_WRITE_IGNORED   (0 << 1)
-#define ACCESS_WRITE_SETBIT    (1 << 1)
-#define ACCESS_WRITE_CLEARBIT  (2 << 1)
-#define ACCESS_WRITE_VALUE     (3 << 1)
-#define ACCESS_WRITE_MASK(x)   ((x) & (3 << 1))
-
-static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
-static void vgic_update_state(struct kvm *kvm);
-static void vgic_kick_vcpus(struct kvm *kvm);
-static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
-static u32 vgic_nr_lr;
-
-static unsigned int vgic_maint_irq;
-
-static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
-                               int cpuid, u32 offset)
-{
-       offset >>= 2;
-       if (!offset)
-               return x->percpu[cpuid].reg;
-       else
-               return x->shared.reg + offset - 1;
-}
-
-static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
-                                  int cpuid, int irq)
-{
-       if (irq < VGIC_NR_PRIVATE_IRQS)
-               return test_bit(irq, x->percpu[cpuid].reg_ul);
-
-       return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
-}
-
-static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
-                                   int irq, int val)
-{
-       unsigned long *reg;
-
-       if (irq < VGIC_NR_PRIVATE_IRQS) {
-               reg = x->percpu[cpuid].reg_ul;
-       } else {
-               reg =  x->shared.reg_ul;
-               irq -= VGIC_NR_PRIVATE_IRQS;
-       }
-
-       if (val)
-               set_bit(irq, reg);
-       else
-               clear_bit(irq, reg);
-}
-
-static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
-{
-       if (unlikely(cpuid >= VGIC_MAX_CPUS))
-               return NULL;
-       return x->percpu[cpuid].reg_ul;
-}
-
-static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
-{
-       return x->shared.reg_ul;
-}
-
-static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
-{
-       offset >>= 2;
-       BUG_ON(offset > (VGIC_NR_IRQS / 4));
-       if (offset < 4)
-               return x->percpu[cpuid] + offset;
-       else
-               return x->shared + offset - 8;
-}
-
-#define VGIC_CFG_LEVEL 0
-#define VGIC_CFG_EDGE  1
-
-static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
-{
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-       int irq_val;
-
-       irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
-       return irq_val == VGIC_CFG_EDGE;
-}
-
-static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
-{
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-       return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
-}
-
-static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
-{
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-       return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
-}
-
-static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
-{
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-       vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
-}
-
-static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
-{
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-       vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
-}
-
-static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
-{
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-       return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
-}
-
-static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
-{
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-       vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
-}
-
-static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
-{
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-       vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0);
-}
-
-static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
-{
-       if (irq < VGIC_NR_PRIVATE_IRQS)
-               set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
-       else
-               set_bit(irq - VGIC_NR_PRIVATE_IRQS,
-                       vcpu->arch.vgic_cpu.pending_shared);
-}
-
-static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
-{
-       if (irq < VGIC_NR_PRIVATE_IRQS)
-               clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
-       else
-               clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
-                         vcpu->arch.vgic_cpu.pending_shared);
-}
-
-static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
-{
-       return *((u32 *)mmio->data) & mask;
-}
-
-static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
-{
-       *((u32 *)mmio->data) = value & mask;
-}
-
-/**
- * vgic_reg_access - access vgic register
- * @mmio:   pointer to the data describing the mmio access
- * @reg:    pointer to the virtual backing of vgic distributor data
- * @offset: least significant 2 bits used for word offset
- * @mode:   ACCESS_ mode (see defines above)
- *
- * Helper to make vgic register access easier using one of the access
- * modes defined for vgic register access
- * (read,raz,write-ignored,setbit,clearbit,write)
- */
-static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
-                           phys_addr_t offset, int mode)
-{
-       int word_offset = (offset & 3) * 8;
-       u32 mask = (1UL << (mmio->len * 8)) - 1;
-       u32 regval;
-
-       /*
-        * Any alignment fault should have been delivered to the guest
-        * directly (ARM ARM B3.12.7 "Prioritization of aborts").
-        */
-
-       if (reg) {
-               regval = *reg;
-       } else {
-               BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
-               regval = 0;
-       }
-
-       if (mmio->is_write) {
-               u32 data = mmio_data_read(mmio, mask) << word_offset;
-               switch (ACCESS_WRITE_MASK(mode)) {
-               case ACCESS_WRITE_IGNORED:
-                       return;
-
-               case ACCESS_WRITE_SETBIT:
-                       regval |= data;
-                       break;
-
-               case ACCESS_WRITE_CLEARBIT:
-                       regval &= ~data;
-                       break;
-
-               case ACCESS_WRITE_VALUE:
-                       regval = (regval & ~(mask << word_offset)) | data;
-                       break;
-               }
-               *reg = regval;
-       } else {
-               switch (ACCESS_READ_MASK(mode)) {
-               case ACCESS_READ_RAZ:
-                       regval = 0;
-                       /* fall through */
-
-               case ACCESS_READ_VALUE:
-                       mmio_data_write(mmio, mask, regval >> word_offset);
-               }
-       }
-}
-
-static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
-                            struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
-       u32 reg;
-       u32 word_offset = offset & 3;
-
-       switch (offset & ~3) {
-       case 0:                 /* CTLR */
-               reg = vcpu->kvm->arch.vgic.enabled;
-               vgic_reg_access(mmio, &reg, word_offset,
-                               ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
-               if (mmio->is_write) {
-                       vcpu->kvm->arch.vgic.enabled = reg & 1;
-                       vgic_update_state(vcpu->kvm);
-                       return true;
-               }
-               break;
-
-       case 4:                 /* TYPER */
-               reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
-               reg |= (VGIC_NR_IRQS >> 5) - 1;
-               vgic_reg_access(mmio, &reg, word_offset,
-                               ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
-               break;
-
-       case 8:                 /* IIDR */
-               reg = 0x4B00043B;
-               vgic_reg_access(mmio, &reg, word_offset,
-                               ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
-               break;
-       }
-
-       return false;
-}
-
-static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
-                              struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
-       vgic_reg_access(mmio, NULL, offset,
-                       ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
-       return false;
-}
-
-static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
-                                      struct kvm_exit_mmio *mmio,
-                                      phys_addr_t offset)
-{
-       u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
-                                      vcpu->vcpu_id, offset);
-       vgic_reg_access(mmio, reg, offset,
-                       ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
-       if (mmio->is_write) {
-               vgic_update_state(vcpu->kvm);
-               return true;
-       }
-
-       return false;
-}
-
-static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
-                                        struct kvm_exit_mmio *mmio,
-                                        phys_addr_t offset)
-{
-       u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
-                                      vcpu->vcpu_id, offset);
-       vgic_reg_access(mmio, reg, offset,
-                       ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
-       if (mmio->is_write) {
-               if (offset < 4) /* Force SGI enabled */
-                       *reg |= 0xffff;
-               vgic_retire_disabled_irqs(vcpu);
-               vgic_update_state(vcpu->kvm);
-               return true;
-       }
-
-       return false;
-}
-
-static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
-                                       struct kvm_exit_mmio *mmio,
-                                       phys_addr_t offset)
-{
-       u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
-                                      vcpu->vcpu_id, offset);
-       vgic_reg_access(mmio, reg, offset,
-                       ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
-       if (mmio->is_write) {
-               vgic_update_state(vcpu->kvm);
-               return true;
-       }
-
-       return false;
-}
-
-static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
-                                         struct kvm_exit_mmio *mmio,
-                                         phys_addr_t offset)
-{
-       u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
-                                      vcpu->vcpu_id, offset);
-       vgic_reg_access(mmio, reg, offset,
-                       ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
-       if (mmio->is_write) {
-               vgic_update_state(vcpu->kvm);
-               return true;
-       }
-
-       return false;
-}
-
-static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
-                                    struct kvm_exit_mmio *mmio,
-                                    phys_addr_t offset)
-{
-       u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
-                                       vcpu->vcpu_id, offset);
-       vgic_reg_access(mmio, reg, offset,
-                       ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
-       return false;
-}
-
-#define GICD_ITARGETSR_SIZE    32
-#define GICD_CPUTARGETS_BITS   8
-#define GICD_IRQS_PER_ITARGETSR        (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
-static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
-{
-       struct vgic_dist *dist = &kvm->arch.vgic;
-       struct kvm_vcpu *vcpu;
-       int i, c;
-       unsigned long *bmap;
-       u32 val = 0;
-
-       irq -= VGIC_NR_PRIVATE_IRQS;
-
-       kvm_for_each_vcpu(c, vcpu, kvm) {
-               bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
-               for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
-                       if (test_bit(irq + i, bmap))
-                               val |= 1 << (c + i * 8);
-       }
-
-       return val;
-}
-
-static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
-{
-       struct vgic_dist *dist = &kvm->arch.vgic;
-       struct kvm_vcpu *vcpu;
-       int i, c;
-       unsigned long *bmap;
-       u32 target;
-
-       irq -= VGIC_NR_PRIVATE_IRQS;
-
-       /*
-        * Pick the LSB in each byte. This ensures we target exactly
-        * one vcpu per IRQ. If the byte is null, assume we target
-        * CPU0.
-        */
-       for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
-               int shift = i * GICD_CPUTARGETS_BITS;
-               target = ffs((val >> shift) & 0xffU);
-               target = target ? (target - 1) : 0;
-               dist->irq_spi_cpu[irq + i] = target;
-               kvm_for_each_vcpu(c, vcpu, kvm) {
-                       bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
-                       if (c == target)
-                               set_bit(irq + i, bmap);
-                       else
-                               clear_bit(irq + i, bmap);
-               }
-       }
-}
-
-static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
-                                  struct kvm_exit_mmio *mmio,
-                                  phys_addr_t offset)
-{
-       u32 reg;
-
-       /* We treat the banked interrupts targets as read-only */
-       if (offset < 32) {
-               u32 roreg = 1 << vcpu->vcpu_id;
-               roreg |= roreg << 8;
-               roreg |= roreg << 16;
-
-               vgic_reg_access(mmio, &roreg, offset,
-                               ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
-               return false;
-       }
-
-       reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
-       vgic_reg_access(mmio, &reg, offset,
-                       ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
-       if (mmio->is_write) {
-               vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
-               vgic_update_state(vcpu->kvm);
-               return true;
-       }
-
-       return false;
-}
-
-static u32 vgic_cfg_expand(u16 val)
-{
-       u32 res = 0;
-       int i;
-
-       /*
-        * Turn a 16bit value like abcd...mnop into a 32bit word
-        * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
-        */
-       for (i = 0; i < 16; i++)
-               res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);
-
-       return res;
-}
-
-static u16 vgic_cfg_compress(u32 val)
-{
-       u16 res = 0;
-       int i;
-
-       /*
-        * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
-        * abcd...mnop which is what we really care about.
-        */
-       for (i = 0; i < 16; i++)
-               res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;
-
-       return res;
-}
-
-/*
- * The distributor uses 2 bits per IRQ for the CFG register, but the
- * LSB is always 0. As such, we only keep the upper bit, and use the
- * two above functions to compress/expand the bits
- */
-static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
-                               struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
-       u32 val;
-       u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
-                                      vcpu->vcpu_id, offset >> 1);
-       if (offset & 2)
-               val = *reg >> 16;
-       else
-               val = *reg & 0xffff;
-
-       val = vgic_cfg_expand(val);
-       vgic_reg_access(mmio, &val, offset,
-                       ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
-       if (mmio->is_write) {
-               if (offset < 4) {
-                       *reg = ~0U; /* Force PPIs/SGIs to 1 */
-                       return false;
-               }
-
-               val = vgic_cfg_compress(val);
-               if (offset & 2) {
-                       *reg &= 0xffff;
-                       *reg |= val << 16;
-               } else {
-                       *reg &= 0xffff << 16;
-                       *reg |= val;
-               }
-       }
-
-       return false;
-}
-
-static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
-                               struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
-       u32 reg;
-       vgic_reg_access(mmio, &reg, offset,
-                       ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
-       if (mmio->is_write) {
-               vgic_dispatch_sgi(vcpu, reg);
-               vgic_update_state(vcpu->kvm);
-               return true;
-       }
-
-       return false;
-}
-
-/*
- * I would have liked to use the kvm_bus_io_*() API instead, but it
- * cannot cope with banked registers (only the VM pointer is passed
- * around, and we need the vcpu). One of these days, someone please
- * fix it!
- */
-struct mmio_range {
-       phys_addr_t base;
-       unsigned long len;
-       bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
-                           phys_addr_t offset);
-};
-
-static const struct mmio_range vgic_ranges[] = {
-       {
-               .base           = GIC_DIST_CTRL,
-               .len            = 12,
-               .handle_mmio    = handle_mmio_misc,
-       },
-       {
-               .base           = GIC_DIST_IGROUP,
-               .len            = VGIC_NR_IRQS / 8,
-               .handle_mmio    = handle_mmio_raz_wi,
-       },
-       {
-               .base           = GIC_DIST_ENABLE_SET,
-               .len            = VGIC_NR_IRQS / 8,
-               .handle_mmio    = handle_mmio_set_enable_reg,
-       },
-       {
-               .base           = GIC_DIST_ENABLE_CLEAR,
-               .len            = VGIC_NR_IRQS / 8,
-               .handle_mmio    = handle_mmio_clear_enable_reg,
-       },
-       {
-               .base           = GIC_DIST_PENDING_SET,
-               .len            = VGIC_NR_IRQS / 8,
-               .handle_mmio    = handle_mmio_set_pending_reg,
-       },
-       {
-               .base           = GIC_DIST_PENDING_CLEAR,
-               .len            = VGIC_NR_IRQS / 8,
-               .handle_mmio    = handle_mmio_clear_pending_reg,
-       },
-       {
-               .base           = GIC_DIST_ACTIVE_SET,
-               .len            = VGIC_NR_IRQS / 8,
-               .handle_mmio    = handle_mmio_raz_wi,
-       },
-       {
-               .base           = GIC_DIST_ACTIVE_CLEAR,
-               .len            = VGIC_NR_IRQS / 8,
-               .handle_mmio    = handle_mmio_raz_wi,
-       },
-       {
-               .base           = GIC_DIST_PRI,
-               .len            = VGIC_NR_IRQS,
-               .handle_mmio    = handle_mmio_priority_reg,
-       },
-       {
-               .base           = GIC_DIST_TARGET,
-               .len            = VGIC_NR_IRQS,
-               .handle_mmio    = handle_mmio_target_reg,
-       },
-       {
-               .base           = GIC_DIST_CONFIG,
-               .len            = VGIC_NR_IRQS / 4,
-               .handle_mmio    = handle_mmio_cfg_reg,
-       },
-       {
-               .base           = GIC_DIST_SOFTINT,
-               .len            = 4,
-               .handle_mmio    = handle_mmio_sgi_reg,
-       },
-       {}
-};
-
-static const
-struct mmio_range *find_matching_range(const struct mmio_range *ranges,
-                                      struct kvm_exit_mmio *mmio,
-                                      phys_addr_t base)
-{
-       const struct mmio_range *r = ranges;
-       phys_addr_t addr = mmio->phys_addr - base;
-
-       while (r->len) {
-               if (addr >= r->base &&
-                   (addr + mmio->len) <= (r->base + r->len))
-                       return r;
-               r++;
-       }
-
-       return NULL;
-}
-
-/**
- * vgic_handle_mmio - handle an in-kernel MMIO access
- * @vcpu:      pointer to the vcpu performing the access
- * @run:       pointer to the kvm_run structure
- * @mmio:      pointer to the data describing the access
- *
- * returns true if the MMIO access has been performed in kernel space,
- * and false if it needs to be emulated in user space.
- */
-bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
-                     struct kvm_exit_mmio *mmio)
-{
-       const struct mmio_range *range;
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-       unsigned long base = dist->vgic_dist_base;
-       bool updated_state;
-       unsigned long offset;
-
-       if (!irqchip_in_kernel(vcpu->kvm) ||
-           mmio->phys_addr < base ||
-           (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
-               return false;
-
-       /* We don't support ldrd / strd or ldm / stm to the emulated vgic */
-       if (mmio->len > 4) {
-               kvm_inject_dabt(vcpu, mmio->phys_addr);
-               return true;
-       }
-
-       range = find_matching_range(vgic_ranges, mmio, base);
-       if (unlikely(!range || !range->handle_mmio)) {
-               pr_warn("Unhandled access %d %08llx %d\n",
-                       mmio->is_write, mmio->phys_addr, mmio->len);
-               return false;
-       }
-
-       spin_lock(&vcpu->kvm->arch.vgic.lock);
-       offset = mmio->phys_addr - range->base - base;
-       updated_state = range->handle_mmio(vcpu, mmio, offset);
-       spin_unlock(&vcpu->kvm->arch.vgic.lock);
-       kvm_prepare_mmio(run, mmio);
-       kvm_handle_mmio_return(vcpu, run);
-
-       if (updated_state)
-               vgic_kick_vcpus(vcpu->kvm);
-
-       return true;
-}
-
-static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
-{
-       struct kvm *kvm = vcpu->kvm;
-       struct vgic_dist *dist = &kvm->arch.vgic;
-       int nrcpus = atomic_read(&kvm->online_vcpus);
-       u8 target_cpus;
-       int sgi, mode, c, vcpu_id;
-
-       vcpu_id = vcpu->vcpu_id;
-
-       sgi = reg & 0xf;
-       target_cpus = (reg >> 16) & 0xff;
-       mode = (reg >> 24) & 3;
-
-       switch (mode) {
-       case 0:
-               if (!target_cpus)
-                       return;
-
-       case 1:
-               target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
-               break;
-
-       case 2:
-               target_cpus = 1 << vcpu_id;
-               break;
-       }
-
-       kvm_for_each_vcpu(c, vcpu, kvm) {
-               if (target_cpus & 1) {
-                       /* Flag the SGI as pending */
-                       vgic_dist_irq_set(vcpu, sgi);
-                       dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
-                       kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
-               }
-
-               target_cpus >>= 1;
-       }
-}
-
-static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
-{
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-       unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
-       unsigned long pending_private, pending_shared;
-       int vcpu_id;
-
-       vcpu_id = vcpu->vcpu_id;
-       pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
-       pend_shared = vcpu->arch.vgic_cpu.pending_shared;
-
-       pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
-       enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
-       bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
-
-       pending = vgic_bitmap_get_shared_map(&dist->irq_state);
-       enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
-       bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
-       bitmap_and(pend_shared, pend_shared,
-                  vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
-                  VGIC_NR_SHARED_IRQS);
-
-       pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
-       pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
-       return (pending_private < VGIC_NR_PRIVATE_IRQS ||
-               pending_shared < VGIC_NR_SHARED_IRQS);
-}
-
-/*
- * Update the interrupt state and determine which CPUs have pending
- * interrupts. Must be called with distributor lock held.
- */
-static void vgic_update_state(struct kvm *kvm)
-{
-       struct vgic_dist *dist = &kvm->arch.vgic;
-       struct kvm_vcpu *vcpu;
-       int c;
-
-       if (!dist->enabled) {
-               set_bit(0, &dist->irq_pending_on_cpu);
-               return;
-       }
-
-       kvm_for_each_vcpu(c, vcpu, kvm) {
-               if (compute_pending_for_cpu(vcpu)) {
-                       pr_debug("CPU%d has pending interrupts\n", c);
-                       set_bit(c, &dist->irq_pending_on_cpu);
-               }
-       }
-}
-
-#define LR_CPUID(lr)   \
-       (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
-#define MK_LR_PEND(src, irq)   \
-       (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
-
-/*
- * An interrupt may have been disabled after being made pending on the
- * CPU interface (the classic case is a timer running while we're
- * rebooting the guest - the interrupt would kick as soon as the CPU
- * interface gets enabled, with deadly consequences).
- *
- * The solution is to examine already active LRs, and check the
- * interrupt is still enabled. If not, just retire it.
- */
-static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
-{
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       int lr;
-
-       for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
-               int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
-
-               if (!vgic_irq_is_enabled(vcpu, irq)) {
-                       vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
-                       clear_bit(lr, vgic_cpu->lr_used);
-                       vgic_cpu->vgic_lr[lr] &= ~GICH_LR_STATE;
-                       if (vgic_irq_is_active(vcpu, irq))
-                               vgic_irq_clear_active(vcpu, irq);
-               }
-       }
-}
-
-/*
- * Queue an interrupt to a CPU virtual interface. Return true on success,
- * or false if it wasn't possible to queue it.
- */
-static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
-{
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       int lr;
-
-       /* Sanitize the input... */
-       BUG_ON(sgi_source_id & ~7);
-       BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
-       BUG_ON(irq >= VGIC_NR_IRQS);
-
-       kvm_debug("Queue IRQ%d\n", irq);
-
-       lr = vgic_cpu->vgic_irq_lr_map[irq];
-
-       /* Do we have an active interrupt for the same CPUID? */
-       if (lr != LR_EMPTY &&
-           (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
-               kvm_debug("LR%d piggyback for IRQ%d %x\n",
-                         lr, irq, vgic_cpu->vgic_lr[lr]);
-               BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
-               vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
-               return true;
-       }
-
-       /* Try to use another LR for this interrupt */
-       lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
-                              vgic_cpu->nr_lr);
-       if (lr >= vgic_cpu->nr_lr)
-               return false;
-
-       kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
-       vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
-       vgic_cpu->vgic_irq_lr_map[irq] = lr;
-       set_bit(lr, vgic_cpu->lr_used);
-
-       if (!vgic_irq_is_edge(vcpu, irq))
-               vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
-
-       return true;
-}
-
-static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
-{
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-       unsigned long sources;
-       int vcpu_id = vcpu->vcpu_id;
-       int c;
-
-       sources = dist->irq_sgi_sources[vcpu_id][irq];
-
-       for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
-               if (vgic_queue_irq(vcpu, c, irq))
-                       clear_bit(c, &sources);
-       }
-
-       dist->irq_sgi_sources[vcpu_id][irq] = sources;
-
-       /*
-        * If the sources bitmap has been cleared it means that we
-        * could queue all the SGIs onto link registers (see the
-        * clear_bit above), and therefore we are done with them in
-        * our emulated gic and can get rid of them.
-        */
-       if (!sources) {
-               vgic_dist_irq_clear(vcpu, irq);
-               vgic_cpu_irq_clear(vcpu, irq);
-               return true;
-       }
-
-       return false;
-}
-
-static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
-{
-       if (vgic_irq_is_active(vcpu, irq))
-               return true; /* level interrupt, already queued */
-
-       if (vgic_queue_irq(vcpu, 0, irq)) {
-               if (vgic_irq_is_edge(vcpu, irq)) {
-                       vgic_dist_irq_clear(vcpu, irq);
-                       vgic_cpu_irq_clear(vcpu, irq);
-               } else {
-                       vgic_irq_set_active(vcpu, irq);
-               }
-
-               return true;
-       }
-
-       return false;
-}
-
-/*
- * Fill the list registers with pending interrupts before running the
- * guest.
- */
-static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
-{
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-       int i, vcpu_id;
-       int overflow = 0;
-
-       vcpu_id = vcpu->vcpu_id;
-
-       /*
-        * We may not have any pending interrupt, or the interrupts
-        * may have been serviced from another vcpu. In all cases,
-        * move along.
-        */
-       if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
-               pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
-               goto epilog;
-       }
-
-       /* SGIs */
-       for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
-               if (!vgic_queue_sgi(vcpu, i))
-                       overflow = 1;
-       }
-
-       /* PPIs */
-       for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
-               if (!vgic_queue_hwirq(vcpu, i))
-                       overflow = 1;
-       }
-
-       /* SPIs */
-       for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
-               if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
-                       overflow = 1;
-       }
-
-epilog:
-       if (overflow) {
-               vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
-       } else {
-               vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
-               /*
-                * We're about to run this VCPU, and we've consumed
-                * everything the distributor had in store for
-                * us. Claim we don't have anything pending. We'll
-                * adjust that if needed while exiting.
-                */
-               clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
-       }
-}
-
-static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
-{
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       bool level_pending = false;
-
-       kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
-
-       if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
-               /*
-                * Some level interrupts have been EOIed. Clear their
-                * active bit.
-                */
-               int lr, irq;
-
-               for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
-                                vgic_cpu->nr_lr) {
-                       irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
-
-                       vgic_irq_clear_active(vcpu, irq);
-                       vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;
-
-                       /* Any additional pending interrupt? */
-                       if (vgic_dist_irq_is_pending(vcpu, irq)) {
-                               vgic_cpu_irq_set(vcpu, irq);
-                               level_pending = true;
-                       } else {
-                               vgic_cpu_irq_clear(vcpu, irq);
-                       }
-
-                       /*
-                        * Despite being EOIed, the LR may not have
-                        * been marked as empty.
-                        */
-                       set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
-                       vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
-               }
-       }
-
-       if (vgic_cpu->vgic_misr & GICH_MISR_U)
-               vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
-
-       return level_pending;
-}
-
-/*
- * Sync back the VGIC state after a guest run. The distributor lock is
- * needed so we don't get preempted in the middle of the state processing.
- */
-static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
-{
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-       int lr, pending;
-       bool level_pending;
-
-       level_pending = vgic_process_maintenance(vcpu);
-
-       /* Clear mappings for empty LRs */
-       for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
-                        vgic_cpu->nr_lr) {
-               int irq;
-
-               if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
-                       continue;
-
-               irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
-
-               BUG_ON(irq >= VGIC_NR_IRQS);
-               vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
-       }
-
-       /* Check if we still have something up our sleeve... */
-       pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
-                                     vgic_cpu->nr_lr);
-       if (level_pending || pending < vgic_cpu->nr_lr)
-               set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
-}
-
-void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
-{
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-       if (!irqchip_in_kernel(vcpu->kvm))
-               return;
-
-       spin_lock(&dist->lock);
-       __kvm_vgic_flush_hwstate(vcpu);
-       spin_unlock(&dist->lock);
-}
-
-void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
-{
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-       if (!irqchip_in_kernel(vcpu->kvm))
-               return;
-
-       spin_lock(&dist->lock);
-       __kvm_vgic_sync_hwstate(vcpu);
-       spin_unlock(&dist->lock);
-}
-
-int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
-{
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-       if (!irqchip_in_kernel(vcpu->kvm))
-               return 0;
-
-       return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
-}
-
-static void vgic_kick_vcpus(struct kvm *kvm)
-{
-       struct kvm_vcpu *vcpu;
-       int c;
-
-       /*
-        * We've injected an interrupt, time to find out who deserves
-        * a good kick...
-        */
-       kvm_for_each_vcpu(c, vcpu, kvm) {
-               if (kvm_vgic_vcpu_pending_irq(vcpu))
-                       kvm_vcpu_kick(vcpu);
-       }
-}
-
-static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
-{
-       int is_edge = vgic_irq_is_edge(vcpu, irq);
-       int state = vgic_dist_irq_is_pending(vcpu, irq);
-
-       /*
-        * Only inject an interrupt if:
-        * - edge triggered and we have a rising edge
-        * - level triggered and we change level
-        */
-       if (is_edge)
-               return level > state;
-       else
-               return level != state;
-}
-
-static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
-                                 unsigned int irq_num, bool level)
-{
-       struct vgic_dist *dist = &kvm->arch.vgic;
-       struct kvm_vcpu *vcpu;
-       int is_edge, is_level;
-       int enabled;
-       bool ret = true;
-
-       spin_lock(&dist->lock);
-
-       vcpu = kvm_get_vcpu(kvm, cpuid);
-       is_edge = vgic_irq_is_edge(vcpu, irq_num);
-       is_level = !is_edge;
-
-       if (!vgic_validate_injection(vcpu, irq_num, level)) {
-               ret = false;
-               goto out;
-       }
-
-       if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
-               cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
-               vcpu = kvm_get_vcpu(kvm, cpuid);
-       }
-
-       kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);
-
-       if (level)
-               vgic_dist_irq_set(vcpu, irq_num);
-       else
-               vgic_dist_irq_clear(vcpu, irq_num);
-
-       enabled = vgic_irq_is_enabled(vcpu, irq_num);
-
-       if (!enabled) {
-               ret = false;
-               goto out;
-       }
-
-       if (is_level && vgic_irq_is_active(vcpu, irq_num)) {
-               /*
-                * Level interrupt in progress, will be picked up
-                * when EOId.
-                */
-               ret = false;
-               goto out;
-       }
-
-       if (level) {
-               vgic_cpu_irq_set(vcpu, irq_num);
-               set_bit(cpuid, &dist->irq_pending_on_cpu);
-       }
-
-out:
-       spin_unlock(&dist->lock);
-
-       return ret;
-}
-
-/**
- * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
- * @kvm:     The VM structure pointer
- * @cpuid:   The CPU for PPIs
- * @irq_num: The IRQ number that is assigned to the device
- * @level:   Edge-triggered:  true:  to trigger the interrupt
- *                           false: to ignore the call
- *          Level-sensitive  true:  activates an interrupt
- *                           false: deactivates an interrupt
- *
- * The GIC is not concerned with devices being active-LOW or active-HIGH for
- * level-sensitive interrupts.  You can think of the level parameter as 1
- * being HIGH and 0 being LOW and all devices being active-HIGH.
- */
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
-                       bool level)
-{
-       if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
-               vgic_kick_vcpus(kvm);
-
-       return 0;
-}
-
-static irqreturn_t vgic_maintenance_handler(int irq, void *data)
-{
-       /*
-        * We cannot rely on the vgic maintenance interrupt to be
-        * delivered synchronously. This means we can only use it to
-        * exit the VM, and we perform the handling of EOIed
-        * interrupts on the exit path (see vgic_process_maintenance).
-        */
-       return IRQ_HANDLED;
-}
-
-int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
-{
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-       int i;
-
-       if (!irqchip_in_kernel(vcpu->kvm))
-               return 0;
-
-       if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
-               return -EBUSY;
-
-       for (i = 0; i < VGIC_NR_IRQS; i++) {
-               if (i < VGIC_NR_PPIS)
-                       vgic_bitmap_set_irq_val(&dist->irq_enabled,
-                                               vcpu->vcpu_id, i, 1);
-               if (i < VGIC_NR_PRIVATE_IRQS)
-                       vgic_bitmap_set_irq_val(&dist->irq_cfg,
-                                               vcpu->vcpu_id, i, VGIC_CFG_EDGE);
-
-               vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
-       }
-
-       /*
-        * By forcing VMCR to zero, the GIC will restore the binary
-        * points to their reset values. Anything else resets to zero
-        * anyway.
-        */
-       vgic_cpu->vgic_vmcr = 0;
-
-       vgic_cpu->nr_lr = vgic_nr_lr;
-       vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
-
-       return 0;
-}
-
-static void vgic_init_maintenance_interrupt(void *info)
-{
-       enable_percpu_irq(vgic_maint_irq, 0);
-}
-
-static int vgic_cpu_notify(struct notifier_block *self,
-                          unsigned long action, void *cpu)
-{
-       switch (action) {
-       case CPU_STARTING:
-       case CPU_STARTING_FROZEN:
-               vgic_init_maintenance_interrupt(NULL);
-               break;
-       case CPU_DYING:
-       case CPU_DYING_FROZEN:
-               disable_percpu_irq(vgic_maint_irq);
-               break;
-       }
-
-       return NOTIFY_OK;
-}
-
-static struct notifier_block vgic_cpu_nb = {
-       .notifier_call = vgic_cpu_notify,
-};
-
-int kvm_vgic_hyp_init(void)
-{
-       int ret;
-       struct resource vctrl_res;
-       struct resource vcpu_res;
-
-       vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
-       if (!vgic_node) {
-               kvm_err("error: no compatible vgic node in DT\n");
-               return -ENODEV;
-       }
-
-       vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
-       if (!vgic_maint_irq) {
-               kvm_err("error getting vgic maintenance irq from DT\n");
-               ret = -ENXIO;
-               goto out;
-       }
-
-       ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
-                                "vgic", kvm_get_running_vcpus());
-       if (ret) {
-               kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
-               goto out;
-       }
-
-       ret = register_cpu_notifier(&vgic_cpu_nb);
-       if (ret) {
-               kvm_err("Cannot register vgic CPU notifier\n");
-               goto out_free_irq;
-       }
-
-       ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
-       if (ret) {
-               kvm_err("Cannot obtain VCTRL resource\n");
-               goto out_free_irq;
-       }
-
-       vgic_vctrl_base = of_iomap(vgic_node, 2);
-       if (!vgic_vctrl_base) {
-               kvm_err("Cannot ioremap VCTRL\n");
-               ret = -ENOMEM;
-               goto out_free_irq;
-       }
-
-       vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
-       vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;
-
-       ret = create_hyp_io_mappings(vgic_vctrl_base,
-                                    vgic_vctrl_base + resource_size(&vctrl_res),
-                                    vctrl_res.start);
-       if (ret) {
-               kvm_err("Cannot map VCTRL into hyp\n");
-               goto out_unmap;
-       }
-
-       kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
-                vctrl_res.start, vgic_maint_irq);
-       on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
-       if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
-               kvm_err("Cannot obtain VCPU resource\n");
-               ret = -ENXIO;
-               goto out_unmap;
-       }
-       vgic_vcpu_base = vcpu_res.start;
-
-       goto out;
-
-out_unmap:
-       iounmap(vgic_vctrl_base);
-out_free_irq:
-       free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
-out:
-       of_node_put(vgic_node);
-       return ret;
-}
-
-int kvm_vgic_init(struct kvm *kvm)
-{
-       int ret = 0, i;
-
-       mutex_lock(&kvm->lock);
-
-       if (vgic_initialized(kvm))
-               goto out;
-
-       if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
-           IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
-               kvm_err("Need to set vgic cpu and dist addresses first\n");
-               ret = -ENXIO;
-               goto out;
-       }
-
-       ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
-                                   vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
-       if (ret) {
-               kvm_err("Unable to remap VGIC CPU to VCPU\n");
-               goto out;
-       }
-
-       for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
-               vgic_set_target_reg(kvm, 0, i);
-
-       kvm_timer_init(kvm);
-       kvm->arch.vgic.ready = true;
-out:
-       mutex_unlock(&kvm->lock);
-       return ret;
-}
-
-int kvm_vgic_create(struct kvm *kvm)
-{
-       int ret = 0;
-
-       mutex_lock(&kvm->lock);
-
-       if (atomic_read(&kvm->online_vcpus) || kvm->arch.vgic.vctrl_base) {
-               ret = -EEXIST;
-               goto out;
-       }
-
-       spin_lock_init(&kvm->arch.vgic.lock);
-       kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
-       kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
-       kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
-
-out:
-       mutex_unlock(&kvm->lock);
-       return ret;
-}
-
-static bool vgic_ioaddr_overlap(struct kvm *kvm)
-{
-       phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
-       phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
-
-       if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
-               return 0;
-       if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
-           (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
-               return -EBUSY;
-       return 0;
-}
-
-static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
-                             phys_addr_t addr, phys_addr_t size)
-{
-       int ret;
-
-       if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
-               return -EEXIST;
-       if (addr + size < addr)
-               return -EINVAL;
-
-       ret = vgic_ioaddr_overlap(kvm);
-       if (ret)
-               return ret;
-       *ioaddr = addr;
-       return ret;
-}
-
-int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
-{
-       int r = 0;
-       struct vgic_dist *vgic = &kvm->arch.vgic;
-
-       if (addr & ~KVM_PHYS_MASK)
-               return -E2BIG;
-
-       if (addr & (SZ_4K - 1))
-               return -EINVAL;
-
-       mutex_lock(&kvm->lock);
-       switch (type) {
-       case KVM_VGIC_V2_ADDR_TYPE_DIST:
-               r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
-                                      addr, KVM_VGIC_V2_DIST_SIZE);
-               break;
-       case KVM_VGIC_V2_ADDR_TYPE_CPU:
-               r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
-                                      addr, KVM_VGIC_V2_CPU_SIZE);
-               break;
-       default:
-               r = -ENODEV;
-       }
-
-       mutex_unlock(&kvm->lock);
-       return r;
-}
index 805e3f8fb00786f00f62820c57b02852ad043aaa..3bc8eb811a732cda131927a5c009bf7d34e2b987 100644 (file)
 
 12:    PLD(    pld     [r1, #124]              )
 13:            ldr4w   r1, r4, r5, r6, r7, abort=19f
-               mov     r3, lr, pull #\pull
+               mov     r3, lr, lspull #\pull
                subs    r2, r2, #32
                ldr4w   r1, r8, r9, ip, lr, abort=19f
-               orr     r3, r3, r4, push #\push
-               mov     r4, r4, pull #\pull
-               orr     r4, r4, r5, push #\push
-               mov     r5, r5, pull #\pull
-               orr     r5, r5, r6, push #\push
-               mov     r6, r6, pull #\pull
-               orr     r6, r6, r7, push #\push
-               mov     r7, r7, pull #\pull
-               orr     r7, r7, r8, push #\push
-               mov     r8, r8, pull #\pull
-               orr     r8, r8, r9, push #\push
-               mov     r9, r9, pull #\pull
-               orr     r9, r9, ip, push #\push
-               mov     ip, ip, pull #\pull
-               orr     ip, ip, lr, push #\push
+               orr     r3, r3, r4, lspush #\push
+               mov     r4, r4, lspull #\pull
+               orr     r4, r4, r5, lspush #\push
+               mov     r5, r5, lspull #\pull
+               orr     r5, r5, r6, lspush #\push
+               mov     r6, r6, lspull #\pull
+               orr     r6, r6, r7, lspush #\push
+               mov     r7, r7, lspull #\pull
+               orr     r7, r7, r8, lspush #\push
+               mov     r8, r8, lspull #\pull
+               orr     r8, r8, r9, lspush #\push
+               mov     r9, r9, lspull #\pull
+               orr     r9, r9, ip, lspush #\push
+               mov     ip, ip, lspull #\pull
+               orr     ip, ip, lr, lspush #\push
                str8w   r0, r3, r4, r5, r6, r7, r8, r9, ip, , abort=19f
                bge     12b
        PLD(    cmn     r2, #96                 )
 14:            ands    ip, r2, #28
                beq     16f
 
-15:            mov     r3, lr, pull #\pull
+15:            mov     r3, lr, lspull #\pull
                ldr1w   r1, lr, abort=21f
                subs    ip, ip, #4
-               orr     r3, r3, lr, push #\push
+               orr     r3, r3, lr, lspush #\push
                str1w   r0, r3, abort=21f
                bgt     15b
        CALGN(  cmp     r2, #0                  )
index d620a5f22a09d4a683b884d9d6836171ded4d5f1..d6e742d240075a05c35902d21f86979b054fb928 100644 (file)
@@ -141,7 +141,7 @@ FN_ENTRY
                tst     len, #2
                mov     r5, r4, get_byte_0
                beq     .Lexit
-               adcs    sum, sum, r4, push #16
+               adcs    sum, sum, r4, lspush #16
                strb    r5, [dst], #1
                mov     r5, r4, get_byte_1
                strb    r5, [dst], #1
@@ -171,23 +171,23 @@ FN_ENTRY
                cmp     ip, #2
                beq     .Lsrc2_aligned
                bhi     .Lsrc3_aligned
-               mov     r4, r5, pull #8         @ C = 0
+               mov     r4, r5, lspull #8               @ C = 0
                bics    ip, len, #15
                beq     2f
 1:             load4l  r5, r6, r7, r8
-               orr     r4, r4, r5, push #24
-               mov     r5, r5, pull #8
-               orr     r5, r5, r6, push #24
-               mov     r6, r6, pull #8
-               orr     r6, r6, r7, push #24
-               mov     r7, r7, pull #8
-               orr     r7, r7, r8, push #24
+               orr     r4, r4, r5, lspush #24
+               mov     r5, r5, lspull #8
+               orr     r5, r5, r6, lspush #24
+               mov     r6, r6, lspull #8
+               orr     r6, r6, r7, lspush #24
+               mov     r7, r7, lspull #8
+               orr     r7, r7, r8, lspush #24
                stmia   dst!, {r4, r5, r6, r7}
                adcs    sum, sum, r4
                adcs    sum, sum, r5
                adcs    sum, sum, r6
                adcs    sum, sum, r7
-               mov     r4, r8, pull #8
+               mov     r4, r8, lspull #8
                sub     ip, ip, #16
                teq     ip, #0
                bne     1b
@@ -196,50 +196,50 @@ FN_ENTRY
                tst     ip, #8
                beq     3f
                load2l  r5, r6
-               orr     r4, r4, r5, push #24
-               mov     r5, r5, pull #8
-               orr     r5, r5, r6, push #24
+               orr     r4, r4, r5, lspush #24
+               mov     r5, r5, lspull #8
+               orr     r5, r5, r6, lspush #24
                stmia   dst!, {r4, r5}
                adcs    sum, sum, r4
                adcs    sum, sum, r5
-               mov     r4, r6, pull #8
+               mov     r4, r6, lspull #8
                tst     ip, #4
                beq     4f
 3:             load1l  r5
-               orr     r4, r4, r5, push #24
+               orr     r4, r4, r5, lspush #24
                str     r4, [dst], #4
                adcs    sum, sum, r4
-               mov     r4, r5, pull #8
+               mov     r4, r5, lspull #8
 4:             ands    len, len, #3
                beq     .Ldone
                mov     r5, r4, get_byte_0
                tst     len, #2
                beq     .Lexit
-               adcs    sum, sum, r4, push #16
+               adcs    sum, sum, r4, lspush #16
                strb    r5, [dst], #1
                mov     r5, r4, get_byte_1
                strb    r5, [dst], #1
                mov     r5, r4, get_byte_2
                b       .Lexit
 
-.Lsrc2_aligned:        mov     r4, r5, pull #16
+.Lsrc2_aligned:        mov     r4, r5, lspull #16
                adds    sum, sum, #0
                bics    ip, len, #15
                beq     2f
 1:             load4l  r5, r6, r7, r8
-               orr     r4, r4, r5, push #16
-               mov     r5, r5, pull #16
-               orr     r5, r5, r6, push #16
-               mov     r6, r6, pull #16
-               orr     r6, r6, r7, push #16
-               mov     r7, r7, pull #16
-               orr     r7, r7, r8, push #16
+               orr     r4, r4, r5, lspush #16
+               mov     r5, r5, lspull #16
+               orr     r5, r5, r6, lspush #16
+               mov     r6, r6, lspull #16
+               orr     r6, r6, r7, lspush #16
+               mov     r7, r7, lspull #16
+               orr     r7, r7, r8, lspush #16
                stmia   dst!, {r4, r5, r6, r7}
                adcs    sum, sum, r4
                adcs    sum, sum, r5
                adcs    sum, sum, r6
                adcs    sum, sum, r7
-               mov     r4, r8, pull #16
+               mov     r4, r8, lspull #16
                sub     ip, ip, #16
                teq     ip, #0
                bne     1b
@@ -248,20 +248,20 @@ FN_ENTRY
                tst     ip, #8
                beq     3f
                load2l  r5, r6
-               orr     r4, r4, r5, push #16
-               mov     r5, r5, pull #16
-               orr     r5, r5, r6, push #16
+               orr     r4, r4, r5, lspush #16
+               mov     r5, r5, lspull #16
+               orr     r5, r5, r6, lspush #16
                stmia   dst!, {r4, r5}
                adcs    sum, sum, r4
                adcs    sum, sum, r5
-               mov     r4, r6, pull #16
+               mov     r4, r6, lspull #16
                tst     ip, #4
                beq     4f
 3:             load1l  r5
-               orr     r4, r4, r5, push #16
+               orr     r4, r4, r5, lspush #16
                str     r4, [dst], #4
                adcs    sum, sum, r4
-               mov     r4, r5, pull #16
+               mov     r4, r5, lspull #16
 4:             ands    len, len, #3
                beq     .Ldone
                mov     r5, r4, get_byte_0
@@ -276,24 +276,24 @@ FN_ENTRY
                load1b  r5
                b       .Lexit
 
-.Lsrc3_aligned:        mov     r4, r5, pull #24
+.Lsrc3_aligned:        mov     r4, r5, lspull #24
                adds    sum, sum, #0
                bics    ip, len, #15
                beq     2f
 1:             load4l  r5, r6, r7, r8
-               orr     r4, r4, r5, push #8
-               mov     r5, r5, pull #24
-               orr     r5, r5, r6, push #8
-               mov     r6, r6, pull #24
-               orr     r6, r6, r7, push #8
-               mov     r7, r7, pull #24
-               orr     r7, r7, r8, push #8
+               orr     r4, r4, r5, lspush #8
+               mov     r5, r5, lspull #24
+               orr     r5, r5, r6, lspush #8
+               mov     r6, r6, lspull #24
+               orr     r6, r6, r7, lspush #8
+               mov     r7, r7, lspull #24
+               orr     r7, r7, r8, lspush #8
                stmia   dst!, {r4, r5, r6, r7}
                adcs    sum, sum, r4
                adcs    sum, sum, r5
                adcs    sum, sum, r6
                adcs    sum, sum, r7
-               mov     r4, r8, pull #24
+               mov     r4, r8, lspull #24
                sub     ip, ip, #16
                teq     ip, #0
                bne     1b
@@ -302,20 +302,20 @@ FN_ENTRY
                tst     ip, #8
                beq     3f
                load2l  r5, r6
-               orr     r4, r4, r5, push #8
-               mov     r5, r5, pull #24
-               orr     r5, r5, r6, push #8
+               orr     r4, r4, r5, lspush #8
+               mov     r5, r5, lspull #24
+               orr     r5, r5, r6, lspush #8
                stmia   dst!, {r4, r5}
                adcs    sum, sum, r4
                adcs    sum, sum, r5
-               mov     r4, r6, pull #24
+               mov     r4, r6, lspull #24
                tst     ip, #4
                beq     4f
 3:             load1l  r5
-               orr     r4, r4, r5, push #8
+               orr     r4, r4, r5, lspush #8
                str     r4, [dst], #4
                adcs    sum, sum, r4
-               mov     r4, r5, pull #24
+               mov     r4, r5, lspull #24
 4:             ands    len, len, #3
                beq     .Ldone
                mov     r5, r4, get_byte_0
@@ -326,7 +326,7 @@ FN_ENTRY
                load1l  r4
                mov     r5, r4, get_byte_0
                strb    r5, [dst], #1
-               adcs    sum, sum, r4, push #24
+               adcs    sum, sum, r4, lspush #24
                mov     r5, r4, get_byte_1
                b       .Lexit
 FN_EXIT
index 5fb97e7f9f4bd9a8cbc2e40ee6ebaea3d273e732..7a7430950c7974621eccc31e65e08294a1492d1e 100644 (file)
@@ -47,25 +47,25 @@ ENTRY(__raw_readsl)
                strb    ip, [r1], #1
 
 4:             subs    r2, r2, #1
-               mov     ip, r3, pull #24
+               mov     ip, r3, lspull #24
                ldrne   r3, [r0]
-               orrne   ip, ip, r3, push #8
+               orrne   ip, ip, r3, lspush #8
                strne   ip, [r1], #4
                bne     4b
                b       8f
 
 5:             subs    r2, r2, #1
-               mov     ip, r3, pull #16
+               mov     ip, r3, lspull #16
                ldrne   r3, [r0]
-               orrne   ip, ip, r3, push #16
+               orrne   ip, ip, r3, lspush #16
                strne   ip, [r1], #4
                bne     5b
                b       7f
 
 6:             subs    r2, r2, #1
-               mov     ip, r3, pull #8
+               mov     ip, r3, lspull #8
                ldrne   r3, [r0]
-               orrne   ip, ip, r3, push #24
+               orrne   ip, ip, r3, lspush #24
                strne   ip, [r1], #4
                bne     6b
 
index 8d3b7813725cde5b877a896f4ad4780fff663781..d0d104a0dd116890db92e91e75bd6ef32a2d00e9 100644 (file)
@@ -41,26 +41,26 @@ ENTRY(__raw_writesl)
                blt     5f
                bgt     6f
 
-4:             mov     ip, r3, pull #16
+4:             mov     ip, r3, lspull #16
                ldr     r3, [r1], #4
                subs    r2, r2, #1
-               orr     ip, ip, r3, push #16
+               orr     ip, ip, r3, lspush #16
                str     ip, [r0]
                bne     4b
                mov     pc, lr
 
-5:             mov     ip, r3, pull #8
+5:             mov     ip, r3, lspull #8
                ldr     r3, [r1], #4
                subs    r2, r2, #1
-               orr     ip, ip, r3, push #24
+               orr     ip, ip, r3, lspush #24
                str     ip, [r0]
                bne     5b
                mov     pc, lr
 
-6:             mov     ip, r3, pull #24
+6:             mov     ip, r3, lspull #24
                ldr     r3, [r1], #4
                subs    r2, r2, #1
-               orr     ip, ip, r3, push #8
+               orr     ip, ip, r3, lspush #8
                str     ip, [r0]
                bne     6b
                mov     pc, lr
index 938fc14f962d35693cc96c9d3f8899ae1b5bd193..d1fc0c0c342cff0a13e6d07ae8b6af76f609ffdc 100644 (file)
@@ -147,24 +147,24 @@ ENTRY(memmove)
 
 12:    PLD(    pld     [r1, #-128]             )
 13:            ldmdb   r1!, {r7, r8, r9, ip}
-               mov     lr, r3, push #\push
+               mov     lr, r3, lspush #\push
                subs    r2, r2, #32
                ldmdb   r1!, {r3, r4, r5, r6}
-               orr     lr, lr, ip, pull #\pull
-               mov     ip, ip, push #\push
-               orr     ip, ip, r9, pull #\pull
-               mov     r9, r9, push #\push
-               orr     r9, r9, r8, pull #\pull
-               mov     r8, r8, push #\push
-               orr     r8, r8, r7, pull #\pull
-               mov     r7, r7, push #\push
-               orr     r7, r7, r6, pull #\pull
-               mov     r6, r6, push #\push
-               orr     r6, r6, r5, pull #\pull
-               mov     r5, r5, push #\push
-               orr     r5, r5, r4, pull #\pull
-               mov     r4, r4, push #\push
-               orr     r4, r4, r3, pull #\pull
+               orr     lr, lr, ip, lspull #\pull
+               mov     ip, ip, lspush #\push
+               orr     ip, ip, r9, lspull #\pull
+               mov     r9, r9, lspush #\push
+               orr     r9, r9, r8, lspull #\pull
+               mov     r8, r8, lspush #\push
+               orr     r8, r8, r7, lspull #\pull
+               mov     r7, r7, lspush #\push
+               orr     r7, r7, r6, lspull #\pull
+               mov     r6, r6, lspush #\push
+               orr     r6, r6, r5, lspull #\pull
+               mov     r5, r5, lspush #\push
+               orr     r5, r5, r4, lspull #\pull
+               mov     r4, r4, lspush #\push
+               orr     r4, r4, r3, lspull #\pull
                stmdb   r0!, {r4 - r9, ip, lr}
                bge     12b
        PLD(    cmn     r2, #96                 )
@@ -175,10 +175,10 @@ ENTRY(memmove)
 14:            ands    ip, r2, #28
                beq     16f
 
-15:            mov     lr, r3, push #\push
+15:            mov     lr, r3, lspush #\push
                ldr     r3, [r1, #-4]!
                subs    ip, ip, #4
-               orr     lr, lr, r3, pull #\pull
+               orr     lr, lr, r3, lspull #\pull
                str     lr, [r0, #-4]!
                bgt     15b
        CALGN(  cmp     r2, #0                  )
index 5c908b1cb8ed5db3eeabfb89f7f659f99d2d6f76..e50520904b76416cc97274465efa445170fe3fe1 100644 (file)
@@ -117,9 +117,9 @@ USER(       TUSER(  strgtb) r3, [r0], #1)                   @ May fault
 .Lc2u_1fupi:   subs    r2, r2, #4
                addmi   ip, r2, #4
                bmi     .Lc2u_1nowords
-               mov     r3, r7, pull #8
+               mov     r3, r7, lspull #8
                ldr     r7, [r1], #4
-               orr     r3, r3, r7, push #24
+               orr     r3, r3, r7, lspush #24
 USER(  TUSER(  str)    r3, [r0], #4)                   @ May fault
                mov     ip, r0, lsl #32 - PAGE_SHIFT
                rsb     ip, ip, #0
@@ -131,30 +131,30 @@ USER(     TUSER(  str)    r3, [r0], #4)                   @ May fault
                subs    ip, ip, #16
                blt     .Lc2u_1rem8lp
 
-.Lc2u_1cpy8lp: mov     r3, r7, pull #8
+.Lc2u_1cpy8lp: mov     r3, r7, lspull #8
                ldmia   r1!, {r4 - r7}
                subs    ip, ip, #16
-               orr     r3, r3, r4, push #24
-               mov     r4, r4, pull #8
-               orr     r4, r4, r5, push #24
-               mov     r5, r5, pull #8
-               orr     r5, r5, r6, push #24
-               mov     r6, r6, pull #8
-               orr     r6, r6, r7, push #24
+               orr     r3, r3, r4, lspush #24
+               mov     r4, r4, lspull #8
+               orr     r4, r4, r5, lspush #24
+               mov     r5, r5, lspull #8
+               orr     r5, r5, r6, lspush #24
+               mov     r6, r6, lspull #8
+               orr     r6, r6, r7, lspush #24
                stmia   r0!, {r3 - r6}                  @ Shouldnt fault
                bpl     .Lc2u_1cpy8lp
 
 .Lc2u_1rem8lp: tst     ip, #8
-               movne   r3, r7, pull #8
+               movne   r3, r7, lspull #8
                ldmneia r1!, {r4, r7}
-               orrne   r3, r3, r4, push #24
-               movne   r4, r4, pull #8
-               orrne   r4, r4, r7, push #24
+               orrne   r3, r3, r4, lspush #24
+               movne   r4, r4, lspull #8
+               orrne   r4, r4, r7, lspush #24
                stmneia r0!, {r3 - r4}                  @ Shouldnt fault
                tst     ip, #4
-               movne   r3, r7, pull #8
+               movne   r3, r7, lspull #8
                ldrne   r7, [r1], #4
-               orrne   r3, r3, r7, push #24
+               orrne   r3, r3, r7, lspush #24
        TUSER(  strne) r3, [r0], #4                     @ Shouldnt fault
                ands    ip, ip, #3
                beq     .Lc2u_1fupi
@@ -172,9 +172,9 @@ USER(       TUSER(  strgtb) r3, [r0], #1)                   @ May fault
 .Lc2u_2fupi:   subs    r2, r2, #4
                addmi   ip, r2, #4
                bmi     .Lc2u_2nowords
-               mov     r3, r7, pull #16
+               mov     r3, r7, lspull #16
                ldr     r7, [r1], #4
-               orr     r3, r3, r7, push #16
+               orr     r3, r3, r7, lspush #16
 USER(  TUSER(  str)    r3, [r0], #4)                   @ May fault
                mov     ip, r0, lsl #32 - PAGE_SHIFT
                rsb     ip, ip, #0
@@ -186,30 +186,30 @@ USER(     TUSER(  str)    r3, [r0], #4)                   @ May fault
                subs    ip, ip, #16
                blt     .Lc2u_2rem8lp
 
-.Lc2u_2cpy8lp: mov     r3, r7, pull #16
+.Lc2u_2cpy8lp: mov     r3, r7, lspull #16
                ldmia   r1!, {r4 - r7}
                subs    ip, ip, #16
-               orr     r3, r3, r4, push #16
-               mov     r4, r4, pull #16
-               orr     r4, r4, r5, push #16
-               mov     r5, r5, pull #16
-               orr     r5, r5, r6, push #16
-               mov     r6, r6, pull #16
-               orr     r6, r6, r7, push #16
+               orr     r3, r3, r4, lspush #16
+               mov     r4, r4, lspull #16
+               orr     r4, r4, r5, lspush #16
+               mov     r5, r5, lspull #16
+               orr     r5, r5, r6, lspush #16
+               mov     r6, r6, lspull #16
+               orr     r6, r6, r7, lspush #16
                stmia   r0!, {r3 - r6}                  @ Shouldnt fault
                bpl     .Lc2u_2cpy8lp
 
 .Lc2u_2rem8lp: tst     ip, #8
-               movne   r3, r7, pull #16
+               movne   r3, r7, lspull #16
                ldmneia r1!, {r4, r7}
-               orrne   r3, r3, r4, push #16
-               movne   r4, r4, pull #16
-               orrne   r4, r4, r7, push #16
+               orrne   r3, r3, r4, lspush #16
+               movne   r4, r4, lspull #16
+               orrne   r4, r4, r7, lspush #16
                stmneia r0!, {r3 - r4}                  @ Shouldnt fault
                tst     ip, #4
-               movne   r3, r7, pull #16
+               movne   r3, r7, lspull #16
                ldrne   r7, [r1], #4
-               orrne   r3, r3, r7, push #16
+               orrne   r3, r3, r7, lspush #16
        TUSER(  strne) r3, [r0], #4                     @ Shouldnt fault
                ands    ip, ip, #3
                beq     .Lc2u_2fupi
@@ -227,9 +227,9 @@ USER(       TUSER(  strgtb) r3, [r0], #1)                   @ May fault
 .Lc2u_3fupi:   subs    r2, r2, #4
                addmi   ip, r2, #4
                bmi     .Lc2u_3nowords
-               mov     r3, r7, pull #24
+               mov     r3, r7, lspull #24
                ldr     r7, [r1], #4
-               orr     r3, r3, r7, push #8
+               orr     r3, r3, r7, lspush #8
 USER(  TUSER(  str)    r3, [r0], #4)                   @ May fault
                mov     ip, r0, lsl #32 - PAGE_SHIFT
                rsb     ip, ip, #0
@@ -241,30 +241,30 @@ USER(     TUSER(  str)    r3, [r0], #4)                   @ May fault
                subs    ip, ip, #16
                blt     .Lc2u_3rem8lp
 
-.Lc2u_3cpy8lp: mov     r3, r7, pull #24
+.Lc2u_3cpy8lp: mov     r3, r7, lspull #24
                ldmia   r1!, {r4 - r7}
                subs    ip, ip, #16
-               orr     r3, r3, r4, push #8
-               mov     r4, r4, pull #24
-               orr     r4, r4, r5, push #8
-               mov     r5, r5, pull #24
-               orr     r5, r5, r6, push #8
-               mov     r6, r6, pull #24
-               orr     r6, r6, r7, push #8
+               orr     r3, r3, r4, lspush #8
+               mov     r4, r4, lspull #24
+               orr     r4, r4, r5, lspush #8
+               mov     r5, r5, lspull #24
+               orr     r5, r5, r6, lspush #8
+               mov     r6, r6, lspull #24
+               orr     r6, r6, r7, lspush #8
                stmia   r0!, {r3 - r6}                  @ Shouldnt fault
                bpl     .Lc2u_3cpy8lp
 
 .Lc2u_3rem8lp: tst     ip, #8
-               movne   r3, r7, pull #24
+               movne   r3, r7, lspull #24
                ldmneia r1!, {r4, r7}
-               orrne   r3, r3, r4, push #8
-               movne   r4, r4, pull #24
-               orrne   r4, r4, r7, push #8
+               orrne   r3, r3, r4, lspush #8
+               movne   r4, r4, lspull #24
+               orrne   r4, r4, r7, lspush #8
                stmneia r0!, {r3 - r4}                  @ Shouldnt fault
                tst     ip, #4
-               movne   r3, r7, pull #24
+               movne   r3, r7, lspull #24
                ldrne   r7, [r1], #4
-               orrne   r3, r3, r7, push #8
+               orrne   r3, r3, r7, lspush #8
        TUSER(  strne) r3, [r0], #4                     @ Shouldnt fault
                ands    ip, ip, #3
                beq     .Lc2u_3fupi
@@ -382,9 +382,9 @@ USER(       TUSER(  ldr)    r7, [r1], #4)                   @ May fault
 .Lcfu_1fupi:   subs    r2, r2, #4
                addmi   ip, r2, #4
                bmi     .Lcfu_1nowords
-               mov     r3, r7, pull #8
+               mov     r3, r7, lspull #8
 USER(  TUSER(  ldr)    r7, [r1], #4)                   @ May fault
-               orr     r3, r3, r7, push #24
+               orr     r3, r3, r7, lspush #24
                str     r3, [r0], #4
                mov     ip, r1, lsl #32 - PAGE_SHIFT
                rsb     ip, ip, #0
@@ -396,30 +396,30 @@ USER(     TUSER(  ldr)    r7, [r1], #4)                   @ May fault
                subs    ip, ip, #16
                blt     .Lcfu_1rem8lp
 
-.Lcfu_1cpy8lp: mov     r3, r7, pull #8
+.Lcfu_1cpy8lp: mov     r3, r7, lspull #8
                ldmia   r1!, {r4 - r7}                  @ Shouldnt fault
                subs    ip, ip, #16
-               orr     r3, r3, r4, push #24
-               mov     r4, r4, pull #8
-               orr     r4, r4, r5, push #24
-               mov     r5, r5, pull #8
-               orr     r5, r5, r6, push #24
-               mov     r6, r6, pull #8
-               orr     r6, r6, r7, push #24
+               orr     r3, r3, r4, lspush #24
+               mov     r4, r4, lspull #8
+               orr     r4, r4, r5, lspush #24
+               mov     r5, r5, lspull #8
+               orr     r5, r5, r6, lspush #24
+               mov     r6, r6, lspull #8
+               orr     r6, r6, r7, lspush #24
                stmia   r0!, {r3 - r6}
                bpl     .Lcfu_1cpy8lp
 
 .Lcfu_1rem8lp: tst     ip, #8
-               movne   r3, r7, pull #8
+               movne   r3, r7, lspull #8
                ldmneia r1!, {r4, r7}                   @ Shouldnt fault
-               orrne   r3, r3, r4, push #24
-               movne   r4, r4, pull #8
-               orrne   r4, r4, r7, push #24
+               orrne   r3, r3, r4, lspush #24
+               movne   r4, r4, lspull #8
+               orrne   r4, r4, r7, lspush #24
                stmneia r0!, {r3 - r4}
                tst     ip, #4
-               movne   r3, r7, pull #8
+               movne   r3, r7, lspull #8
 USER(  TUSER(  ldrne) r7, [r1], #4)                    @ May fault
-               orrne   r3, r3, r7, push #24
+               orrne   r3, r3, r7, lspush #24
                strne   r3, [r0], #4
                ands    ip, ip, #3
                beq     .Lcfu_1fupi
@@ -437,9 +437,9 @@ USER(       TUSER(  ldrne) r7, [r1], #4)                    @ May fault
 .Lcfu_2fupi:   subs    r2, r2, #4
                addmi   ip, r2, #4
                bmi     .Lcfu_2nowords
-               mov     r3, r7, pull #16
+               mov     r3, r7, lspull #16
 USER(  TUSER(  ldr)    r7, [r1], #4)                   @ May fault
-               orr     r3, r3, r7, push #16
+               orr     r3, r3, r7, lspush #16
                str     r3, [r0], #4
                mov     ip, r1, lsl #32 - PAGE_SHIFT
                rsb     ip, ip, #0
@@ -452,30 +452,30 @@ USER(     TUSER(  ldr)    r7, [r1], #4)                   @ May fault
                blt     .Lcfu_2rem8lp
 
 
-.Lcfu_2cpy8lp: mov     r3, r7, pull #16
+.Lcfu_2cpy8lp: mov     r3, r7, lspull #16
                ldmia   r1!, {r4 - r7}                  @ Shouldnt fault
                subs    ip, ip, #16
-               orr     r3, r3, r4, push #16
-               mov     r4, r4, pull #16
-               orr     r4, r4, r5, push #16
-               mov     r5, r5, pull #16
-               orr     r5, r5, r6, push #16
-               mov     r6, r6, pull #16
-               orr     r6, r6, r7, push #16
+               orr     r3, r3, r4, lspush #16
+               mov     r4, r4, lspull #16
+               orr     r4, r4, r5, lspush #16
+               mov     r5, r5, lspull #16
+               orr     r5, r5, r6, lspush #16
+               mov     r6, r6, lspull #16
+               orr     r6, r6, r7, lspush #16
                stmia   r0!, {r3 - r6}
                bpl     .Lcfu_2cpy8lp
 
 .Lcfu_2rem8lp: tst     ip, #8
-               movne   r3, r7, pull #16
+               movne   r3, r7, lspull #16
                ldmneia r1!, {r4, r7}                   @ Shouldnt fault
-               orrne   r3, r3, r4, push #16
-               movne   r4, r4, pull #16
-               orrne   r4, r4, r7, push #16
+               orrne   r3, r3, r4, lspush #16
+               movne   r4, r4, lspull #16
+               orrne   r4, r4, r7, lspush #16
                stmneia r0!, {r3 - r4}
                tst     ip, #4
-               movne   r3, r7, pull #16
+               movne   r3, r7, lspull #16
 USER(  TUSER(  ldrne) r7, [r1], #4)                    @ May fault
-               orrne   r3, r3, r7, push #16
+               orrne   r3, r3, r7, lspush #16
                strne   r3, [r0], #4
                ands    ip, ip, #3
                beq     .Lcfu_2fupi
@@ -493,9 +493,9 @@ USER(       TUSER(  ldrgtb) r3, [r1], #0)                   @ May fault
 .Lcfu_3fupi:   subs    r2, r2, #4
                addmi   ip, r2, #4
                bmi     .Lcfu_3nowords
-               mov     r3, r7, pull #24
+               mov     r3, r7, lspull #24
 USER(  TUSER(  ldr)    r7, [r1], #4)                   @ May fault
-               orr     r3, r3, r7, push #8
+               orr     r3, r3, r7, lspush #8
                str     r3, [r0], #4
                mov     ip, r1, lsl #32 - PAGE_SHIFT
                rsb     ip, ip, #0
@@ -507,30 +507,30 @@ USER(     TUSER(  ldr)    r7, [r1], #4)                   @ May fault
                subs    ip, ip, #16
                blt     .Lcfu_3rem8lp
 
-.Lcfu_3cpy8lp: mov     r3, r7, pull #24
+.Lcfu_3cpy8lp: mov     r3, r7, lspull #24
                ldmia   r1!, {r4 - r7}                  @ Shouldnt fault
-               orr     r3, r3, r4, push #8
-               mov     r4, r4, pull #24
-               orr     r4, r4, r5, push #8
-               mov     r5, r5, pull #24
-               orr     r5, r5, r6, push #8
-               mov     r6, r6, pull #24
-               orr     r6, r6, r7, push #8
+               orr     r3, r3, r4, lspush #8
+               mov     r4, r4, lspull #24
+               orr     r4, r4, r5, lspush #8
+               mov     r5, r5, lspull #24
+               orr     r5, r5, r6, lspush #8
+               mov     r6, r6, lspull #24
+               orr     r6, r6, r7, lspush #8
                stmia   r0!, {r3 - r6}
                subs    ip, ip, #16
                bpl     .Lcfu_3cpy8lp
 
 .Lcfu_3rem8lp: tst     ip, #8
-               movne   r3, r7, pull #24
+               movne   r3, r7, lspull #24
                ldmneia r1!, {r4, r7}                   @ Shouldnt fault
-               orrne   r3, r3, r4, push #8
-               movne   r4, r4, pull #24
-               orrne   r4, r4, r7, push #8
+               orrne   r3, r3, r4, lspush #8
+               movne   r4, r4, lspull #24
+               orrne   r4, r4, r7, lspush #8
                stmneia r0!, {r3 - r4}
                tst     ip, #4
-               movne   r3, r7, pull #24
+               movne   r3, r7, lspull #24
 USER(  TUSER(  ldrne) r7, [r1], #4)                    @ May fault
-               orrne   r3, r3, r7, push #8
+               orrne   r3, r3, r7, lspush #8
                strne   r3, [r0], #4
                ands    ip, ip, #3
                beq     .Lcfu_3fupi
index da841885d01c724c9b399de0080137a5c7be7ef3..64f9f1045539bf366470570fb82b4f40ec934a81 100644 (file)
@@ -947,6 +947,7 @@ static int __init at91_clock_reset(void)
        }
 
        at91_pmc_write(AT91_PMC_SCDR, scdr);
+       at91_pmc_write(AT91_PMC_PCDR, pcdr);
        if (cpu_is_sama5d3())
                at91_pmc_write(AT91_PMC_PCDR1, pcdr1);
 
index 2adb2683f074de2e5846d06a0b370b8ff2b99a38..6124da1a07d4f8198ab29ccf93211c6ed7c012f7 100644 (file)
@@ -323,7 +323,8 @@ void omap3_save_scratchpad_contents(void)
                scratchpad_contents.public_restore_ptr =
                        virt_to_phys(omap3_restore_3630);
        else if (omap_rev() != OMAP3430_REV_ES3_0 &&
-                                       omap_rev() != OMAP3430_REV_ES3_1)
+                                       omap_rev() != OMAP3430_REV_ES3_1 &&
+                                       omap_rev() != OMAP3430_REV_ES3_1_2)
                scratchpad_contents.public_restore_ptr =
                        virt_to_phys(omap3_restore);
        else
index 44c609a1ec5dba4b99dcbf9cfdd3286d8132464c..62e40a9fffa91c99f9149144155dda7d8d046b73 100644 (file)
@@ -2177,6 +2177,8 @@ static int _enable(struct omap_hwmod *oh)
                         oh->mux->pads_dynamic))) {
                omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
                _reconfigure_io_chain();
+       } else if (oh->flags & HWMOD_FORCE_MSTANDBY) {
+               _reconfigure_io_chain();
        }
 
        _add_initiator_dep(oh, mpu_oh);
@@ -2283,6 +2285,8 @@ static int _idle(struct omap_hwmod *oh)
        if (oh->mux && oh->mux->pads_dynamic) {
                omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
                _reconfigure_io_chain();
+       } else if (oh->flags & HWMOD_FORCE_MSTANDBY) {
+               _reconfigure_io_chain();
        }
 
        oh->_state = _HWMOD_STATE_IDLE;
index 5ecf911bdfd823a9eaa2bae06d6068868a06298e..30f5dc3b0e52e2bfa0a14f28688bf5b492ba459f 100755 (executable)
@@ -22,64 +22,4 @@ config ARCH_ROCKCHIP
        select FIQ_GLUE
        select ARM_ERRATA_818325
 
-if ARCH_ROCKCHIP
-
-config RK_LAST_LOG
-       bool "Save the last kernel log on /proc/last_log"
-       depends on DEBUG_KERNEL && PRINTK
-       default y
-       help
-         It is only intended for debugging.
-
-config RK_DEBUG_UART
-       int "Debug UART"
-       default 2
-       help
-         Select a UART for debugging. -1 disable.
-
-config RK_USB_UART
-        bool "Support USB UART Bypass Function"
-        depends on (RK_DEBUG_UART = 2) 
-
-config RK_CONSOLE_THREAD
-       bool "Console write by thread"
-       depends on FIQ_DEBUGGER_CONSOLE
-       default y
-       help
-         Normal kernel printk will write out to UART by "kconsole" kthread
-
-config BLOCK_RKNAND
-       tristate "RK NAND Device Support"
-       default n
-       help
-         RK NAND Device Support.
-
-config RK_FPGA
-       bool "FPGA Board"
-
-config DVFS
-        bool "Enable dvfs"
-       default y
-       select PM_OPP
-       select CPU_FREQ
-
-config RK_PM_TESTS
-       bool "/sys/pm_tests/ support"
-       default n
-       select DVFS
-       select WATCHDOG
-
-config DDR_TEST
-       bool "DDR Test"
-       select CRC32
-       default n
-
-config RK_VCODEC
-       tristate "VCODEC (VPU HEVC) service driver in kernel"
-       depends on ARCH_ROCKCHIP
-       default y
-
-config RK_PL330_DMA_TEST
-       bool "pl330 DMA memcpy test"
-
-endif
+source "arch/arm/mach-rockchip/Kconfig.common"
diff --git a/arch/arm/mach-rockchip/Kconfig.common b/arch/arm/mach-rockchip/Kconfig.common
new file mode 100644 (file)
index 0000000..f0a5f3a
--- /dev/null
@@ -0,0 +1,61 @@
+if ARCH_ROCKCHIP
+
+config RK_LAST_LOG
+       bool "Save the last kernel log on /proc/last_log"
+       depends on DEBUG_KERNEL && PRINTK
+       default y
+       help
+         It is only intended for debugging.
+
+config RK_DEBUG_UART
+       int "Debug UART"
+       default 2
+       help
+         Select a UART for debugging. -1 disable.
+
+config RK_USB_UART
+        bool "Support USB UART Bypass Function"
+        depends on (RK_DEBUG_UART = 2) 
+
+config RK_CONSOLE_THREAD
+       bool "Console write by thread"
+       depends on FIQ_DEBUGGER_CONSOLE
+       default y
+       help
+         Normal kernel printk will write out to UART by "kconsole" kthread
+
+config BLOCK_RKNAND
+       tristate "RK NAND Device Support"
+       default n
+       help
+         RK NAND Device Support.
+
+config RK_FPGA
+       bool "FPGA Board"
+
+config DVFS
+        bool "Enable dvfs"
+       default y
+       select PM_OPP
+       select CPU_FREQ
+
+config RK_PM_TESTS
+       bool "/sys/pm_tests/ support"
+       default n
+       select DVFS
+       select WATCHDOG
+
+config DDR_TEST
+       bool "DDR Test"
+       select CRC32
+       default n
+
+config RK_VCODEC
+       tristate "VCODEC (VPU HEVC) service driver in kernel"
+       depends on ARCH_ROCKCHIP
+       default y
+
+config RK_PL330_DMA_TEST
+       bool "pl330 DMA memcpy test"
+
+endif
index e2fa463f8c56a1ce065b94ac8dbd93ba5f8a5c22..a3d129a0a2728c9be80a85ce4a28e2d14ab37fea 100755 (executable)
@@ -21,7 +21,9 @@
 #include <linux/of_platform.h>
 #include <linux/of_fdt.h>
 #include <asm/cputype.h>
+#ifdef CONFIG_CACHE_L2X0
 #include <asm/hardware/cache-l2x0.h>
+#endif
 #include <linux/rockchip/common.h>
 #include <linux/rockchip/pmu.h>
 #include <linux/memblock.h>
@@ -103,7 +105,7 @@ static int __init rockchip_cpu_axi_init(void)
                                iounmap(base);
                }
        }
-       dsb();
+       dsb(sy);
 
 #undef MAP
 
@@ -111,6 +113,7 @@ static int __init rockchip_cpu_axi_init(void)
 }
 early_initcall(rockchip_cpu_axi_init);
 
+#ifdef CONFIG_CACHE_L2X0
 static int __init rockchip_pl330_l2_cache_init(void)
 {
        struct device_node *np;
@@ -150,6 +153,7 @@ static int __init rockchip_pl330_l2_cache_init(void)
        return 0;
 }
 early_initcall(rockchip_pl330_l2_cache_init);
+#endif
 
 struct gen_pool *rockchip_sram_pool = NULL;
 struct pie_chunk *rockchip_pie_chunk = NULL;
index 235e124e9be656e464e7cf471ea15f2787bb3281..dbb2325ebbd35eddb27495ed89ac4546f6314c27 100644 (file)
@@ -10,7 +10,9 @@
 #include <linux/delay.h>
 #include <linux/rockchip/cpu.h>
 #include <linux/rockchip/iomap.h>
+#ifdef CONFIG_ARM
 #include <asm/system_info.h>
+#endif
 #include "efuse.h"
 
 #define efuse_readl(offset) readl_relaxed(RK_EFUSE_VIRT + offset)
@@ -83,6 +85,7 @@ static int rk3288_get_leakage(int ch)
        return efuse_buf[23+ch];
 }
 
+#ifdef CONFIG_ARM
 static void __init rk3288_set_system_serial(void)
 {
        int i;
@@ -96,6 +99,9 @@ static void __init rk3288_set_system_serial(void)
        system_serial_low = crc32(0, buf, 8);
        system_serial_high = crc32(system_serial_low, buf + 8, 8);
 }
+#else
+static inline void __init rk3288_set_system_serial(void) {}
+#endif
 
 int rk312x_efuse_readregs(u32 addr, u32 length, u8 *buf)
 {
index e3c1d5fbf8453881a3b4d6db837560daa37c040e..2907bab41febc148e6bbf6a948da663dce0b69a1 100644 (file)
@@ -85,7 +85,8 @@ static int __init rk_last_log_init(void)
 
        log_buf = last_log_vmap(virt_to_phys(buf), 1 << LOG_BUF_PAGE_ORDER);
        if (!log_buf) {
-               pr_err("failed to map %d pages at 0x%08x\n", 1 << LOG_BUF_PAGE_ORDER, virt_to_phys(buf));
+               pr_err("failed to map %d pages at 0x%08llx\n", 1 << LOG_BUF_PAGE_ORDER,
+                      (unsigned long long)virt_to_phys(buf));
                return 0;
        }
 
@@ -100,7 +101,7 @@ static int __init rk_last_log_init(void)
        memcpy(log_buf, early_log_buf, early_log_size);
        memset(log_buf + early_log_size, 0, LOG_BUF_LEN - early_log_size);
 
-       pr_info("0x%08x map to 0x%p and copy to 0x%p, size 0x%x early 0x%x (version 3.0)\n", virt_to_phys(buf), log_buf, last_log_buf, LOG_BUF_LEN, early_log_size);
+       pr_info("0x%08llx map to 0x%p and copy to 0x%p, size 0x%x early 0x%zx (version 3.0)\n", (unsigned long long)virt_to_phys(buf), log_buf, last_log_buf, LOG_BUF_LEN, early_log_size);
 
        entry = proc_create("last_kmsg", S_IRUSR, NULL, &last_log_fops);
        if (!entry) {
index 24d43619aa6df7f4c0dfd8247fc0c0c227b844d1..b8ab1a787f1166cd922bb6df07952a235a821e38 100755 (executable)
@@ -141,7 +141,11 @@ EXPORT_SYMBOL(rk_nand_get_device);
 \r
 unsigned long rknand_dma_flush_dcache(unsigned long ptr,int size,int dir)\r
 {\r
+#ifdef CONFIG_ARM64\r
+       __flush_dcache_area((void *)ptr, size + 63);\r
+#else\r
      __cpuc_flush_dcache_area((void*)ptr, size + 63);\r
+#endif\r
     return ((unsigned long )virt_to_phys((void *)ptr));\r
 }\r
 EXPORT_SYMBOL(rknand_dma_flush_dcache);\r
index 74ac848c2c649bf4e8eccb3cec5ca73aa5931df4..d3e8a1ead7f9e0b200141d3db689db8109cba2ce 100755 (executable)
@@ -11,6 +11,7 @@
 #include <asm/io.h>
 #include "pm.h"
 
+#ifdef CONFIG_ARM
 /*************************dump reg********************************************/
 
 void rkpm_ddr_reg_offset_dump(void __iomem * base_addr,u32 _offset)
@@ -567,6 +568,7 @@ void __init rockchip_suspend_init(void)
     suspend_set_ops(&rockchip_suspend_ops);
     return;
 }
+#endif /* CONFIG_ARM */
 
 static enum rockchip_pm_policy pm_policy;
 static BLOCKING_NOTIFIER_HEAD(policy_notifier_list);
index 91149ae23097f5e9ef5996d2fbfd330c175fd87a..f7863de25bdadde72f2e5c8fae949a014a7ec409 100644 (file)
@@ -2,7 +2,9 @@
 #define __MACH_ROCKCHIP_SRAM_H
 
 #include <linux/pie.h>
+#ifdef CONFIG_PIE
 #include <asm/pie.h>
+#endif
 
 extern char __pie_common_start[];
 extern char __pie_common_end[];
index 36e9f24e03b0213cb1682ab131a0f0b9c2176998..2e719593c5cb7cbf0bb9789e9ef0266495e94db9 100644 (file)
@@ -778,6 +778,7 @@ config NEED_KUSER_HELPERS
 
 config KUSER_HELPERS
        bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+       depends on MMU
        default y
        help
          Warning: disabling this option may break user programs.
index 8045a48c8476ed8c0ad8e0601561bb3d03d2c5a1..2d861a210ac56d14955e5a2a230471e5427af62c 100644 (file)
@@ -17,6 +17,7 @@ obj-$(CONFIG_MODULES)         += proc-syms.o
 
 obj-$(CONFIG_ALIGNMENT_TRAP)   += alignment.o
 obj-$(CONFIG_HIGHMEM)          += highmem.o
+obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
 
 obj-$(CONFIG_CPU_ABRT_NOMMU)   += abort-nommu.o
 obj-$(CONFIG_CPU_ABRT_EV4)     += abort-ev4.o
index 3815a8262af070b98f33d61ac31908961ee06eb2..8c48c5c22a331aac8f547335d6990c598457ef0b 100644 (file)
  */
        .align  5
 ENTRY(v6_early_abort)
-#ifdef CONFIG_CPU_V6
-       sub     r1, sp, #4                      @ Get unused stack location
-       strex   r0, r1, [r1]                    @ Clear the exclusive monitor
-#elif defined(CONFIG_CPU_32v6K)
-       clrex
-#endif
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
 /*
index 703375277ba6d3dcdae7f93404d2d19e531aad68..4812ad054214572ba6e7198247e2c190e469897d 100644 (file)
  */
        .align  5
 ENTRY(v7_early_abort)
-       /*
-        * The effect of data aborts on on the exclusive access monitor are
-        * UNPREDICTABLE. Do a CLREX to clear the state
-        */
-       clrex
-
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
 
index 924036473b16f437019002b8afac36a2a41e4010..d301662b7b329e95981b8c5ceb812856c18b68c3 100644 (file)
@@ -40,6 +40,7 @@
  * This code is not portable to processors with late data abort handling.
  */
 #define CODING_BITS(i) (i & 0x0e000000)
+#define COND_BITS(i)   (i & 0xf0000000)
 
 #define LDST_I_BIT(i)  (i & (1 << 26))         /* Immediate constant   */
 #define LDST_P_BIT(i)  (i & (1 << 24))         /* Preindex             */
@@ -817,6 +818,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                break;
 
        case 0x04000000:        /* ldr or str immediate */
+               if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */
+                       goto bad;
                offset.un = OFFSET_BITS(instr);
                handler = do_alignment_ldrstr;
                break;
index 1b5b8ed71532222069216e622bcaa59f7c59a7a9..9de8940c29e6247d6c76e8d7daee06c43c1b1641 100644 (file)
@@ -250,7 +250,7 @@ static void __dma_free_buffer(struct page *page, size_t size)
 
 #ifdef CONFIG_MMU
 #ifdef CONFIG_HUGETLB_PAGE
-#error ARM Coherent DMA allocator does not (yet) support huge TLB
+#warning ARM Coherent DMA allocator does not (yet) support huge TLB
 #endif
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
index 56059a5be9a89adaa588d7275beb3362f7d2cc03..9820ad4b80c027514c19a472a3bf785bb22a0202 100644 (file)
@@ -261,9 +261,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, sig, code;
-       int write = fsr & FSR_WRITE;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                               (write ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        if (notify_page_fault(regs, fsr))
                return 0;
@@ -282,6 +280,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        if (in_atomic() || irqs_disabled() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+       if (fsr & FSR_WRITE)
+               flags |= FAULT_FLAG_WRITE;
+
        /*
         * As per x86, we may deadlock here.  However, since the kernel only
         * validly references user space from well defined areas of the code,
@@ -349,6 +352,13 @@ retry:
        if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
                return 0;
 
+       /*
+        * If we are in kernel mode at this point, we
+        * have no context to handle this fault with.
+        */
+       if (!user_mode(regs))
+               goto no_context;
+
        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, call the OOM killer, and return to
@@ -359,13 +369,6 @@ retry:
                return 0;
        }
 
-       /*
-        * If we are in kernel mode at this point, we
-        * have no context to handle this fault with.
-        */
-       if (!user_mode(regs))
-               goto no_context;
-
        if (fault & VM_FAULT_SIGBUS) {
                /*
                 * We had some memory, but were unable to
index 32aa5861119f2bdd353468114462dd8a680a60cb..c9e37aac450b02603895f055fb61b652b50828c1 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/highmem.h>
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
+#include <linux/hugetlb.h>
 
 #include "mm.h"
 
@@ -168,19 +169,23 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
         * coherent with the kernels mapping.
         */
        if (!PageHighMem(page)) {
-               __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+               size_t page_size = PAGE_SIZE << compound_order(page);
+               __cpuc_flush_dcache_area(page_address(page), page_size);
        } else {
-               void *addr;
-
+               unsigned long i;
                if (cache_is_vipt_nonaliasing()) {
-                       addr = kmap_atomic(page);
-                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
-                       kunmap_atomic(addr);
-               } else {
-                       addr = kmap_high_get(page);
-                       if (addr) {
+                       for (i = 0; i < (1 << compound_order(page)); i++) {
+                               void *addr = kmap_atomic(page);
                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
-                               kunmap_high(page);
+                               kunmap_atomic(addr);
+                       }
+               } else {
+                       for (i = 0; i < (1 << compound_order(page)); i++) {
+                               void *addr = kmap_high_get(page);
+                               if (addr) {
+                                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                                       kunmap_high(page);
+                               }
                        }
                }
        }
index 05a4e943183650ddba75efd5727d2196a01aeb94..ab4409a2307e07a602f1fcba08519cbbf2201d00 100644 (file)
@@ -9,11 +9,11 @@ static struct fsr_info fsr_info[] = {
        { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "level 3 translation fault"     },
        { do_bad,               SIGBUS,  0,             "reserved access flag fault"    },
        { do_bad,               SIGSEGV, SEGV_ACCERR,   "level 1 access flag fault"     },
-       { do_bad,               SIGSEGV, SEGV_ACCERR,   "level 2 access flag fault"     },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 2 access flag fault"     },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 3 access flag fault"     },
        { do_bad,               SIGBUS,  0,             "reserved permission fault"     },
        { do_bad,               SIGSEGV, SEGV_ACCERR,   "level 1 permission fault"      },
-       { do_sect_fault,        SIGSEGV, SEGV_ACCERR,   "level 2 permission fault"      },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 2 permission fault"      },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 3 permission fault"      },
        { do_bad,               SIGBUS,  0,             "synchronous external abort"    },
        { do_bad,               SIGBUS,  0,             "asynchronous external abort"   },
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
new file mode 100644 (file)
index 0000000..3d1e4a2
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * arch/arm/mm/hugetlbpage.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h and Bill Carson's patches
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/err.h>
+#include <linux/sysctl.h>
+#include <asm/mman.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
+/*
+ * On ARM, huge pages are backed by pmd's rather than pte's, so we do a lot
+ * of type casting from pmd_t * to pte_t *.
+ */
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd = NULL;
+
+       pgd = pgd_offset(mm, addr);
+       if (pgd_present(*pgd)) {
+               pud = pud_offset(pgd, addr);
+               if (pud_present(*pud))
+                       pmd = pmd_offset(pud, addr);
+       }
+
+       return (pte_t *)pmd;
+}
+
+struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+                             int write)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+int pud_huge(pud_t pud)
+{
+       return 0;
+}
+
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+       return 0;
+}
+
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+                       unsigned long addr, unsigned long sz)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pte_t *pte = NULL;
+
+       pgd = pgd_offset(mm, addr);
+       pud = pud_alloc(mm, pgd, addr);
+       if (pud)
+               pte = (pte_t *)pmd_alloc(mm, pud, addr);
+
+       return pte;
+}
+
+struct page *
+follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+               pmd_t *pmd, int write)
+{
+       struct page *page;
+
+       page = pte_page(*(pte_t *)pmd);
+       if (page)
+               page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+       return page;
+}
+
+int pmd_huge(pmd_t pmd)
+{
+       return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
+}
index 83cb3ac27095146f3f60c04047c6b212856a73b2..99083737911237eac0d6781a20d120852a0c7a09 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/system_info.h>
 
 pgd_t *idmap_pgd;
+phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
 
 #ifdef CONFIG_ARM_LPAE
 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
@@ -24,6 +25,13 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                        pr_warning("Failed to allocate identity pmd.\n");
                        return;
                }
+               /*
+                * Copy the original PMD to ensure that the PMD entries for
+                * the kernel image are preserved.
+                */
+               if (!pud_none(*pud))
+                       memcpy(pmd, pmd_offset(pud, 0),
+                              PTRS_PER_PMD * sizeof(pmd_t));
                pud_populate(&init_mm, pud, pmd);
                pmd += pmd_index(addr);
        } else
@@ -67,8 +75,8 @@ static void identity_mapping_add(pgd_t *pgd, const char *text_start,
        unsigned long addr, end;
        unsigned long next;
 
-       addr = virt_to_phys(text_start);
-       end = virt_to_phys(text_end);
+       addr = virt_to_idmap(text_start);
+       end = virt_to_idmap(text_end);
 
        prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
 
index df8591954ab9f6b584836711a0b33700b4e1877b..2f95b7434bd3e09bd485772ccf856d8029f5a90c 100644 (file)
@@ -4,13 +4,16 @@ config ARM64
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_HAS_OPP
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+       select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
        select ARCH_WANT_FRAME_POINTERS
        select ARM_AMBA
        select ARM_ARCH_TIMER
        select ARM_GIC
+       select ARM_GIC_V3
        select BUILDTIME_EXTABLE_SORT
+       select AUDIT_ARCH_COMPAT_GENERIC
        select CLONE_BACKWARDS
        select COMMON_CLK
        select CPU_PM if (SUSPEND || CPU_IDLE)
@@ -29,6 +32,8 @@ config ARM64
        select HARDIRQS_SW_RESEND
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KGDB
+       select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_C_RECORDMCOUNT
        select HAVE_DEBUG_BUGVERBOSE
@@ -55,6 +60,7 @@ config ARM64
        select NO_BOOTMEM
        select OF
        select OF_EARLY_FLATTREE
+       select OF_RESERVED_MEM
        select PERF_USE_VMALLOC
        select POWER_RESET
        select POWER_SUPPLY
@@ -124,6 +130,14 @@ source "kernel/Kconfig.freezer"
 
 menu "Platform selection"
 
+config ARCH_ROCKCHIP
+       bool "Rockchip SoCs"
+       select PINCTRL
+       select PINCTRL_RK3368
+       select ARCH_REQUIRE_GPIOLIB
+
+source "arch/arm64/mach-rockchip/Kconfig"
+
 config ARCH_VEXPRESS
        bool "ARMv8 software model (Versatile Express)"
        select ARCH_REQUIRE_GPIOLIB
@@ -303,6 +317,31 @@ config HOTPLUG_CPU
          Say Y here to experiment with turning CPUs off and on.  CPUs
          can be controlled through /sys/devices/system/cpu.
 
+config SWP_EMULATE
+       bool "Emulate SWP/SWPB instructions"
+       help
+         The ARMv6 architecture deprecates use of the SWP/SWPB instructions,
+         and ARMv8 obsoletes them. ARMv7 multiprocessing
+         extensions introduce the ability to disable these instructions,
+         triggering an undefined instruction exception when executed. Say Y
+         here to enable software emulation of these instructions for userspace
+         (not kernel) using LDREX/STREX. Also creates /proc/cpu/swp_emulation
+         for statistics.
+
+         In some older versions of glibc [<=2.8] SWP is used during futex
+         trylock() operations with the assumption that the code will not
+         be preempted. This invalid assumption may be more likely to fail
+         with SWP emulation enabled, leading to deadlock of the user
+         application.
+
+         NOTE: when accessing uncached shared regions, LDREX/STREX rely
+         on an external transaction monitoring block called a global
+         monitor to maintain update atomicity. If your system does not
+         implement a global monitor, this option can cause programs that
+         perform SWP operations to uncached memory to deadlock.
+
+         If unsure, say Y.
+
 source kernel/Kconfig.preempt
 
 config HZ
@@ -345,6 +384,27 @@ config ARCH_WANT_HUGE_PMD_SHARE
 config HAVE_ARCH_TRANSPARENT_HUGEPAGE
        def_bool y
 
+config ARMV7_COMPAT
+       bool "Kernel support for ARMv7 applications"
+       depends on COMPAT
+       select SWP_EMULATE
+       help
+        This option enables features that allow applications built for an
+        ARMv7 or older processor to continue functioning.
+
+        If you want to execute ARMv7 applications, say Y
+
+config ARMV7_COMPAT_CPUINFO
+       bool "Report backwards compatible cpu features in /proc/cpuinfo"
+       depends on ARMV7_COMPAT
+       default y
+       help
+        This option makes /proc/cpuinfo list CPU features that an ARMv7 or
+        earlier kernel would report, but are not optional on an ARMv8 or later
+        processor.
+
+        If you want to execute ARMv7 applications, say Y
+
 source "mm/Kconfig"
 
 config FORCE_MAX_ZONEORDER
@@ -352,6 +412,19 @@ config FORCE_MAX_ZONEORDER
        default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
        default "11"
 
+config SECCOMP
+       bool "Enable seccomp to safely compute untrusted bytecode"
+       ---help---
+         This kernel feature is useful for number crunching applications
+         that may need to compute untrusted bytecode during their
+         execution. By using pipes or other transports made available to
+         the process as file descriptors supporting the read/write
+         syscalls, it's possible to isolate those applications in
+         their own address space using seccomp. Once seccomp is
+         enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
+         and the task is only allowed to execute a few safe syscalls
+         defined by each seccomp mode.
+
 endmenu
 
 menu "Boot options"
@@ -473,6 +546,8 @@ source "drivers/firmware/Kconfig"
 
 source "fs/Kconfig"
 
+source "arch/arm64/kvm/Kconfig"
+
 source "arch/arm64/Kconfig.debug"
 
 source "security/Kconfig"
index 28750a191dd86716da13e26ea3271cc7b3e61b9e..67a6d3dead4a888b8471e67388b8a501ce56c735 100644 (file)
@@ -43,6 +43,8 @@ TEXT_OFFSET := 0x00080000
 export TEXT_OFFSET GZFLAGS
 
 core-y         += arch/arm64/kernel/ arch/arm64/mm/
+core-$(CONFIG_ARCH_ROCKCHIP) += arch/arm64/mach-rockchip/
+core-$(CONFIG_KVM) += arch/arm64/kvm/
 core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
 libs-y         := arch/arm64/lib/ $(libs-y)
 libs-y         += $(LIBGCC)
@@ -93,3 +95,12 @@ define archhelp
   echo  '                  (distribution) /sbin/installkernel or'
   echo  '                  install to $$(INSTALL_PATH) and run lilo'
 endef
+
+kernel.img: Image
+       $(Q)$(srctree)/mkkrnlimg $(objtree)/arch/arm64/boot/Image $(objtree)/kernel.img >/dev/null
+       @echo '  Image:  kernel.img is ready'
+
+LOGO := $(notdir $(wildcard $(srctree)/logo.bmp))
+%.img: %.dtb kernel.img $(LOGO)
+       $(Q)$(srctree)/resource_tool $(objtree)/arch/arm64/boot/dts/$*.dtb $(LOGO)
+       @echo '  Image:  resource.img (with $*.dtb $(LOGO)) is ready'
diff --git a/arch/arm64/boot/dts/rk3368-clocks.dtsi b/arch/arm64/boot/dts/rk3368-clocks.dtsi
new file mode 100644 (file)
index 0000000..29b1773
--- /dev/null
@@ -0,0 +1,2724 @@
+/*
+ * Copyright (C) 2014-2015 ROCKCHIP, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <dt-bindings/clock/rockchip,rk3368.h>
+
+/{
+       clocks {
+               compatible = "rockchip,rk-clocks";
+               rockchip,grf = <&grf>;
+               #address-cells = <2>;
+               #size-cells = <2>;
+               ranges;
+
+               fixed_rate_cons {
+                       compatible = "rockchip,rk-fixed-rate-cons";
+
+                       xin24m: xin24m {
+                               compatible = "rockchip,rk-fixed-clock";
+                               clock-output-names = "xin24m";
+                               clock-frequency = <24000000>;
+                               #clock-cells = <0>;
+                       };
+
+                       xin12m: xin12m {
+                               compatible = "rockchip,rk-fixed-clock";
+                               clocks = <&xin24m>;
+                               clock-output-names = "xin12m";
+                               clock-frequency = <12000000>;
+                               #clock-cells = <0>;
+                       };
+
+                       xin32k: xin32k {
+                               compatible = "rockchip,rk-fixed-clock";
+                               clock-output-names = "xin32k";
+                               clock-frequency = <32000>;
+                               #clock-cells = <0>;
+                       };
+
+                       dummy: dummy {
+                               compatible = "rockchip,rk-fixed-clock";
+                               clock-output-names = "dummy";
+                               clock-frequency = <0>;
+                               #clock-cells = <0>;
+                       };
+
+                       jtag_clkin: jtag_clkin {
+                               compatible = "rockchip,rk-fixed-clock";
+                               clock-output-names = "jtag_clkin";
+                               clock-frequency = <0>;
+                               #clock-cells = <0>;
+                       };
+
+                       gmac_clkin: gmac_clkin {
+                               compatible = "rockchip,rk-fixed-clock";
+                               clock-output-names = "gmac_clkin";
+                               clock-frequency = <0>;
+                               #clock-cells = <0>;
+                       };
+
+                       pclkin_isp: pclkin_isp {
+                               compatible = "rockchip,rk-fixed-clock";
+                               clock-output-names = "pclkin_isp";
+                               clock-frequency = <0>;
+                               #clock-cells = <0>;
+                       };
+
+                       pclkin_vip: pclkin_vip {
+                               compatible = "rockchip,rk-fixed-clock";
+                               clock-output-names = "pclkin_vip";
+                               clock-frequency = <0>;
+                               #clock-cells = <0>;
+                       };
+
+                       clkin_hsadc_tsp: clkin_hsadc_tsp {
+                               compatible = "rockchip,rk-fixed-clock";
+                               clock-output-names = "clkin_hsadc_tsp";
+                               clock-frequency = <0>;
+                               #clock-cells = <0>;
+                       };
+
+                       i2s_clkin: i2s_clkin {
+                               compatible = "rockchip,rk-fixed-clock";
+                               clock-output-names = "i2s_clkin";
+                               clock-frequency = <0>;
+                               #clock-cells = <0>;
+                       };
+               };
+
+               fixed_factor_cons {
+                       compatible = "rockchip,rk-fixed-factor-cons";
+
+                       hclk_vepu: hclk_vepu {
+                               compatible = "rockchip,rk-fixed-factor-clock";
+                               clocks = <&aclk_vepu>;
+                               clock-output-names = "hclk_vepu";
+                               clock-div = <4>;
+                               clock-mult = <1>;
+                               #clock-cells = <0>;
+                       };
+
+                       hclk_vdpu: hclk_vdpu {
+                               compatible = "rockchip,rk-fixed-factor-clock";
+                               clocks = <&aclk_vdpu>;
+                               clock-output-names = "hclk_vdpu";
+                               clock-div = <4>;
+                               clock-mult = <1>;
+                               #clock-cells = <0>;
+                       };
+
+                       usbotg_480m_out: usbotg_480m_out {
+                               compatible = "rockchip,rk-fixed-factor-clock";
+                               clocks = <&clk_gates8 1>;
+                               clock-output-names = "usbotg_480m_out";
+                               clock-div = <1>;
+                               clock-mult = <20>;
+                               #clock-cells = <0>;
+                       };
+
+                       pclkin_isp_inv: pclkin_isp_inv {
+                               compatible = "rockchip,rk-fixed-factor-clock";
+                               clocks = <&clk_gates17 2>;
+                               clock-output-names = "pclkin_isp_inv";
+                               clock-div = <1>;
+                               clock-mult = <1>;
+                               #clock-cells = <0>;
+                       };
+
+                       pclkin_vip_inv: pclkin_vip_inv {
+                               compatible = "rockchip,rk-fixed-factor-clock";
+                               clocks = <&clk_gates16 13>;
+                               clock-output-names = "pclkin_vip_inv";
+                               clock-div = <1>;
+                               clock-mult = <1>;
+                               #clock-cells = <0>;
+                       };
+
+                       pclk_vio: pclk_vio {
+                               compatible = "rockchip,rk-fixed-factor-clock";
+                               clocks = <&clk_gates16 8>;
+                               clock-output-names = "pclk_vio";
+                               clock-div = <1>;
+                               clock-mult = <1>;
+                               #clock-cells = <0>;
+                       };
+               };
+
+               clock_regs {
+                       compatible = "rockchip,rk-clock-regs";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       ranges = <0x0 0x0 0xff760000 0x1000>;
+                       reg = <0x0 0xff760000 0x0 0x1000>;
+
+                       /* PLL control regs */
+                       pll_cons {
+                               compatible = "rockchip,rk-pll-cons";
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               ranges;
+
+                               clk_apllb: pll-clk@0000 {
+                                       compatible = "rockchip,rk3188-pll-clk";
+                                       reg = <0x0000 0x10>;
+                                       mode-reg = <0x000c 8>;
+                                       status-reg = <0x0480 1>;
+                                       clocks = <&xin24m>;
+                                       clock-output-names = "clk_apllb";
+                                       rockchip,pll-type = <CLK_PLL_3368_APLLB>;
+                                       #clock-cells = <0>;
+                               };
+
+
+                               /*
+                                * PLL nodes: each maps a 16-byte CRU config
+                                * window (reg) and gives mode-reg/status-reg
+                                * as <offset bit> pairs -- NOTE(review): the
+                                * <offset bit> reading is inferred from the
+                                * values; confirm against the
+                                * rockchip,rk3188-pll-clk binding/driver.
+                                * All PLLs are fed from the 24 MHz xin24m.
+                                * APLL-L is a selectable parent of the
+                                * clk_core_l mux (little-core path).
+                                */
+                               clk_aplll: pll-clk@0010 {
+                                       compatible = "rockchip,rk3188-pll-clk";
+                                       reg = <0x0010 0x10>;
+                                       mode-reg = <0x001c 8>;
+                                       status-reg = <0x0480 0>;
+                                       clocks = <&xin24m>;
+                                       clock-output-names = "clk_aplll";
+                                       rockchip,pll-type = <CLK_PLL_3368_APLLL>;
+                                       #clock-cells = <0>;
+                               };
+
+                               /* DPLL: selectable parent of the clk_ddr /
+                                * clk4x_ddr muxes (DDR clock path). */
+                               clk_dpll: pll-clk@0020 {
+                                       compatible = "rockchip,rk3188-pll-clk";
+                                       reg = <0x0020 0x10>;
+                                       mode-reg = <0x002c 8>;
+                                       status-reg = <0x0480 2>;
+                                       clocks = <&xin24m>;
+                                       clock-output-names = "clk_dpll";
+                                       rockchip,pll-type = <CLK_PLL_3188PLUS>;
+                                       #clock-cells = <0>;
+                               };
+
+
+                               /* CPLL: general-purpose parent used by the
+                                * aclk_cci/aclk_peri/fclk_mcu/GPU muxes. */
+                               clk_cpll: pll-clk@0030 {
+                                       compatible = "rockchip,rk3188-pll-clk";
+                                       reg = <0x0030 0x10>;
+                                       mode-reg = <0x003c 8>;
+                                       status-reg = <0x0480 3>;
+                                       clocks = <&xin24m>;
+                                       clock-output-names = "clk_cpll";
+                                       rockchip,pll-type = <CLK_PLL_3188PLUS>;
+                                       #clock-cells = <0>;
+                                       #clock-init-cells = <1>;
+                               };
+
+                               /* GPLL: general-purpose parent; also the
+                                * source for the pclk_pmu/pclk_alive
+                                * pre-dividers in sel-con10. */
+                               clk_gpll: pll-clk@0040 {
+                                       compatible = "rockchip,rk3188-pll-clk";
+                                       reg = <0x0040 0x10>;
+                                       mode-reg = <0x004c 8>;
+                                       status-reg = <0x0480 4>;
+                                       clocks = <&xin24m>;
+                                       clock-output-names = "clk_gpll";
+                                       rockchip,pll-type = <CLK_PLL_3188PLUS>;
+                                       #clock-cells = <0>;
+                                       #clock-init-cells = <1>;
+                               };
+
+                               /* NPLL: fourth parent option for the CCI and
+                                * GPU-core muxes; uses the 3188PLUS_AUTO
+                                * variant (unlike CPLL/GPLL above). */
+                               clk_npll: pll-clk@0050 {
+                                       compatible = "rockchip,rk3188-pll-clk";
+                                       reg = <0x0050 0x10>;
+                                       mode-reg = <0x005c 8>;
+                                       status-reg = <0x0480 5>;
+                                       clocks = <&xin24m>;
+                                       clock-output-names = "clk_npll";
+                                       rockchip,pll-type = <CLK_PLL_3188PLUS_AUTO>;
+                                       #clock-cells = <0>;
+                                       #clock-init-cells = <1>;
+                               };
+                       };
+
+                       /* Select control regs */
+                       clk_sel_cons {
+                               compatible = "rockchip,rk-sel-cons";
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               ranges;
+
+                               /*
+                                * Clock-select register @ CRU +0x0100:
+                                * big-core (clk_core_b) divider/mux fields.
+                                * rockchip,bits = <lsb width>, consistent
+                                * with the reserved-bit comments below.
+                                */
+                               clk_sel_con0: sel-con@0100 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0100 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* bits [4:0]: clk_core_b post-divider
+                                        * (value + 1, per DIVIDER_PLUS_ONE). */
+                                       clk_core_b_div: clk_core_b_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&clk_core_b>;
+                                               clock-output-names = "clk_core_b";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_CORE>;
+                                               rockchip,flags = <(CLK_GET_RATE_NOCACHE |
+                                                                       CLK_SET_RATE_NO_REPARENT)>;
+                                       };
+
+                                       /* 6:5 reserved */
+
+                                       /* bit 7: clk_core_b parent select
+                                        * (apllb / gpll, in clocks order). */
+                                       clk_core_b: clk_core_b_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <7 1>;
+                                               clocks = <&clk_apllb>, <&clk_gpll>;
+                                               clock-output-names = "clk_core_b";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* bits [12:8]: AXI master clock
+                                        * divider off clk_core_b. */
+                                       aclkm_core_b: aclkm_core_b_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 5>;
+                                               clocks = <&clk_core_b>;
+                                               clock-output-names = "aclkm_core_b";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_CORE_CHILD>;
+                                       };
+
+                                       /* 15:13 reserved */
+                               };
+
+                               /*
+                                * Clock-select register @ CRU +0x0104:
+                                * big-core ATB/debug-APB dividers, both
+                                * children of clk_core_b.
+                                */
+                               clk_sel_con1: sel-con@0104 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0104 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* bits [4:0]: atclk_core_b divider. */
+                                       atclk_core_b: atclk_core_b_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&clk_core_b>;
+                                               clock-output-names = "atclk_core_b";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_CORE_CHILD>;
+                                       };
+
+                                       /* 7:5 reserved */
+
+                                       /* bits [12:8]: pclk_dbg_b divider. */
+                                       pclk_dbg_b: pclk_dbg_b_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 5>;
+                                               clocks = <&clk_core_b>;
+                                               clock-output-names = "pclk_dbg_b";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_CORE_CHILD>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register @ CRU +0x0108:
+                                * little-core (clk_core_l) fields; mirrors
+                                * sel-con0 with aplll instead of apllb.
+                                */
+                               clk_sel_con2: sel-con@0108 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0108 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* bits [4:0]: clk_core_l post-divider. */
+                                       clk_core_l_div: clk_core_l_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&clk_core_l>;
+                                               clock-output-names = "clk_core_l";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_CORE>;
+                                               rockchip,flags = <(CLK_GET_RATE_NOCACHE |
+                                                                       CLK_SET_RATE_NO_REPARENT)>;
+                                       };
+
+                                       /* 6:5 reserved */
+
+                                       /* bit 7: clk_core_l parent select
+                                        * (aplll / gpll, in clocks order). */
+                                       clk_core_l: clk_core_l_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <7 1>;
+                                               clocks = <&clk_aplll>, <&clk_gpll>;
+                                               clock-output-names = "clk_core_l";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* bits [12:8]: AXI master clock
+                                        * divider off clk_core_l. */
+                                       aclkm_core_l: aclkm_core_l_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 5>;
+                                               clocks = <&clk_core_l>;
+                                               clock-output-names = "aclkm_core_l";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_CORE_CHILD>;
+                                       };
+
+                                       /* 15:13 reserved */
+                               };
+
+                               /*
+                                * Clock-select register @ CRU +0x010c:
+                                * little-core ATB/debug-APB dividers;
+                                * mirrors sel-con1 for clk_core_l.
+                                */
+                               clk_sel_con3: sel-con@010c {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x010c 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* bits [4:0]: atclk_core_l divider. */
+                                       atclk_core_l: atclk_core_l_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&clk_core_l>;
+                                               clock-output-names = "atclk_core_l";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_CORE_CHILD>;
+                                       };
+
+                                       /* 7:5 reserved */
+
+                                       /* bits [12:8]: pclk_dbg_l divider. */
+                                       pclk_dbg_l: pclk_dbg_l_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 5>;
+                                               clocks = <&clk_core_l>;
+                                               clock-output-names = "pclk_dbg_l";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_CORE_CHILD>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register @ CRU +0x0110:
+                                * CoreSight (clk_cs) and trace clocks.
+                                */
+                               clk_sel_con4: sel-con@0110 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0110 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* bits [4:0]: clk_cs divider. */
+                                       clk_cs_div: clk_cs_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&clk_cs>;
+                                               clock-output-names = "clk_cs";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_DIV>;
+                                               rockchip,flags = <CLK_SET_RATE_NO_REPARENT>;
+                                       };
+
+                                       /* 5 reserved */
+
+                                       /* bits [7:6]: clk_cs parent select;
+                                        * last entry is a dummy placeholder. */
+                                       clk_cs: clk_cs_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <6 2>;
+                                               clocks = <&clk_gates0 9>, <&clk_gates0 10>, <&clk_gates0 8>, <&dummy>;
+                                               clock-output-names = "clk_cs";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* bits [12:8]: trace input clock
+                                        * divider off clk_cs. */
+                                       clkin_trace: clkin_trace_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 5>;
+                                               clocks = <&clk_cs>;
+                                               clock-output-names = "clkin_trace";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                               };
+
+                               /*
+                                * Clock-select register @ CRU +0x0114:
+                                * CCI interconnect AXI clock (aclk_cci).
+                                */
+                               clk_sel_con5: sel-con@0114 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0114 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* bits [4:0]: aclk_cci divider. */
+                                       aclk_cci_div: aclk_cci_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&aclk_cci>;
+                                               clock-output-names = "aclk_cci";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_RK3368_MUX_DIV_NPLL>;
+                                       };
+
+                                       /* 5 reserved */
+
+                                       /* bits [7:6]: aclk_cci parent select
+                                        * (cpll / gpll / usbphy_480m / npll). */
+                                       aclk_cci: aclk_cci_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <6 2>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>, <&usbphy_480m>, <&clk_npll>;
+                                               clock-output-names = "aclk_cci";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+                               };
+
+                               /* sel[7:6] reserved */
+
+                               /*
+                                * Clock-select register @ CRU +0x0120:
+                                * bus-domain AXI/AHB/APB clocks; hclk_bus
+                                * and pclk_bus derive from aclk_bus.
+                                */
+                               clk_sel_con8: sel-con@0120 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0120 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* bits [4:0]: aclk_bus divider. */
+                                       aclk_bus_div: aclk_bus_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&aclk_bus>;
+                                               clock-output-names = "aclk_bus";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_DIV>;
+                                       };
+
+                                       /* 6:5 reserved */
+
+                                       /* bit 7: aclk_bus parent select, via
+                                        * gated PLL outputs (clk_gates1). */
+                                       aclk_bus: aclk_bus_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <7 1>;
+                                               clocks = <&clk_gates1 11>, <&clk_gates1 10>;
+                                               clock-output-names = "aclk_bus";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* bits [9:8]: hclk_bus divider. */
+                                       hclk_bus: hclk_bus_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 2>;
+                                               clocks = <&aclk_bus>;
+                                               clock-output-names = "hclk_bus";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* 11:10 reserved */
+
+                                       /* bits [14:12]: pclk_bus divider. */
+                                       pclk_bus: pclk_bus_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <12 3>;
+                                               clocks = <&aclk_bus>;
+                                               clock-output-names = "pclk_bus";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register @ CRU +0x0124:
+                                * peripheral-domain clocks.  hclk_peri and
+                                * pclk_peri use explicit <field-value ratio>
+                                * tables (USER_DEFINE) instead of value+1.
+                                */
+                               clk_sel_con9: sel-con@0124 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0124 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* bits [4:0]: aclk_peri divider. */
+                                       aclk_peri_div: aclk_peri_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&aclk_peri>;
+                                               clock-output-names = "aclk_peri";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_DIV>;
+                                       };
+
+                                       /* 6:5 reserved */
+
+                                       /* bit 7: aclk_peri parent select
+                                        * (cpll / gpll). */
+                                       aclk_peri: aclk_peri_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <7 1>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>;
+                                               clock-output-names = "aclk_peri";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* bits [9:8]: hclk_peri ratio
+                                        * (1/2/4 of aclk_peri). */
+                                       hclk_peri: hclk_peri_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 2>;
+                                               clocks = <&aclk_peri>;
+                                               clock-output-names = "hclk_peri";
+                                               rockchip,div-type = <CLK_DIVIDER_USER_DEFINE>;
+                                               rockchip,div-relations =
+                                                               <0x0 1
+                                                                0x1 2
+                                                                0x2 4>;
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* 11:10 reserved */
+
+                                       /* bits [13:12]: pclk_peri ratio
+                                        * (1/2/4/8 of aclk_peri). */
+                                       pclk_peri: pclk_peri_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <12 2>;
+                                               clocks = <&aclk_peri>;
+                                               clock-output-names = "pclk_peri";
+                                               rockchip,div-type = <CLK_DIVIDER_USER_DEFINE>;
+                                               rockchip,div-relations =
+                                                               <0x0 1
+                                                                0x1 2
+                                                                0x2 4
+                                                                0x3 8>;
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register @ CRU +0x0128:
+                                * PMU/alive APB pre-dividers (from GPLL)
+                                * and the crypto clock (from aclk_bus).
+                                */
+                               clk_sel_con10: sel-con@0128 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0128 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* bits [4:0]: pclk_pmu_pre divider. */
+                                       pclk_pmu_pre: pclk_pmu_pre_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&clk_gpll>;
+                                               clock-output-names = "pclk_pmu_pre";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* 7:5 reserved */
+
+                                       /* bits [12:8]: pclk_alive_pre divider. */
+                                       pclk_alive_pre: pclk_alive_pre_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 5>;
+                                               clocks = <&clk_gpll>;
+                                               clock-output-names = "pclk_alive_pre";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* 13 reserved */
+
+                                       /* bits [15:14]: clk_crypto divider
+                                        * off aclk_bus. */
+                                       clk_crypto: clk_crypto_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <14 2>;
+                                               clocks = <&aclk_bus>;
+                                               clock-output-names = "clk_crypto";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+                               };
+
+                               /* sel[11]: reserved */
+
+                               /*
+                                * Clock-select register @ CRU +0x0130:
+                                * embedded MCU clocks (fclk/stclk).
+                                */
+                               clk_sel_con12: sel-con@0130 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0130 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* bits [4:0]: fclk_mcu divider. */
+                                       fclk_mcu_div: fclk_mcu_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&fclk_mcu>;
+                                               clock-output-names = "fclk_mcu";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_DIV>;
+                                       };
+
+                                       /* 6:5 reserved */
+
+                                       /* bit 7: fclk_mcu parent select
+                                        * (cpll / gpll). */
+                                       fclk_mcu: fclk_mcu_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <7 1>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>;
+                                               clock-output-names = "fclk_mcu";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* bits [10:8]: MCU SysTick clock
+                                        * divider off fclk_mcu. */
+                                       stclk_mcu: stclk_mcu_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 3>;
+                                               clocks = <&fclk_mcu>;
+                                               clock-output-names = "stclk_mcu";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register @ CRU +0x0134:
+                                * DDR clock and USB-PHY 480M source.
+                                */
+                               clk_sel_con13: sel-con@0134 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0134 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* bits [1:0]: clk_ddr divider; uses the
+                                        * dedicated DDR_DIV4 rate ops and is
+                                        * never cached or re-parented by the
+                                        * framework. */
+                                       clk_ddr_div: clk_ddr_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 2>;
+                                               clocks = <&clk_ddr>;
+                                               clock-output-names = "clk_ddr";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,flags = <(CLK_GET_RATE_NOCACHE |
+                                                                       CLK_SET_RATE_NO_REPARENT)>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_DDR_DIV4>;
+                                       };
+
+                                       /* 3:2 reserved */
+
+                                       /* bit 4: clk_ddr parent select
+                                        * (dpll / gpll). */
+                                       clk_ddr: clk_ddr_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <4 1>;
+                                               clocks = <&clk_dpll>, <&clk_gpll>;
+                                               clock-output-names = "clk_ddr";
+                                               #clock-cells = <0>;
+                                       };
+
+                                       /* 7:5 reserved */
+
+                                       /* bit 8: usbphy_480m source
+                                        * (xin24m / usbotg_480m_out). */
+                                       usbphy_480m: usbphy_480m_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <8 1>;
+                                               clocks = <&xin24m>, <&usbotg_480m_out>;
+                                               clock-output-names = "usbphy_480m";
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_RK3288_USB480M>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* NOTE(review): clk4x_ddr reuses select
+                                        * bit 4 of clk_ddr_mux above -- presumably
+                                        * intentional (undivided view of the same
+                                        * hardware mux); confirm it is not a
+                                        * copy-paste of the wrong bit field. */
+                                       clk4x_ddr: clk4x_ddr_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <4 1>;
+                                               clocks = <&clk_dpll>, <&clk_gpll>;
+                                               clock-output-names = "clk4x_ddr";
+                                               #clock-cells = <0>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register @ CRU +0x0138:
+                                * GPU core and GPU AXI clocks.
+                                */
+                               clk_sel_con14: sel-con@0138 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0138 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* bits [4:0]: clk_gpu_core divider. */
+                                       clk_gpu_core_div: clk_gpu_core_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&clk_gpu_core>;
+                                               clock-output-names = "clk_gpu_core";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_RK3368_MUX_DIV_NPLL>;
+                                               rockchip,flags = <CLK_SET_RATE_PARENT_IN_ORDER>;
+                                       };
+
+                                       /* 5 reserved */
+
+                                       /* bits [7:6]: clk_gpu_core parent select
+                                        * (cpll / gpll / usbphy_480m / npll). */
+                                       clk_gpu_core: clk_gpu_core_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <6 2>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>, <&usbphy_480m>, <&clk_npll>;
+                                               clock-output-names = "clk_gpu_core";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* bits [12:8]: GPU memory AXI clock
+                                        * divider off aclk_gpu. */
+                                       aclk_gpu_mem: aclk_gpu_mem_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 5>;
+                                               clocks = <&aclk_gpu>;
+                                               clock-output-names = "aclk_gpu_mem";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* 13 reserved */
+
+                                       /* bit 14: aclk_gpu parent select
+                                        * (cpll / gpll). */
+                                       aclk_gpu: aclk_gpu_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <14 1>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>;
+                                               clock-output-names = "aclk_gpu";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register 15 (CRU offset 0x013c):
+                                * video encoder (VEPU) and decoder (VDPU) AXI clocks.
+                                */
+                               clk_sel_con15: sel-con@013c {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x013c 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* 5-bit divide-by-(n+1) at bit 0, from the aclk_vepu mux below */
+                                       aclk_vepu_div: aclk_vepu_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&aclk_vepu>;
+                                               clock-output-names = "aclk_vepu";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_RK3368_MUX_DIV_NPLL>;
+                                       };
+
+                                       /* 5 reserved */
+
+                                       /* 2-bit parent mux at bit 6: cpll/gpll/npll/usbphy_480m */
+                                       aclk_vepu: aclk_vepu_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <6 2>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>, <&clk_npll>, <&usbphy_480m>;
+                                               clock-output-names = "aclk_vepu";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* 5-bit divide-by-(n+1) at bit 8, from the aclk_vdpu mux below */
+                                       aclk_vdpu_div: aclk_vdpu_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 5>;
+                                               clocks = <&aclk_vdpu>;
+                                               clock-output-names = "aclk_vdpu";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_RK3368_MUX_DIV_NPLL>;
+                                       };
+
+                                       /* 13 reserved */
+
+                                       /* 2-bit parent mux at bit 14: cpll/gpll/npll/usbphy_480m */
+                                       aclk_vdpu: aclk_vdpu_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <14 2>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>, <&clk_npll>, <&usbphy_480m>;
+                                               clock-output-names = "aclk_vdpu";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register 16 (CRU offset 0x0140):
+                                * only the GPU config-bus divider lives here (other
+                                * bit fields of this register are not modelled).
+                                */
+                               clk_sel_con16: sel-con@0140 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0140 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* 5-bit divide-by-(n+1) at bit 8, from aclk_gpu
+                                        * (mux defined in clk_sel_con14) */
+                                       aclk_gpu_cfg: aclk_gpu_cfg_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 5>;
+                                               clocks = <&aclk_gpu>;
+                                               clock-output-names = "aclk_gpu_cfg";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register 17 (CRU offset 0x0144):
+                                * HEVC decoder CABAC and core clocks.
+                                */
+                               clk_sel_con17: sel-con@0144 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0144 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* 5-bit divide-by-(n+1) at bit 0, from the clk_hevc_cabac mux below */
+                                       clk_hevc_cabac_div: clk_hevc_cabac_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&clk_hevc_cabac>;
+                                               clock-output-names = "clk_hevc_cabac";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_RK3368_MUX_DIV_NPLL>;
+                                       };
+
+                                       /* 5 reserved */
+
+                                       /* 2-bit parent mux at bit 6: cpll/gpll/npll/usbphy_480m */
+                                       clk_hevc_cabac: clk_hevc_cabac_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <6 2>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>, <&clk_npll>, <&usbphy_480m>;
+                                               clock-output-names = "clk_hevc_cabac";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* 5-bit divide-by-(n+1) at bit 8, from the clk_hevc_core mux below */
+                                       clk_hevc_core_div: clk_hevc_core_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 5>;
+                                               clocks = <&clk_hevc_core>;
+                                               clock-output-names = "clk_hevc_core";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_RK3368_MUX_DIV_NPLL>;
+                                       };
+
+                                       /* 13 reserved */
+
+                                       /* 2-bit parent mux at bit 14: cpll/gpll/npll/usbphy_480m */
+                                       clk_hevc_core: clk_hevc_core_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <14 2>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>, <&clk_npll>, <&usbphy_480m>;
+                                               clock-output-names = "clk_hevc_core";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register 18 (CRU offset 0x0148):
+                                * RGA (2D graphics engine) core and AXI clocks.
+                                */
+                               clk_sel_con18: sel-con@0148 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0148 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* 5-bit divide-by-(n+1) at bit 0, from the clk_rga mux below */
+                                       clk_rga_div: clk_rga_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&clk_rga>;
+                                               clock-output-names = "clk_rga";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_DIV>;
+                                       };
+
+                                       /* 5 reserved */
+
+                                       /* 2-bit parent mux at bit 6; usbphy_480m is listed twice,
+                                        * i.e. both upper encodings select the same parent
+                                        * (presumably one encoding is reserved — confirm vs. TRM). */
+                                       clk_rga: clk_rga_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <6 2>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>, <&usbphy_480m>, <&usbphy_480m>;
+                                               clock-output-names = "clk_rga";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* 5-bit divide-by-(n+1) at bit 8, from the aclk_rga_pre mux below */
+                                       aclk_rga_div: aclk_rga_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 5>;
+                                               clocks = <&aclk_rga_pre>;
+                                               clock-output-names = "aclk_rga_pre";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_DIV>;
+                                       };
+
+                                       /* 13 reserved */
+
+                                       /* 2-bit parent mux at bit 14 (upper two encodings duplicated) */
+                                       aclk_rga_pre: aclk_rga_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <14 2>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>, <&usbphy_480m>, <&usbphy_480m>;
+                                               clock-output-names = "aclk_rga_pre";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register 19 (CRU offset 0x014c):
+                                * video I/O (VIO0) AXI clock mux + divider.
+                                */
+                               clk_sel_con19: sel-con@014c {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x014c 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* 5-bit divide-by-(n+1) at bit 0, from the aclk_vio0 mux below */
+                                       aclk_vio0_div: aclk_vio0_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&aclk_vio0>;
+                                               clock-output-names = "aclk_vio0";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_DIV>;
+                                       };
+
+                                       /* 5 reserved */
+
+                                       /* 2-bit parent mux at bit 6 (upper two encodings both usbphy_480m) */
+                                       aclk_vio0: aclk_vio0_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <6 2>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>, <&usbphy_480m>, <&usbphy_480m>;
+                                               clock-output-names = "aclk_vio0";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register 20 (CRU offset 0x0150):
+                                * display controller (VOP0) pixel clock.
+                                */
+                               clk_sel_con20: sel-con@0150 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0150 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* 8-bit divide-by-(n+1) at bit 0;
+                                        * CLK_SET_RATE_NO_REPARENT keeps rate changes from
+                                        * switching the pixel-clock parent PLL. */
+                                       dclk_vop0_div: dclk_vop0_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 8>;
+                                               clocks = <&dclk_vop0>;
+                                               clock-output-names = "dclk_vop0";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_DIV>;
+                                               rockchip,flags = <CLK_SET_RATE_NO_REPARENT>;
+                                       };
+
+                                       /* 2-bit parent mux at bit 8: cpll/gpll/npll/(unused dummy) */
+                                       dclk_vop0: dclk_vop0_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <8 2>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>, <&clk_npll>, <&dummy>;
+                                               clock-output-names = "dclk_vop0";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* 15:10 reserved */
+                               };
+
+                               /*
+                                * Clock-select register 21 (CRU offset 0x0154):
+                                * VIO AHB clock, ISP/VIP pclk source selects and
+                                * the camera-interface (VIP) clock.
+                                */
+                               clk_sel_con21: sel-con@0154 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0154 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* 5-bit divide-by-(n+1) at bit 0, derived from aclk_vio0
+                                        * (mux in clk_sel_con19) */
+                                       hclk_vio: hclk_vio_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&aclk_vio0>;
+                                               clock-output-names = "hclk_vio";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* 5 reserved */
+
+                                       /* 1-bit mux at bit 6: internal gate output vs. inverted
+                                        * external ISP pixel-clock input */
+                                       pclk_isp: pclk_isp_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <6 1>;
+                                               clocks = <&clk_gates17 2>, <&pclkin_isp_inv>;
+                                               clock-output-names = "pclk_isp";
+                                               #clock-cells = <0>;
+                                       };
+
+                                       /* 7 reserved */
+
+                                       /* 5-bit divide-by-(n+1) at bit 8, from the clk_vip mux below */
+                                       clk_vip_div: clk_vip_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 5>;
+                                               clocks = <&clk_vip>;
+                                               clock-output-names = "clk_vip";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_DIV>;
+                                       };
+
+                                       /* 1-bit mux at bit 13: internal gate output vs. inverted
+                                        * external VIP pixel-clock input */
+                                       pclk_vip: pclk_vip_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <13 1>;
+                                               clocks = <&clk_gates16 13>, <&pclkin_vip_inv>;
+                                               clock-output-names = "pclk_vip";
+                                               #clock-cells = <0>;
+                                       };
+
+                                       /* 2-bit parent mux at bit 14: cpll/xin24m/gpll/xin24m
+                                        * (xin24m reachable via two encodings) */
+                                       clk_vip: clk_vip_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <14 2>;
+                                               clocks = <&clk_cpll>, <&xin24m>, <&clk_gpll>, <&xin24m>;
+                                               clock-output-names = "clk_vip";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register 22 (CRU offset 0x0158):
+                                * ISP (image signal processor) clock.
+                                */
+                               clk_sel_con22: sel-con@0158 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0158 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* 6-bit divide-by-(n+1) at bit 0, from the clk_isp mux below */
+                                       clk_isp_div: clk_isp_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 6>;
+                                               clocks = <&clk_isp>;
+                                               clock-output-names = "clk_isp";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_RK3368_MUX_DIV_NPLL>;
+                                       };
+
+                                       /* 2-bit parent mux at bit 6 (npll reachable via two encodings) */
+                                       clk_isp: clk_isp_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <6 2>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>, <&clk_npll>, <&clk_npll>;
+                                               clock-output-names = "clk_isp";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register 23 (CRU offset 0x015c):
+                                * eDP link clock and its fixed 24 MHz reference select.
+                                */
+                               clk_sel_con23: sel-con@015c {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x015c 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* 6-bit divide-by-(n+1) at bit 0, from the clk_edp mux below */
+                                       clk_edp_div: clk_edp_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 6>;
+                                               clocks = <&clk_edp>;
+                                               clock-output-names = "clk_edp";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_RK3368_MUX_DIV_NPLL>;
+                                       };
+
+                                       /* 2-bit parent mux at bit 6 (npll reachable via two encodings) */
+                                       clk_edp: clk_edp_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <6 2>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>, <&clk_npll>, <&clk_npll>;
+                                               clock-output-names = "clk_edp";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* 1-bit mux at bit 8: 24 MHz crystal or dummy (off) */
+                                       clk_edp_24m: clk_edp_24m_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <8 1>;
+                                               clocks = <&xin24m>, <&dummy>;
+                                               clock-output-names = "clk_edp_24m";
+                                               #clock-cells = <0>;
+                                       };
+                               };
+
+                               /* sel[24]: reserved */
+
+                               /*
+                                * Clock-select register 25 (CRU offset 0x0164):
+                                * TSADC (thermal) and SARADC sample clocks.
+                                */
+                               clk_sel_con25: sel-con@0164 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0164 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* 6-bit divide-by-(n+1) at bit 0, fed from the 32 kHz mux */
+                                       clk_tsadc: clk_tsadc_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 6>;
+                                               clocks = <&clk_32k_mux>;
+                                               clock-output-names = "clk_tsadc";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                       };
+
+                                       /* 8-bit divide-by-(n+1) at bit 8, fed from the 24 MHz crystal */
+                                       clk_saradc: clk_saradc_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 8>;
+                                               clocks = <&xin24m>;
+                                               clock-output-names = "clk_saradc";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register 26 (CRU offset 0x0168):
+                                * HSIC USB 480 MHz source selects.
+                                */
+                               clk_sel_con26: sel-con@0168 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0168 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* 7:0 reserved */
+
+                                       /* 1-bit mux at bit 8: USB OTG 480 MHz output or dummy (off) */
+                                       hsic_usb_480m: hsic_usb_480m_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <8 1>;
+                                               clocks = <&usbotg_480m_out>, <&dummy>;
+                                               clock-output-names = "hsic_usb_480m";
+                                               #clock-cells = <0>;
+                                       };
+
+                                       /* 11:9 reserved */
+
+                                       /* 2-bit mux at bit 12 (hsic_usb_480m reachable via two encodings) */
+                                       hsicphy_480m: hsicphy_480m_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <12 2>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>, <&hsic_usb_480m>, <&hsic_usb_480m>;
+                                               clock-output-names = "hsicphy_480m";
+                                               #clock-cells = <0>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register 27 (CRU offset 0x016c):
+                                * I2S clock tree — PLL pre-divider, bit-clock mux and
+                                * master-clock output select (fractional stage is in
+                                * clk_sel_con28).
+                                */
+                               clk_sel_con27: sel-con@016c {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x016c 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* 7-bit divide-by-(n+1) at bit 0;
+                                        * NO_REPARENT: rate requests must not flip the PLL mux */
+                                       i2s_pll_div: i2s_pll_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 7>;
+                                               clocks = <&i2s_pll>;
+                                               clock-output-names = "i2s_pll";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_MUX_DIV>;
+                                               rockchip,flags = <CLK_SET_RATE_NO_REPARENT>;
+                                       };
+
+                                       /* 7 reserved */
+
+                                       /* 2-bit mux at bit 8: divided PLL, fractional divider,
+                                        * external i2s_clkin or 12 MHz; rate changes propagate
+                                        * to the chosen parent (CLK_SET_RATE_PARENT). */
+                                       clk_i2s: clk_i2s_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <8 2>;
+                                               clocks = <&i2s_pll>, <&i2s_frac>, <&i2s_clkin>, <&xin12m>;
+                                               clock-output-names = "clk_i2s";
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_RK3288_I2S>;
+                                               rockchip,flags = <CLK_SET_RATE_PARENT>;
+                                       };
+
+                                       /* 11:10 reserved */
+
+                                       /* 1-bit PLL source mux at bit 12: cpll/gpll */
+                                       i2s_pll: i2s_pll_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <12 1>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>;
+                                               clock-output-names = "i2s_pll";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* 14:13 reserved */
+
+                                       /* 1-bit mux at bit 15 for the I2S master-clock pad:
+                                        * clk_i2s or fixed 12 MHz */
+                                       i2s_out: i2s_out_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <15 1>;
+                                               clocks = <&clk_i2s>, <&xin12m>;
+                                               clock-output-names = "i2s_out";
+                                               #clock-cells = <0>;
+                                       };
+                               };
+
+                               /*
+                                * Clock-select register 28 (CRU offset 0x0170):
+                                * I2S fractional divider (numerator/denominator share
+                                * the full 32-bit register).
+                                */
+                               clk_sel_con28: sel-con@0170 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0170 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       i2s_frac: i2s_frac {
+                                               compatible = "rockchip,rk3188-frac-con";
+                                               clocks = <&i2s_pll>;
+                                               clock-output-names = "i2s_frac";
+                                               /* numerator    denominator */
+                                               rockchip,bits = <0 32>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_FRAC>;
+                                               #clock-cells = <0>;
+                                       };
+                               };
+
+                               /* sel[30:29] reserved */
+
+                               /*
+                                * Clock-select register 31 (CRU offset 0x017c):
+                                * 8-channel SPDIF clock tree (fractional stage is in
+                                * clk_sel_con32).
+                                *
+                                * NOTE(review): "spidf" in clk_spidf_8ch below looks like a
+                                * typo for "spdif". The label and the clock-output-names
+                                * string may be matched by name elsewhere in the kernel, so
+                                * confirm all consumers before renaming.
+                                */
+                               clk_sel_con31: sel-con@017c {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x017c 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+
+                                       /* 7-bit divide-by-(n+1) at bit 0;
+                                        * NO_REPARENT: rate requests must not flip the PLL mux */
+                                       spdif_8ch_pll_div: spdif_8ch_pll_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 7>;
+                                               clocks = <&spdif_8ch_pll>;
+                                               clock-output-names = "spdif_8ch_pll";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_MUX_DIV>;
+                                               rockchip,flags = <CLK_SET_RATE_NO_REPARENT>;
+                                       };
+
+                                       /* 7 reserved */
+
+                                       /* 2-bit mux at bit 8: divided PLL, fractional divider,
+                                        * external i2s_clkin or 12 MHz */
+                                       clk_spidf_8ch: clk_spidf_8ch_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <8 2>;
+                                               clocks = <&spdif_8ch_pll>, <&spdif_8ch_frac>, <&i2s_clkin>, <&xin12m>;
+                                               clock-output-names = "clk_spidf_8ch";
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_RK3288_I2S>;
+                                               rockchip,flags = <CLK_SET_RATE_PARENT>;
+                                       };
+
+                                       /* 11:10 reserved */
+
+                                       /* 1-bit PLL source mux at bit 12: cpll/gpll */
+                                       spdif_8ch_pll: spdif_8ch_pll_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <12 1>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>;
+                                               clock-output-names = "spdif_8ch_pll";
+                                               #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                       };
+
+                                       /* 15:13 reserved */
+                               };
+
+                               /* Clock-select register 32 (offset 0x0180): holds the
+                                * spdif_8ch fractional divider, a single 32-bit
+                                * numerator/denominator field.
+                                */
+                               clk_sel_con32: sel-con@0180 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0180 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       spdif_8ch_frac: spdif_8ch_frac {
+                                               compatible = "rockchip,rk3188-frac-con";
+                                               clocks = <&spdif_8ch_pll>;
+                                               clock-output-names = "spdif_8ch_frac";
+                                               /* numerator    denominator */
+                                               rockchip,bits = <0 32>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_FRAC>;
+                                               #clock-cells = <0>;
+                                       };
+                               };
+
+                               /* Clock-select register 33 (offset 0x0184): uart0 clock tree -
+                                * 7-bit divider (bits 6:0), output mux (bits 9:8) and PLL
+                                * source mux (bits 13:12).
+                                */
+                               clk_sel_con33: sel-con@0184 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0184 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* Divider and its parent mux share the output name
+                                        * "clk_uart0_pll"; the same composite MUX_DIV pattern
+                                        * appears on the other *_pll_div nodes in this file.
+                                        */
+                                       clk_uart0_pll_div: clk_uart0_pll_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 7>;
+                                               clocks = <&clk_uart0_pll>;
+                                               clock-output-names = "clk_uart0_pll";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_MUX_DIV>;
+                                       };
+
+                                       /* 7 reserved */
+
+                                       /* clk_uart0: selects pll-div, frac or xin24m
+                                        * (xin24m listed twice to fill the 2-bit field).
+                                        */
+                                       clk_uart0: clk_uart0_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <8 2>;
+                                               clocks = <&clk_uart0_pll>, <&uart0_frac>, <&xin24m>, <&xin24m>;
+                                               clock-output-names = "clk_uart0";
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_RK3288_I2S>;
+                                               rockchip,flags = <CLK_SET_RATE_PARENT>;
+                                       };
+
+                                       /* 11:10 reserved */
+
+                                       clk_uart0_pll: clk_uart0_pll_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <12 2>;
+                                               clocks = <&clk_cpll>, <&clk_gpll>, <&usbphy_480m>, <&usbphy_480m>;
+                                               clock-output-names = "clk_uart0_pll";
+                                               #clock-cells = <0>;
+                                       };
+                               };
+
+                               /* Clock-select register 34 (offset 0x0188): uart0 fractional
+                                * divider (32-bit numerator/denominator).
+                                */
+                               clk_sel_con34: sel-con@0188 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0188 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       uart0_frac: uart0_frac {
+                                               compatible = "rockchip,rk3188-frac-con";
+                                               clocks = <&clk_uart0_pll>;
+                                               clock-output-names = "uart0_frac";
+                                               /* numerator    denominator */
+                                               rockchip,bits = <0 32>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_FRAC>;
+                                               #clock-cells = <0>;
+                                       };
+                               };
+
+                               /* Clock-select register 35 (offset 0x018c): uart1 divider
+                                * (bits 6:0) and output mux (bits 9:8), plus the CPLL/GPLL
+                                * uart PLL mux (bit 12) whose output feeds the uart1..uart4
+                                * dividers.
+                                */
+                               clk_sel_con35: sel-con@018c {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x018c 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       uart1_div: uart1_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 7>;
+                                               clocks = <&clk_uart_pll>;
+                                               clock-output-names = "uart1_div";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                       };
+
+                                       /* 7 reserved */
+
+                                       clk_uart1: clk_uart1_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <8 2>;
+                                               clocks = <&uart1_div>, <&uart1_frac>, <&xin24m>, <&xin24m>;
+                                               clock-output-names = "clk_uart1";
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_RK3288_I2S>;
+                                               rockchip,flags = <CLK_SET_RATE_PARENT>;
+                                       };
+
+                                       /* 11:10 reserved */
+
+                                       clk_uart_pll: clk_uart_pll_mux {
+                                                compatible = "rockchip,rk3188-mux-con";
+                                                rockchip,bits = <12 1>;
+                                                clocks = <&clk_cpll>, <&clk_gpll>;
+                                                clock-output-names = "clk_uart_pll";
+                                                #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                        };
+
+                                       /* 15:13 reserved (mux occupies bit 12 only; original
+                                        * comment said 14:13) */
+                               };
+
+                               /* Clock-select register 36 (offset 0x0190): uart1 fractional
+                                * divider (32-bit numerator/denominator).
+                                */
+                               clk_sel_con36: sel-con@0190 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0190 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       uart1_frac: uart1_frac {
+                                               compatible = "rockchip,rk3188-frac-con";
+                                               clocks = <&uart1_div>;
+                                               clock-output-names = "uart1_frac";
+                                               /* numerator    denominator */
+                                               rockchip,bits = <0 32>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_FRAC>;
+                                               #clock-cells = <0>;
+                                       };
+                               };
+
+                               /* Clock-select register 37 (offset 0x0194): uart2 - 7-bit
+                                * divider (bits 6:0) and a 1-bit mux (bit 8) between the
+                                * divider and xin24m. Unlike the other uarts, uart2 has no
+                                * fractional divider node here.
+                                */
+                               clk_sel_con37: sel-con@0194 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x0194 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       uart2_div: uart2_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 7>;
+                                               clocks = <&clk_uart_pll>;
+                                               clock-output-names = "uart2_div";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                       };
+
+                                       /* 7 reserved */
+
+                                       clk_uart2: clk_uart2_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <8 1>;
+                                               clocks = <&uart2_div>, <&xin24m>;
+                                               clock-output-names = "clk_uart2";
+                                               #clock-cells = <0>;
+                                               rockchip,flags = <CLK_SET_RATE_PARENT>;
+                                       };
+                               };
+
+                               /* sel[38] reserved */
+
+                               /* Clock-select register 39 (offset 0x019c): uart3 - 7-bit
+                                * divider (bits 6:0) and output mux (bits 9:8) choosing
+                                * divider, frac or xin24m.
+                                */
+                               clk_sel_con39: sel-con@019c {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x019c 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       uart3_div: uart3_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 7>;
+                                               clocks = <&clk_uart_pll>;
+                                               clock-output-names = "uart3_div";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                       };
+
+                                       /* 7 reserved */
+
+                                       clk_uart3: clk_uart3_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <8 2>;
+                                               clocks = <&uart3_div>, <&uart3_frac>, <&xin24m>, <&xin24m>;
+                                               clock-output-names = "clk_uart3";
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_RK3288_I2S>;
+                                               rockchip,flags = <CLK_SET_RATE_PARENT>;
+                                       };
+                               };
+
+                               /* Clock-select register 40 (offset 0x01a0): uart3 fractional
+                                * divider (32-bit numerator/denominator).
+                                */
+                               clk_sel_con40: sel-con@01a0 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01a0 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       uart3_frac: uart3_frac {
+                                               compatible = "rockchip,rk3188-frac-con";
+                                               clocks = <&uart3_div>;
+                                               clock-output-names = "uart3_frac";
+                                               /* numerator    denominator */
+                                               rockchip,bits = <0 32>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_FRAC>;
+                                               #clock-cells = <0>;
+                                       };
+                               };
+
+                               /* Clock-select register 41 (offset 0x01a4): uart4 - 7-bit
+                                * divider (bits 6:0) and output mux (bits 9:8) choosing
+                                * divider, frac or xin24m.
+                                */
+                               clk_sel_con41: sel-con@01a4 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01a4 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       uart4_div: uart4_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 7>;
+                                               clocks = <&clk_uart_pll>;
+                                               clock-output-names = "uart4_div";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                       };
+
+                                       /* 7 reserved */
+
+                                       clk_uart4: clk_uart4_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <8 2>;
+                                               clocks = <&uart4_div>, <&uart4_frac>, <&xin24m>, <&xin24m>;
+                                               clock-output-names = "clk_uart4";
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_RK3288_I2S>;
+                                               rockchip,flags = <CLK_SET_RATE_PARENT>;
+                                       };
+                               };
+
+                               /* Clock-select register 42 (offset 0x01a8): uart4 fractional
+                                * divider (32-bit numerator/denominator).
+                                */
+                               clk_sel_con42: sel-con@01a8 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01a8 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       uart4_frac: uart4_frac {
+                                               compatible = "rockchip,rk3188-frac-con";
+                                               clocks = <&uart4_div>;
+                                               clock-output-names = "uart4_frac";
+                                               /* numerator    denominator */
+                                               rockchip,bits = <0 32>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_FRAC>;
+                                               #clock-cells = <0>;
+                                       };
+                               };
+
+                               /* Clock-select register 43 (offset 0x01ac): GMAC clock -
+                                * 5-bit divider (bits 4:0), NPLL/CPLL/GPLL source mux
+                                * (bits 7:6), and clk_mac mux (bit 8) choosing between the
+                                * internal PLL path and the external gmac_clkin pin.
+                                */
+                               clk_sel_con43: sel-con@01ac {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01ac 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* Shares output name "clk_mac_pll" with its parent
+                                        * mux (composite MUX_DIV_NPLL ops). */
+                                       clk_mac_pll_div: clk_mac_pll_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&clk_mac_pll>;
+                                               clock-output-names = "clk_mac_pll";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_RK3368_MUX_DIV_NPLL>;
+                                       };
+
+                                       /* 5 reserved */
+
+                                       clk_mac_pll: clk_mac_pll_mux {
+                                                compatible = "rockchip,rk3188-mux-con";
+                                                rockchip,bits = <6 2>;
+                                                clocks = <&clk_npll>, <&clk_cpll>, <&clk_gpll>, <&clk_gpll>;
+                                                clock-output-names = "clk_mac_pll";
+                                                #clock-cells = <0>;
+                                        };
+
+                                       clk_mac: clk_mac_mux {
+                                                compatible = "rockchip,rk3188-mux-con";
+                                                rockchip,bits = <8 1>;
+                                                clocks = <&clk_mac_pll>, <&gmac_clkin>;
+                                                clock-output-names = "clk_mac";
+                                                #clock-cells = <0>;
+                                               rockchip,flags = <CLK_SET_RATE_PARENT>;
+                                               #clock-init-cells = <1>;
+                                        };
+
+                                       /* 11:9 reserved */
+
+                                       /* 12: test_clk: wifi_pll_sel */
+
+                                       /* 15:13 reserved */
+                               };
+
+                               /* Clock-select register 44 (offset 0x01b0): carries only the
+                                * wifi test-clock fractional field; no clock nodes are
+                                * modelled here.
+                                */
+                               clk_sel_con44: sel-con@01b0 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01b0 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* test_clk: wifi_frac */
+                               };
+
+                               /* Clock-select register 45 (offset 0x01b4): spi0 and spi1 -
+                                * 7-bit dividers (bits 6:0 / 14:8), each with a 1-bit
+                                * CPLL/GPLL source mux (bit 7 / bit 15).
+                                */
+                               clk_sel_con45: sel-con@01b4 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01b4 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       clk_spi0_div: clk_spi0_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 7>;
+                                               clocks = <&clk_spi0>;
+                                               clock-output-names = "clk_spi0";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_DIV>;
+                                       };
+
+                                       clk_spi0: clk_spi0_mux {
+                                                compatible = "rockchip,rk3188-mux-con";
+                                                rockchip,bits = <7 1>;
+                                                clocks = <&clk_cpll>, <&clk_gpll>;
+                                                clock-output-names = "clk_spi0";
+                                                #clock-cells = <0>;
+                                        };
+
+                                       clk_spi1_div: clk_spi1_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 7>;
+                                               clocks = <&clk_spi1>;
+                                               clock-output-names = "clk_spi1";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_DIV>;
+                                       };
+
+                                       clk_spi1: clk_spi1_mux {
+                                                compatible = "rockchip,rk3188-mux-con";
+                                                rockchip,bits = <15 1>;
+                                                clocks = <&clk_cpll>, <&clk_gpll>;
+                                                clock-output-names = "clk_spi1";
+                                                #clock-cells = <0>;
+                                        };
+                               };
+
+                               /* Clock-select register 46 (offset 0x01b8): tsp (5-bit
+                                * divider bits 4:0, NPLL-capable source mux bits 7:6) and
+                                * spi2 (7-bit divider bits 14:8, CPLL/GPLL mux bit 15).
+                                */
+                               clk_sel_con46: sel-con@01b8 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01b8 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       clk_tsp_div: clk_tsp_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&clk_tsp>;
+                                               clock-output-names = "clk_tsp";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_RK3368_MUX_DIV_NPLL>;
+                                       };
+
+                                       /* 5 reserved */
+
+                                       clk_tsp: clk_tsp_mux {
+                                                compatible = "rockchip,rk3188-mux-con";
+                                                rockchip,bits = <6 2>;
+                                                clocks = <&clk_cpll>, <&clk_gpll>, <&clk_npll>, <&clk_npll>;
+                                                clock-output-names = "clk_tsp";
+                                                #clock-cells = <0>;
+                                        };
+
+                                       clk_spi2_div: clk_spi2_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <8 7>;
+                                               clocks = <&clk_spi2>;
+                                               clock-output-names = "clk_spi2";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_DIV>;
+                                       };
+
+                                       clk_spi2: clk_spi2_mux {
+                                                compatible = "rockchip,rk3188-mux-con";
+                                                rockchip,bits = <15 1>;
+                                                clocks = <&clk_cpll>, <&clk_gpll>;
+                                                clock-output-names = "clk_spi2";
+                                                #clock-cells = <0>;
+                                        };
+                               };
+
+                               /* Clock-select register 47 (offset 0x01bc): nandc0 - 5-bit
+                                * divider (bits 4:0, MUX_EVENDIV ops) with CPLL/GPLL source
+                                * mux (bit 7).
+                                */
+                               clk_sel_con47: sel-con@01bc {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01bc 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       clk_nandc0_div: clk_nandc0_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&clk_nandc0>;
+                                               clock-output-names = "clk_nandc0";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_EVENDIV>;
+                                       };
+
+                                       /* 6:5 reserved */
+
+                                       clk_nandc0: clk_nandc0_mux {
+                                                compatible = "rockchip,rk3188-mux-con";
+                                                rockchip,bits = <7 1>;
+                                                clocks = <&clk_cpll>, <&clk_gpll>;
+                                                clock-output-names = "clk_nandc0";
+                                                #clock-cells = <0>;
+                                        };
+
+                                       /* 12:8 test_div */
+
+                                       /* 15:13 reserved */
+                               };
+
+                               /* Clock-select register 48 (offset 0x01c0): sdio0 - 7-bit
+                                * divider (bits 6:0, MUX_EVENDIV ops) with a 2-bit source
+                                * mux (bits 9:8) over CPLL/GPLL/usbphy_480m/xin24m.
+                                */
+                               clk_sel_con48: sel-con@01c0 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01c0 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       clk_sdio0_div: clk_sdio0_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 7>;
+                                               clocks = <&clk_sdio0>;
+                                               clock-output-names = "clk_sdio0";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_EVENDIV>;
+                                       };
+
+                                       /* 7 reserved */
+
+                                       clk_sdio0: clk_sdio0_mux {
+                                                compatible = "rockchip,rk3188-mux-con";
+                                                rockchip,bits = <8 2>;
+                                                clocks = <&clk_cpll>, <&clk_gpll>, <&usbphy_480m>, <&xin24m>;
+                                                clock-output-names = "clk_sdio0";
+                                                #clock-cells = <0>;
+                                        };
+
+                                       /* 15:10 reserved */
+                               };
+
+                               /* sel[49] reserved */
+
+                               /* Clock-select register 50 (offset 0x01c8): sdmmc0 - same
+                                * layout as sdio0 at sel-con 48: 7-bit EVENDIV divider
+                                * (bits 6:0) and 2-bit source mux (bits 9:8).
+                                */
+                               clk_sel_con50: sel-con@01c8 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01c8 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       clk_sdmmc0_div: clk_sdmmc0_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 7>;
+                                               clocks = <&clk_sdmmc0>;
+                                               clock-output-names = "clk_sdmmc0";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_EVENDIV>;
+                                       };
+
+                                       /* 7 reserved */
+
+                                       clk_sdmmc0: clk_sdmmc0_mux {
+                                                compatible = "rockchip,rk3188-mux-con";
+                                                rockchip,bits = <8 2>;
+                                                clocks = <&clk_cpll>, <&clk_gpll>, <&usbphy_480m>, <&xin24m>;
+                                                clock-output-names = "clk_sdmmc0";
+                                                #clock-cells = <0>;
+                                        };
+
+                                       /* 15:10 reserved */
+                               };
+
+                               /* Clock-select register 51 (offset 0x01cc): emmc - same
+                                * layout as sdio0/sdmmc0: 7-bit EVENDIV divider (bits 6:0)
+                                * and 2-bit source mux (bits 9:8).
+                                */
+                               clk_sel_con51: sel-con@01cc {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01cc 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       clk_emmc_div: clk_emmc_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 7>;
+                                               clocks = <&clk_emmc>;
+                                               clock-output-names = "clk_emmc";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_EVENDIV>;
+                                       };
+
+                                       /* 7 reserved */
+
+                                       clk_emmc: clk_emmc_mux {
+                                                compatible = "rockchip,rk3188-mux-con";
+                                                rockchip,bits = <8 2>;
+                                                clocks = <&clk_cpll>, <&clk_gpll>, <&usbphy_480m>, <&xin24m>;
+                                                clock-output-names = "clk_emmc";
+                                                #clock-cells = <0>;
+                                        };
+
+                                       /* 15:10 reserved */
+                               };
+
+                               /* Clock-select register 52 (offset 0x01d0): sfc (serial
+                                * flash controller) - 5-bit divider (bits 4:0) with
+                                * CPLL/GPLL source mux (bit 7).
+                                */
+                               clk_sel_con52: sel-con@01d0 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01d0 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       clk_sfc_div: clk_sfc_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 5>;
+                                               clocks = <&clk_sfc>;
+                                               clock-output-names = "clk_sfc";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_DIV>;
+                                       };
+
+                                       /* 6:5 reserved */
+
+                                       clk_sfc: clk_sfc_mux {
+                                                compatible = "rockchip,rk3188-mux-con";
+                                                rockchip,bits = <7 1>;
+                                                clocks = <&clk_cpll>, <&clk_gpll>;
+                                                clock-output-names = "clk_sfc";
+                                                #clock-cells = <0>;
+                                        };
+
+                                       /* 15:8 reserved */
+                               };
+
+                               /* Clock-select register at CRU offset 0x01d4: 2-channel I2S clock tree (pll mux -> divider -> output mux). */
+                               clk_sel_con53: sel-con@01d4 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01d4 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* 7-bit divider on i2s_2ch_pll; rate changes must not re-parent (CLK_SET_RATE_NO_REPARENT). */
+                                       i2s_2ch_pll_div: i2s_2ch_pll_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 7>;
+                                               clocks = <&i2s_2ch_pll>;
+                                               clock-output-names = "i2s_2ch_pll";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_MUX_DIV>;
+                                               rockchip,flags = <CLK_SET_RATE_NO_REPARENT>;
+                                       };
+
+                                       /* 7 reserved */
+
+                                       /* Final I2S output mux (bits 9:8): integer div, fractional div, dummy, or 12 MHz; rate requests propagate to the selected parent. */
+                                       clk_i2s_2ch: clk_i2s_2ch_mux {
+                                               compatible = "rockchip,rk3188-mux-con";
+                                               rockchip,bits = <8 2>;
+                                               clocks = <&i2s_2ch_pll>, <&i2s_2ch_frac>, <&dummy>, <&xin12m>;
+                                               clock-output-names = "clk_i2s_2ch";
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_RK3288_I2S>;
+                                               rockchip,flags = <CLK_SET_RATE_PARENT>;
+                                       };
+
+                                       /* 11:10 reserved */
+
+                                       /* PLL source select (bit 12): 0 = CPLL, 1 = GPLL; #clock-init-cells allows a boot-time default parent. */
+                                       i2s_2ch_pll: i2s_2ch_pll_mux {
+                                                compatible = "rockchip,rk3188-mux-con";
+                                                rockchip,bits = <12 1>;
+                                                clocks = <&clk_cpll>, <&clk_gpll>;
+                                                clock-output-names = "i2s_2ch_pll";
+                                                #clock-cells = <0>;
+                                               #clock-init-cells = <1>;
+                                        };
+
+                               };
+
+                               /* Clock-select register at CRU offset 0x01d8: fractional divider for the 2-channel I2S path. */
+                               clk_sel_con54: sel-con@01d8 {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01d8 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       /* Full 32-bit fractional field (numerator/denominator packed in one register). */
+                                       i2s_2ch_frac: i2s_2ch_frac {
+                                               compatible = "rockchip,rk3188-frac-con";
+                                               clocks = <&i2s_2ch_pll>;
+                                               clock-output-names = "i2s_2ch_frac";
+                                               /* numerator    denominator */
+                                               rockchip,bits = <0 32>;
+                                               rockchip,clkops-idx =
+                                                       <CLKOPS_RATE_FRAC>;
+                                               #clock-cells = <0>;
+                                       };
+                               };
+
+                               /* Clock-select register at CRU offset 0x01dc: clk_hdcp divider (bits 5:0) and parent mux (bits 7:6). */
+                               clk_sel_con55: sel-con@01dc {
+                                       compatible = "rockchip,rk3188-selcon";
+                                       reg = <0x01dc 0x4>;
+                                       #address-cells = <1>;
+                                       #size-cells = <1>;
+
+                                       clk_hdcp_div: clk_hdcp_div {
+                                               compatible = "rockchip,rk3188-div-con";
+                                               rockchip,bits = <0 6>;
+                                               clocks = <&clk_hdcp>;
+                                               clock-output-names = "clk_hdcp";
+                                               rockchip,div-type = <CLK_DIVIDER_PLUS_ONE>;
+                                               #clock-cells = <0>;
+                                               rockchip,clkops-idx = <CLKOPS_RATE_RK3368_MUX_DIV_NPLL>;
+                                       };
+
+                                       /* 2-bit mux: CPLL, GPLL, NPLL; NPLL is listed twice so both remaining encodings select it. */
+                                       clk_hdcp: clk_hdcp_mux {
+                                                compatible = "rockchip,rk3188-mux-con";
+                                                rockchip,bits = <6 2>;
+                                                clocks = <&clk_cpll>, <&clk_gpll>, <&clk_npll>, <&clk_npll>;
+                                                clock-output-names = "clk_hdcp";
+                                                #clock-cells = <0>;
+                                        };
+                               };
+                       };
+
+                       /* Gate control regs */
+                       clk_gate_cons {
+                                compatible = "rockchip,rk-gate-cons";
+                                #address-cells = <1>;
+                                #size-cells = <1>;
+                                ranges;
+
+                                /* Gate register @0x0200: 16 enable bits, one per clocks[] entry; coresight (cs) and CCI gates live here. */
+                                clk_gates0: gate-clk@0200 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0200 0x4>;
+                                        clocks =
+                                                <&dummy>,      <&dummy>,
+                                                <&dummy>,      <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&clk_gpll>,    <&clk_apllb>,
+                                               <&clk_aplll>,   <&dummy>,
+
+                                               <&aclk_cci>,    <&clkin_trace>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "reserved",    "reserved",/* core_b_apll core_b_gpll */
+                                                "reserved",    "reserved",
+
+                                               "reserved",     "reserved",/* core_l_apll core_l_gpll */
+                                               "reserved",     "reserved",
+
+                                               "g_clk_cs_gpll",        "g_clk_cs_apllb",
+                                               "g_clk_cs_aplll",       "reserved",
+
+                                               "aclk_cci",     "clkin_trace",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x0204: bus-domain gates (aclk/hclk/pclk_bus, MCU clock, aclk_bus PLL sources). */
+                               clk_gates1: gate-clk@0204 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0204 0x4>;
+                                        clocks =
+                                                <&aclk_bus>,   <&hclk_bus>,
+                                                <&pclk_bus>,   <&fclk_mcu>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&clk_gpll>,    <&clk_cpll>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "aclk_bus",    "hclk_bus",
+                                                "pclk_bus",    "fclk_mcu",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",/* ddr_dpll  ddr_gpll */
+                                               "aclk_bus_gpll",        "aclk_bus_cpll",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x0208: UART0-4 baud-clock gates (integer and fractional dividers). */
+                               clk_gates2: gate-clk@0208 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0208 0x4>;
+                                        clocks =
+                                                <&clk_uart0_pll>,      <&uart0_frac>,
+                                                <&uart1_div>,  <&uart1_frac>,
+
+                                               <&uart2_div>,   <&dummy>,
+                                               <&uart3_div>,   <&uart3_frac>,
+
+                                               <&uart4_div>,   <&uart4_frac>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "clk_uart0_pll",       "uart0_frac",
+                                                "uart1_div",   "uart1_frac",
+
+                                               "uart2_div",    "reserved",
+                                               "uart3_div",    "uart3_frac",
+
+                                               "uart4_div",    "uart4_frac",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x020c: peripheral-domain gates (peri bus clocks, MAC, TS-ADC, SAR-ADC, SPI0-2). */
+                               clk_gates3: gate-clk@020c {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x020c 0x4>;
+                                        clocks =
+                                                <&aclk_peri>,  <&dummy>,
+                                                <&hclk_peri>,  <&pclk_peri>,
+
+                                               <&clk_mac_pll>, <&clk_tsadc>,
+                                               <&clk_saradc>,  <&clk_spi0>,
+
+                                               <&clk_spi1>,    <&clk_spi2>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "aclk_peri",   "reserved", /* bit1: aclk_peri */
+                                                "hclk_peri",   "pclk_peri",
+
+                                               "clk_mac_pll",  "clk_tsadc",
+                                               "clk_saradc",   "clk_spi0",
+
+                                               "clk_spi1",     "clk_spi2",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x0210: video/graphics gates (VOP0, RGA, VIP, VPU, ISP, GPU core, HDMI CEC/HDCP, DSI PHY). */
+                               clk_gates4: gate-clk@0210 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0210 0x4>;
+                                        clocks =
+                                                <&aclk_vio0>,  <&dclk_vop0>,
+                                                <&xin24m>,     <&aclk_rga_pre>,
+
+                                               <&clk_rga>,     <&clk_vip>,
+                                               <&aclk_vepu>,   <&aclk_vdpu>,
+
+                                               <&dummy>,       <&clk_isp>,
+                                               <&dummy>,       <&clk_gpu_core>,
+
+                                               <&xin32k>,      <&xin24m>,
+                                               <&xin24m>,      <&dummy>;
+
+                                        clock-output-names =
+                                                "aclk_vio0",   "dclk_vop0",
+                                                "clk_vop0_pwm",        "aclk_rga_pre",
+
+                                               "clk_rga",      "clk_vip",
+                                               "aclk_vepu",    "aclk_vdpu",
+
+                                               "reserved",     "clk_isp", /* bit8: hclk_vpu */
+                                               "reserved",     "clk_gpu_core",
+
+                                               "clk_hdmi_cec", "clk_hdmi_hdcp",
+                                               "clk_dsiphy_24m",       "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x0214: HEVC, eDP, HDCP, GPU mem/cfg, and 2-channel I2S gates. */
+                               clk_gates5: gate-clk@0214 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0214 0x4>;
+                                        clocks =
+                                                <&dummy>,      <&clk_hevc_cabac>,
+                                                <&clk_hevc_core>,      <&clk_edp>,
+
+                                               <&clk_edp_24m>, <&clk_hdcp>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&aclk_gpu_mem>,        <&aclk_gpu_cfg>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&i2s_2ch_pll>,
+                                               <&i2s_2ch_frac>,        <&clk_i2s_2ch>;
+
+                                        clock-output-names =
+                                                "reserved",    "clk_hevc_cabac",
+                                                "clk_hevc_core",       "clk_edp",
+
+                                               "clk_edp_24m",  "clk_hdcp",
+                                               "reserved",     "reserved",
+
+                                               "aclk_gpu_mem", "aclk_gpu_cfg",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "i2s_2ch_pll",
+                                               "i2s_2ch_frac", "clk_i2s_2ch";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x0218: I2S, SPDIF 8ch, SFC, and TSP gates. */
+                               /* NOTE(review): "clk_spidf_8ch" looks like a typo for "clk_spdif_8ch" (cf. spdif_8ch_pll/frac in
+                                * the same list) — the phandle label is defined elsewhere with this spelling, so it cannot be
+                                * renamed here alone; fix label, references, and output name together. */
+                               clk_gates6: gate-clk@0218 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0218 0x4>;
+                                        clocks =
+                                                <&i2s_out>,    <&i2s_pll>,
+                                                <&i2s_frac>,   <&clk_i2s>,
+
+                                               <&spdif_8ch_pll>,       <&spdif_8ch_frac>,
+                                               <&clk_spidf_8ch>,       <&clk_sfc>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&clk_tsp>,     <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "i2s_out",     "i2s_pll",
+                                                "i2s_frac",    "clk_i2s",
+
+                                               "spdif_8ch_pll",        "spdif_8ch_frac",
+                                               "clk_spidf_8ch",        "clk_sfc",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "clk_tsp",      "reserved",
+                                               "reserved",     "reserved";/* clk_ddrphy_gate   clk4x_ddrphy_gate */
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x021c: JTAG, crypto, MAC ref, NAND, PMU, PVTM, and SD/MMC card-clock gates. */
+                               /* NOTE(review): bits 10-15 are named (clk_pvtm_*, clk_sdmmc0, clk_sdio0, clk_emmc) but several
+                                * parents are <&dummy> — presumably the real parents are wired via other registers; verify. */
+                               clk_gates7: gate-clk@021c {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x021c 0x4>;
+                                        clocks =
+                                                <&jtag_clkin>, <&dummy>,
+                                                <&clk_crypto>, <&xin24m>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&clk_mac>,     <&clk_mac>,
+
+                                               <&clk_nandc0>,  <&pclk_pmu_pre>,
+                                               <&xin24m>,      <&xin24m>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "clk_jtag",    "reserved",/* bit1: test_clk */
+                                                "clk_crypto",  "clk_pvtm_pmu",
+
+                                               "reserved",     "reserved",/* clk_mac_rx  clk_mac_tx */
+                                               "clk_mac_ref",  "clk_mac_refout",
+
+                                               "clk_nandc0",   "pclk_pmu_pre",
+                                               "clk_pvtm_core",        "clk_pvtm_gpu",
+
+                                               "clk_sdmmc0",   "clk_sdio0",
+                                               "reserved",     "clk_emmc";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x0220: USB/HSIC PHY gates (480M, OTG PHY, 32k, 12M). */
+                               clk_gates8: gate-clk@0220 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0220 0x4>;
+                                        clocks =
+                                                <&hsic_usb_480m>,      <&xin24m>,
+                                                <&dummy>,      <&dummy>,
+
+                                               <&clk_32k_mux>, <&dummy>,
+                                               <&xin12m>,      <&hsicphy_480m>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "hsic_usb_480m",       "clk_otgphy0",
+                                                "reserved",    "reserved",
+
+                                               "g_clk_otg_adp",        "reserved",/* bit4: clk_otg_adp */
+                                               "hsicphy_12m",  "hsicphy_480m",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x0224: all 16 gates reserved on this SoC (every parent is <&dummy>);
+                                * node kept so gate-register numbering stays contiguous. */
+                               clk_gates9: gate-clk@0224 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0224 0x4>;
+                                        clocks =
+                                                <&dummy>,      <&dummy>,
+                                                <&dummy>,      <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "reserved",    "reserved",
+                                                "reserved",    "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x0228: all 16 gates reserved on this SoC (every parent is <&dummy>). */
+                               clk_gates10: gate-clk@0228 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0228 0x4>;
+                                        clocks =
+                                                <&dummy>,      <&dummy>,
+                                                <&dummy>,      <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "reserved",    "reserved",
+                                                "reserved",    "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x022c: all 16 gates reserved on this SoC (every parent is <&dummy>). */
+                               clk_gates11: gate-clk@022c {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x022c 0x4>;
+                                        clocks =
+                                                <&dummy>,      <&dummy>,
+                                                <&dummy>,      <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "reserved",    "reserved",
+                                                "reserved",    "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x0230: bus-domain peripheral gates (PWM0, mailbox, I2C, intmem, I2S/SPDIF hclks, DMAC). */
+                               clk_gates12: gate-clk@0230 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0230 0x4>;
+                                        clocks =
+                                                <&pclk_bus>,   <&pclk_bus>,
+                                                <&pclk_bus>,   <&pclk_bus>,
+
+                                               <&aclk_bus>,    <&aclk_bus>,
+                                               <&aclk_bus>,    <&hclk_bus>,
+
+                                               <&hclk_bus>,    <&hclk_bus>,
+                                               <&hclk_bus>,    <&aclk_bus>,
+
+                                               <&aclk_bus>,    <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "g_pclk_pwm0", "g_p_mailbox",
+                                                "g_p_i2cpmu",  "g_p_i2caudio",
+
+                                               "g_aclk_intmem",        "g_clk_intmem0",
+                                               "g_clk_intmem1",        "g_h_i2s_8ch",
+
+                                               "g_h_i2s_2ch",  "g_hclk_rom",
+                                               "g_hclk_spdif", "g_aclk_dmac",
+
+                                               "g_a_strc_sys", "reserved",/* bit13: pclk_ddrupctl */
+                                               "reserved",     "reserved";/* bit14: pclk_ddrphy */
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x0234: efuse, crypto, debug UART, PWM1, HSADC/TSP, SIM, and GIC gates. */
+                               clk_gates13: gate-clk@0234 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0234 0x4>;
+                                        clocks =
+                                                <&pclk_bus>,   <&pclk_bus>,
+                                                <&dummy>,      <&hclk_bus>,
+
+                                               <&hclk_bus>,    <&pclk_bus>,
+                                               <&pclk_bus>,    <&clkin_hsadc_tsp>,
+
+                                               <&pclk_bus>,    <&aclk_bus>,
+                                               <&hclk_bus>,    <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "g_p_efuse_1024",      "g_p_efuse_256",
+                                                "reserved",    "g_mclk_crypto",/* bit2: nclk_ddrupctl */
+
+                                               "g_sclk_crypto",        "g_p_uartdbg",
+                                               "g_pclk_pwm1",  "clk_hsadc_tsp",
+
+                                               "g_pclk_sim",   "g_aclk_gic400",
+                                               "g_hclk_tsp",   "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x0238: all 16 gates reserved on this SoC (every parent is <&dummy>). */
+                               clk_gates14: gate-clk@0238 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0238 0x4>;
+                                        clocks =
+                                                <&dummy>,      <&dummy>,
+                                                <&dummy>,      <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "reserved",    "reserved",
+                                                "reserved",    "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               /* Gate register @0x023c: all 16 gates reserved on this SoC (every parent is <&dummy>). */
+                               clk_gates15: gate-clk@023c {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x023c 0x4>;
+                                        clocks =
+                                                <&dummy>,      <&dummy>,
+                                                <&dummy>,      <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "reserved",    "reserved",/* aclk_video hclk_video */
+                                                "reserved",    "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               clk_gates16: gate-clk@0240 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0240 0x4>;
+                                        clocks =
+                                                <&clk_gates16 10>,     <&clk_gates16 8>,
+                                                <&clk_gates16 9>,      <&clk_gates16 8>,
+
+                                               <&clk_gates16 9>,       <&clk_gates16 9>,
+                                               <&clk_gates16 8>,       <&clk_gates16 8>,
+
+                                               <&hclk_vio>,    <&aclk_vio0>,
+                                               <&aclk_rga_pre>,        <&clk_gates16 9>,
+
+                                               <&clk_gates16 8>,       <&pclkin_vip>,
+                                               <&clk_isp>,     <&dummy>;
+
+                                        clock-output-names =
+                                                "g_aclk_rga",  "g_hclk_rga",
+                                                "g_aclk_iep",  "g_hclk_iep",
+
+                                               "g_aclk_vop_iep",       "g_aclk_vop",
+                                               "g_hclk_vop",   "h_vio_ahb_arbi",
+
+                                               "g_hclk_vio_noc",       "g_aclk_vio0_noc",
+                                               "g_aclk_vio1_noc",      "g_aclk_vip",
+
+                                               "g_hclk_vip",   "g_pclkin_vip",
+                                               "g_hclk_isp",   "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               clk_gates17: gate-clk@0244 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0244 0x4>;
+                                        clocks =
+                                                <&clk_isp>,    <&dummy>,
+                                                <&pclkin_isp>, <&pclk_vio>,
+
+                                               <&pclk_vio>,    <&dummy>,
+                                               <&pclk_vio>,    <&clk_gates16 8>,
+
+                                               <&pclk_vio>,    <&pclk_vio>,
+                                               <&clk_gates16 10>,      <&pclk_vio>,
+
+                                               <&clk_gates16 8>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "g_aclk_isp",  "reserved",
+                                                "g_pclkin_isp",        "g_p_mipi_dsi0",
+
+                                               "g_p_mipi_csi", "reserved",
+                                               "g_p_hdmi_ctrl",        "g_hclk_vio_h2p",
+
+                                               "g_pclk_vio_h2p",       "g_p_edp_ctrl",
+                                               "g_aclk_hdcp",  "g_pclk_hdcp",
+
+                                               "g_h_hdcpmmu",  "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               clk_gates18: gate-clk@0248 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0248 0x4>;
+                                        clocks =
+                                                <&dummy>,      <&dummy>,
+                                                <&dummy>,      <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "reserved",    "reserved",/* bit0-1: aclk_gpu_cfg aclk_gpu_mem */
+                                                "reserved",    "reserved",/* bit2: clk_gpu_core */
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               clk_gates19: gate-clk@024c {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x024c 0x4>;
+                                        clocks =
+                                                <&hclk_peri>,  <&pclk_peri>,
+                                                <&aclk_peri>,  <&aclk_peri>,
+
+                                               <&pclk_peri>,   <&pclk_peri>,
+                                               <&pclk_peri>,   <&pclk_peri>,
+
+                                               <&pclk_peri>,   <&pclk_peri>,
+                                               <&pclk_peri>,   <&pclk_peri>,
+
+                                               <&pclk_peri>,   <&pclk_peri>,
+                                               <&pclk_peri>,   <&pclk_peri>;
+
+                                        clock-output-names =
+                                                "g_hp_axi_matrix",     "g_pp_axi_matrix",
+                                                "g_ap_axi_matrix",     "g_a_dmac_peri",
+
+                                               "g_pclk_spi0",  "g_pclk_spi1",
+                                               "g_pclk_spi2",  "g_pclk_uart0",
+
+                                               "g_pclk_uart1", "g_pclk_uart3",
+                                               "g_pclk_uart4", "g_pclk_i2c2",
+
+                                               "g_pclk_i2c3",  "g_pclk_i2c4",
+                                               "g_pclk_i2c5",  "g_pclk_saradc";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               clk_gates20: gate-clk@0250 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0250 0x4>;
+                                        clocks =
+                                                <&pclk_peri>,  <&hclk_peri>,
+                                                <&hclk_peri>,  <&hclk_peri>,
+
+                                               <&dummy>,       <&hclk_peri>,
+                                               <&hclk_peri>,   <&hclk_peri>,
+
+                                               <&aclk_peri>,   <&hclk_peri>,
+                                               <&hclk_peri>,   <&hclk_peri>,
+
+                                               <&dummy>,       <&aclk_peri>,
+                                               <&pclk_peri>,   <&aclk_peri>;
+
+                                        clock-output-names =
+                                                "g_pclk_tsadc",        "g_hclk_otg0",
+                                                "g_h_pmu_otg0",        "g_hclk_host0",
+
+                                               "reserved",     "g_hclk_hsic",
+                                               "g_h_usb_peri", "g_h_p_ahb_arbi",
+
+                                               "g_a_peri_niu", "g_h_emem_peri",
+                                               "g_h_mmc_peri", "g_hclk_nand0",
+
+                                               "reserved",     "g_aclk_gmac",
+                                               "g_pclk_gmac",  "g_hclk_sfc";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               clk_gates21: gate-clk@0254 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0254 0x4>;
+                                        clocks =
+                                                <&hclk_peri>,  <&hclk_peri>,
+                                                <&hclk_peri>,  <&hclk_peri>,
+
+                                               <&aclk_peri>,   <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "g_hclk_sdmmc",        "g_hclk_sdio0",
+                                                "g_hclk_emmc", "g_hclk_hsadc",
+
+                                               "g_aclk_peri_mmu",      "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               clk_gates22: gate-clk@0258 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0258 0x4>;
+                                        clocks =
+                                                <&dummy>,      <&pclk_alive_pre>,
+                                                <&pclk_alive_pre>,     <&pclk_alive_pre>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&pclk_alive_pre>,      <&pclk_alive_pre>,
+                                               <&pclk_vio>,    <&pclk_vio>,
+
+                                               <&pclk_alive_pre>,      <&pclk_alive_pre>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "reserved",    "g_pclk_gpio1",
+                                                "g_pclk_gpio2",        "g_pclk_gpio3",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "g_pclk_grf",   "g_p_alive_niu",
+                                               "g_pclk_dphytx0",       "g_pclk_dphyrx",
+
+                                               "g_pclk_timer0",        "g_pclk_timer1",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               clk_gates23: gate-clk@025c {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x025c 0x4>;
+                                        clocks =
+                                                <&pclk_pmu_pre>,       <&pclk_pmu_pre>,
+                                                <&pclk_pmu_pre>,       <&pclk_pmu_pre>,
+
+                                               <&pclk_pmu_pre>,        <&pclk_pmu_pre>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "g_pclk_pmu",  "g_pclk_intmem1",
+                                                "g_pclk_pmu_noc",      "g_pclk_sgrf",
+
+                                               "g_pclk_gpio0", "g_pclk_pmugrf",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+
+                               clk_gates24: gate-clk@0260 {
+                                        compatible = "rockchip,rk3188-gate-clk";
+                                        reg = <0x0260 0x4>;
+                                        clocks =
+                                                <&xin24m>,     <&xin24m>,
+                                                <&xin24m>,     <&xin24m>,
+
+                                               <&xin24m>,      <&xin24m>,
+                                               <&xin24m>,      <&xin24m>,
+
+                                               <&xin24m>,      <&xin24m>,
+                                               <&xin24m>,      <&xin24m>,
+
+                                               <&dummy>,       <&dummy>,
+                                               <&dummy>,       <&dummy>;
+
+                                        clock-output-names =
+                                                "g_clk_timer0",        "g_clk_timer1",
+                                                "g_clk_timer2",        "g_clk_timer3",
+
+                                               "g_clk_timer4", "g_clk_timer5",
+                                               "g_clk_timer10",        "g_clk_timer11",
+
+                                               "g_clk_timer12",        "g_clk_timer13",
+                                               "g_clk_timer14",        "g_clk_timer15",
+
+                                               "reserved",     "reserved",
+                                               "reserved",     "reserved";
+
+                                       #clock-cells = <1>;
+                                };
+                       };
+               };
+
+               special_regs { /* clock controls that live outside the main CRU register block */
+                       compatible = "rockchip,rk-clock-special-regs";
+                       #address-cells = <2>;
+                       #size-cells = <2>;
+                       ranges;
+
+                       clk_32k_mux: clk_32k_mux { /* 32 kHz source select: bit 6 of reg 0xff738100; parents xin32k / clk_gates7[3] */
+                               compatible = "rockchip,rk3188-mux-con";
+                               reg = <0x0 0xff738100 0x0 0x4>;
+                               rockchip,bits = <6 1>;
+                               clocks = <&xin32k>, <&clk_gates7 3>;
+                               clock-output-names = "clk_32k_mux";
+                               #clock-cells = <0>;
+                               #clock-init-cells = <1>;
+                       };
+               };
+       };
+};
diff --git a/arch/arm64/boot/dts/rk3368-fpga.dts b/arch/arm64/boot/dts/rk3368-fpga.dts
new file mode 100644 (file)
index 0000000..0c336cd
--- /dev/null
@@ -0,0 +1,168 @@
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/rkfb/rk_fb.h>
+
+/ { /* minimal RK3368 FPGA bring-up tree: fixed 24 MHz clock, one UART, GIC, ion, fb/lcdc */
+       compatible = "rockchip,rk3368";
+
+       interrupt-parent = <&gic>;
+       #address-cells = <2>;
+       #size-cells = <2>;
+
+       xin24m: xin24m { /* board oscillator modelled as a fixed-clock */
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <24000000>;
+               clock-output-names = "xin24m";
+       };
+
+       aliases {
+               serial2 = &uart_dbg;
+       };
+
+       cpus {
+               #address-cells = <2>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a53","arm,armv8";
+                       reg = <0x0 0x0>;
+               };
+       };
+
+       chosen { /* console on ttyS2 = uart_dbg (0xff690000), matching the alias above */
+               bootargs = "console=ttyS2 earlyprintk=uart8250-32bit,0xff690000";
+       };
+
+       timer { /* arm generic timer PPIs; mask covers 8 CPUs */
+               compatible = "arm,armv8-timer";
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
+               clock-frequency = <24000000>;
+       };
+
+       memory@00000000 { /* 0x20000000 = 512 MiB of DRAM at address 0 */
+               device_type = "memory";
+               reg = <0x00000000 0x00000000 0x0 0x20000000>;
+       };
+
+       uart_dbg: serial@ff690000 { /* 8250-compatible debug UART */
+               compatible = "rockchip,serial";
+               reg = <0x0 0xff690000 0x0 0x100>;
+               interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+               clock-frequency = <24000000>;
+               clocks = <&xin24m>, <&xin24m>;
+               clock-names = "sclk_uart", "pclk_uart";
+               reg-shift = <2>;
+               reg-io-width = <4>;
+       };
+
+       gic: interrupt-controller@ffb70000 { /* NOTE(review): unit-address ffb70000 but first reg is 0xffb71000 — confirm which is intended */
+               compatible = "arm,cortex-a15-gic";
+               #interrupt-cells = <3>;
+               #address-cells = <0>;
+               interrupt-controller;
+               reg = <0x0 0xffb71000 0 0x1000>,
+                     <0x0 0xffb72000 0 0x1000>;
+       };
+
+       ion { /* NOTE(review): children use reg with #size-cells = <0>; presumably the ion driver parses base+size itself — verify */
+               compatible = "rockchip,ion";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ion_cma: rockchip,ion-heap@1 { /* CMA HEAP */
+                       compatible = "rockchip,ion-heap";
+                       rockchip,ion_heap = <1>;
+                       reg = <0x00000000 0x08000000>; /* 128MB (0x08000000); original comment said 512MB */
+               };
+               rockchip,ion-heap@3 { /* VMALLOC HEAP */
+                       compatible = "rockchip,ion-heap";
+                       rockchip,ion_heap = <3>;
+               };
+       };
+
+       fb: fb { /* rk-fb framework node; single-display mode */
+               compatible = "rockchip,rk-fb";
+               rockchip,disp-mode = <NO_DUAL>;
+       };
+
+
+       rk_screen: rk_screen { /* 800x480 RGB panel timing */
+                       compatible = "rockchip,screen";
+                       disp_timings: display-timings {
+                       native-mode = <&timing0>;
+                       timing0: timing0 {
+                                               screen-type = <SCREEN_RGB>;
+                                               out-face    = <OUT_P888>;
+                                               color-mode = <COLOR_RGB>;
+                                               clock-frequency = <27000000>;
+                                               hactive = <800>;
+                                               vactive = <480>;
+                                               hback-porch = <206>;
+                                               hfront-porch = <1>;
+                                               vback-porch = <25>;
+                                               vfront-porch = <10>;
+                                               hsync-len = <10>;
+                                               vsync-len = <10>;
+                                               hsync-active = <0>;
+                                               vsync-active = <0>;
+                                               de-active = <0>;
+                                               pixelclk-active = <0>;
+                                               swap-rb = <0>;
+                                               swap-rg = <0>;
+                                               swap-gb = <0>;
+                       };
+               };
+       };
+
+       lvds: lvds@ff968000 { /* clocks still commented out for FPGA bring-up */
+               compatible = "rockchip,rk3368-lvds";
+               reg = <0x0 0xff968000 0x0 0x4000>, <0x0 0xff9600b0 0x0 0x01>;
+               //reg = <0xff968000 0x4000>, <0xff9600b0 0x01>;
+               reg-names = "mipi_lvds_phy", "mipi_lvds_ctl";
+               //clocks = <&dummy>, <&dummy>;
+               //clock-names = "pclk_lvds", "pclk_lvds_ctl";
+               status = "okay";
+       };
+
+       lcdc: lcdc@ff930000 { /* primary display controller; pinctrl/clocks disabled for FPGA bring-up */
+               compatible = "rockchip,rk3368-lcdc";
+               rockchip,prop = <PRMRY>;
+               rockchip,pwr18 = <0>;
+               rockchip,iommu-enabled = <0>;
+               //reg = <0xff930000 0x10000>;
+               reg = <0x0 0xff930000 0x0 0x10000>;
+               interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+               //pinctrl-names = "default", "gpio";
+               //pinctrl-0 = <&lcdc_lcdc>;
+               //pinctrl-1 = <&lcdc_gpio>;
+               status = "okay";
+               //clocks = <&dummy>, <&dummy>, <&dummy>, <&dummy>, <&dummy>;
+               //clock-names = "aclk_lcdc", "dclk_lcdc", "hclk_lcdc", "pd_lcdc", "sclk_lcdc";
+               power_ctr: power_ctr { /* panel power sequencing; all entries currently commented out */
+               rockchip,debug = <0>;
+               /*
+               lcd_en:lcd_en {
+                       rockchip,power_type = <GPIO>;
+                       gpios = <&gpio7 GPIO_A3 GPIO_ACTIVE_HIGH>;
+                       rockchip,delay = <10>;
+               };
+               */
+               /*lcd_cs:lcd_cs {
+                       rockchip,power_type = <REGULATOR>;
+                       rockchip,delay = <10>;
+               };
+
+               lcd_rst:lcd_rst {
+                       rockchip,power_type = <GPIO>;
+                       gpios = <&gpio3 GPIO_D6 GPIO_ACTIVE_HIGH>;
+                       rockchip,delay = <5>;
+               };*/
+               };
+       };
+};
diff --git a/arch/arm64/boot/dts/rk3368.dtsi b/arch/arm64/boot/dts/rk3368.dtsi
new file mode 100644 (file)
index 0000000..8f6a888
--- /dev/null
@@ -0,0 +1,1036 @@
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/pinctrl/rockchip-rk3288.h>
+
+#include "rk3368-clocks.dtsi"
+#include <dt-bindings/rkfb/rk_fb.h>
+
+/ {
+       compatible = "rockchip,rk3368";
+
+       interrupt-parent = <&gic>;
+       #address-cells = <2>;
+       #size-cells = <2>;
+
+       aliases { /* stable bus numbering for serial/i2c/spi controllers */
+               serial0 = &uart_bt;
+               serial1 = &uart_bb;
+               serial2 = &uart_dbg;
+               serial3 = &uart_gps;
+               serial4 = &uart_exp;
+               i2c0 = &i2c0;
+               i2c1 = &i2c1;
+               i2c2 = &i2c2;
+               i2c3 = &i2c3;
+               i2c4 = &i2c4;
+               i2c5 = &i2c5;
+               spi0 = &spi0;
+               spi1 = &spi1;
+               spi2 = &spi2;
+       };
+
+       cpus { /* only cpu@0 described here; presumably remaining cores come from a board/override file — confirm */
+               #address-cells = <2>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a53","arm,armv8";
+                       reg = <0x0 0x0>;
+               };
+       };
+
+       chosen { /* console on ttyS2; clk_ignore_unused keeps unclaimed clocks running during bring-up */
+               bootargs = "console=ttyS2 earlyprintk=uart8250-32bit,0xff690000 clk_ignore_unused";
+       };
+
+       timer { /* arm generic timer PPIs; mask covers 8 CPUs */
+               compatible = "arm,armv8-timer";
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
+               clock-frequency = <24000000>;
+       };
+
+       memory@00000000 { /* 0x20000000 = 512 MiB of DRAM at address 0 */
+               device_type = "memory";
+               reg = <0x0 0x00000000 0x0 0x20000000>;
+       };
+
+       gic: interrupt-controller@ffb70000 { /* NOTE(review): unit-address ffb70000 but first reg is 0xffb71000 — confirm which is intended */
+               compatible = "arm,cortex-a15-gic";
+               #interrupt-cells = <3>;
+               #address-cells = <0>;
+               interrupt-controller;
+               reg = <0x0 0xffb71000 0 0x1000>,
+                     <0x0 0xffb72000 0 0x1000>;
+       };
+
+       pmu_grf: syscon@ff738000 { /* PMU general register file */
+               compatible = "rockchip,rk3388-pmu-grf", "syscon"; /* NOTE(review): "rk3388" in an rk3368 tree — likely typo for rk3368; confirm against the driver's match table before changing */
+               reg = <0x0 0xff738000 0x0 0x100>;
+       };
+
+       sgrf: syscon@ff740000 { /* secure general register file */
+               compatible = "rockchip,rk3388-sgrf", "syscon"; /* NOTE(review): "rk3388" — see note on pmu_grf */
+               reg = <0x0 0xff740000 0x0 0x1000>;
+
+       };
+
+       grf: syscon@ff770000 { /* general register file */
+               compatible = "rockchip,rk3388-grf", "syscon"; /* NOTE(review): "rk3388" — see note on pmu_grf */
+               reg = <0x0 0xff770000 0x0 0x1000>;
+       };
+
+       rockchip_clocks_init: clocks-init{ /* static parent assignments and initial rates consumed by the "rockchip,clocks-init" driver */
+               compatible = "rockchip,clocks-init";
+               rockchip,clocks-init-parent =
+                       <&i2s_pll &clk_gpll>, <&spdif_8ch_pll &clk_gpll>,
+                       <&i2s_2ch_pll &clk_gpll>, <&usbphy_480m &usbotg_480m_out>,
+                       <&clk_uart_pll &clk_gpll>, <&aclk_gpu &clk_cpll>,
+                       <&clk_cs &clk_gpll>;
+               rockchip,clocks-init-rate = /* pairs of <clock rate-in-Hz> */
+                       <&clk_core_b 792000000>,        <&clk_core_l 600000000>,
+                       <&clk_gpll 576000000>,          <&clk_cpll 400000000>,
+                       /*<&clk_npll 500000000>,*/      <&aclk_bus 300000000>,
+                       <&hclk_bus 150000000>,          <&pclk_bus 75000000>,
+                       <&clk_crypto 150000000>,        <&aclk_peri 300000000>,
+                       <&hclk_peri 150000000>,         <&pclk_peri 75000000>,
+                       <&pclk_alive_pre 100000000>,    <&pclk_pmu_pre 100000000>,
+                       <&clk_cs 300000000>,            <&clkin_trace 300000000>,
+                       <&aclk_cci 600000000>,          <&clk_mac 50000000>,
+                       <&aclk_vio0 400000000>,         <&hclk_vio 100000000>,
+                       <&aclk_rga_pre 400000000>,      <&clk_rga 400000000>,
+                       <&clk_isp 400000000>,           <&clk_edp 200000000>,
+                       <&clk_gpu_core 400000000>,      <&aclk_gpu_mem 400000000>,
+                       <&aclk_gpu_cfg 400000000>,      <&aclk_vepu 400000000>,
+                       <&aclk_vdpu 400000000>,         <&clk_hevc_core 300000000>,
+                       <&clk_hevc_cabac 300000000>;
+               /*rockchip,clocks-uboot-has-init =
+                       <&aclk_vio0>;*/
+       };
+
+       rockchip_clocks_enable: clocks-enable { /* clocks claimed by the "rockchip,clocks-enable" driver (PLLs, bus fabric, always-on gates) */
+               compatible = "rockchip,clocks-enable";
+               clocks =
+                       /*PLL*/
+                       <&clk_apllb>,
+                       <&clk_aplll>,
+                       <&clk_dpll>,
+                       <&clk_gpll>,
+                       <&clk_cpll>,
+
+                       /*PD_CORE*/
+                       <&clk_cs>,
+                       <&clkin_trace>,
+
+                       /*PD_BUS*/
+                       <&aclk_bus>,
+                       <&hclk_bus>,
+                       <&pclk_bus>,
+                       <&clk_gates12 12>,/*aclk_strc_sys*/
+                       <&clk_gates12 6>,/*aclk_intmem1*/
+                       <&clk_gates12 5>,/*aclk_intmem0*/
+                       <&clk_gates12 4>,/*aclk_intmem*/
+                       <&clk_gates13 9>,/*aclk_gic400*/
+
+                       /*PD_ALIVE*/
+                       <&clk_gates22 13>,/*pclk_timer1*/
+                       <&clk_gates22 12>,/*pclk_timer0*/
+                       <&clk_gates22 9>,/*pclk_alive_niu*/
+                       <&clk_gates22 8>,/*pclk_grf*/
+
+                       /*PD_PMU*/
+                       <&clk_gates23 5>,/*pclk_pmugrf*/
+                       <&clk_gates23 3>,/*pclk_sgrf*/
+                       <&clk_gates23 2>,/*pclk_pmu_noc*/
+                       <&clk_gates23 1>,/*pclk_intmem1*/
+                       <&clk_gates23 0>,/*pclk_pmu*/
+
+                       /*PD_PERI*/
+                       <&clk_gates19 2>,/*aclk_peri_axi_matrix*/
+                       <&clk_gates20 8>,/*aclk_peri_niu*/
+                       <&clk_gates21 4>,/*aclk_peri_mmu*/
+                       <&clk_gates19 0>,/*hclk_peri_axi_matrix*/
+                       <&clk_gates20 7>,/*hclk_peri_ahb_arbi*/
+                       <&clk_gates19 1>;/*pclk_peri_axi_matrix*/
+       };
+
+
+       i2c0: i2c@ff650000 { /* rk30 I2C controller 0; GPIO fallback pinctrl for bus recovery; clocks still commented out */
+               compatible = "rockchip,rk30-i2c";
+               reg = <0x0 0xff650000 0x0 0x1000>;
+               interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-names = "default", "gpio";
+               pinctrl-0 = <&i2c0_xfer>;
+               pinctrl-1 = <&i2c0_gpio>;
+               gpios = <&gpio0 GPIO_A6 GPIO_ACTIVE_LOW>, <&gpio0 GPIO_A7 GPIO_ACTIVE_LOW>;
+               //clocks = <&clk_gates10 2>;
+               rockchip,check-idle = <1>;
+               status = "disabled";
+       };
+
+       i2c1: i2c@ff140000 { /* rk30 I2C controller 1 */
+               compatible = "rockchip,rk30-i2c";
+               reg = <0x0 0xff140000 0x0 0x1000>;
+               interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-names = "default", "gpio";
+               pinctrl-0 = <&i2c1_xfer>;
+               pinctrl-1 = <&i2c1_gpio>;
+               gpios = <&gpio2 GPIO_C5 GPIO_ACTIVE_LOW>, <&gpio2 GPIO_C6 GPIO_ACTIVE_LOW>;
+               //clocks = <&clk_gates10 3>;
+               rockchip,check-idle = <1>;
+               status = "disabled";
+       };
+
+       i2c2: i2c@ff660000 { /* rk30 I2C controller 2 */
+               compatible = "rockchip,rk30-i2c";
+               reg = <0x0 0xff660000 0x0 0x1000>;
+               interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-names = "default", "gpio";
+               pinctrl-0 = <&i2c2_xfer>;
+               pinctrl-1 = <&i2c2_gpio>;
+               gpios = <&gpio3 GPIO_D7 GPIO_ACTIVE_LOW>, <&gpio0 GPIO_B1 GPIO_ACTIVE_LOW>;
+               //clocks = <&clk_gates6 13>;
+               rockchip,check-idle = <1>;
+               status = "disabled";
+       };
+
+       i2c3: i2c@ff150000 { /* rk30 I2C controller 3 */
+               compatible = "rockchip,rk30-i2c";
+               reg = <0x0 0xff150000 0x0 0x1000>;
+               interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-names = "default", "gpio";
+               pinctrl-0 = <&i2c3_xfer>;
+               pinctrl-1 = <&i2c3_gpio>;
+               gpios = <&gpio1 GPIO_C1 GPIO_ACTIVE_LOW>, <&gpio1 GPIO_C0 GPIO_ACTIVE_LOW>;
+               //clocks = <&clk_gates6 14>;
+               rockchip,check-idle = <1>;
+               status = "disabled";
+       };
+
+       i2c4: i2c@ff160000 {
+               compatible = "rockchip,rk30-i2c";
+               reg = <0x0 0xff160000 0x0 0x1000>;
+               interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-names = "default", "gpio";
+               pinctrl-0 = <&i2c4_xfer>;
+               pinctrl-1 = <&i2c4_gpio>;
+               gpios = <&gpio3 GPIO_D0 GPIO_ACTIVE_LOW>, <&gpio3 GPIO_D1 GPIO_ACTIVE_LOW>;
+               //clocks = <&clk_gates6 15>;
+               rockchip,check-idle = <1>;
+               status = "disabled";
+       };
+
+       i2c5: i2c@ff170000 {
+               compatible = "rockchip,rk30-i2c";
+               reg = <0x0 0xff170000 0x0 0x1000>;
+               interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-names = "default", "gpio";
+               pinctrl-0 = <&i2c5_xfer>;
+               pinctrl-1 = <&i2c5_gpio>;
+               gpios = <&gpio3 GPIO_D2 GPIO_ACTIVE_LOW>, <&gpio3 GPIO_D3 GPIO_ACTIVE_LOW>;
+               //clocks = <&clk_gates7 0>;
+               rockchip,check-idle = <1>;
+               status = "disabled";
+       };
+       /* Framebuffer core node; NO_DUAL selects single-display mode. */
+       fb: fb{
+               compatible = "rockchip,rk-fb";
+               rockchip,disp-mode = <NO_DUAL>;
+       };
+
+       /* Screen/panel node (rockchip,screen); carries only the compatible
+        * here — presumably populated by the board dts, confirm. */
+       rk_screen: rk_screen{
+               compatible = "rockchip,screen";
+       };
+
+       /*
+        * UARTs (rockchip,serial): 24 MHz reference clock, registers at
+        * 4-byte stride with 32-bit access (reg-shift = 2, reg-io-width = 4).
+        * DMA wiring is commented out. uart_dbg's status line is commented
+        * out (so it stays enabled); the others default to disabled.
+        */
+       uart_bt: serial@ff180000 {
+               compatible = "rockchip,serial";
+               reg = <0x0 0xff180000 0x0 0x100>;
+               interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+               clock-frequency = <24000000>;
+               clocks = <&xin24m>, <&xin24m>;
+               clock-names = "sclk_uart", "pclk_uart";
+               reg-shift = <2>;
+               reg-io-width = <4>;
+               //dmas = <&pdma1 1>, <&pdma1 2>;
+               //#dma-cells = <2>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
+               status = "disabled";
+       };
+
+       uart_bb: serial@ff190000 {
+               compatible = "rockchip,serial";
+               reg = <0x0 0xff190000 0x0 0x100>;
+               interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+               clock-frequency = <24000000>;
+               clocks = <&xin24m>, <&xin24m>;
+               clock-names = "sclk_uart", "pclk_uart";
+               reg-shift = <2>;
+               reg-io-width = <4>;
+               //dmas = <&pdma1 3>, <&pdma1 4>;
+               //#dma-cells = <2>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&uart1_xfer &uart1_cts &uart1_rts>;
+               status = "disabled";
+       };
+
+       /* Debug console UART (no CTS/RTS pins muxed). */
+       uart_dbg: serial@ff690000 {
+               compatible = "rockchip,serial";
+               reg = <0x0 0xff690000 0x0 0x100>;
+               interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+               clock-frequency = <24000000>;
+               clocks = <&xin24m>, <&xin24m>;
+               clock-names = "sclk_uart", "pclk_uart";
+               reg-shift = <2>;
+               reg-io-width = <4>;
+               //dmas = <&pdma0 4>, <&pdma0 5>;
+               //#dma-cells = <2>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&uart2_xfer>;
+               //status = "disabled";
+       };
+
+       /* Only this UART sets an explicit default baud (current-speed). */
+       uart_gps: serial@ff1b0000 {
+               compatible = "rockchip,serial";
+               reg = <0x0 0xff1b0000 0x0 0x100>;
+               interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
+               clock-frequency = <24000000>;
+               clocks = <&xin24m>, <&xin24m>;
+               clock-names = "sclk_uart", "pclk_uart";
+               current-speed = <115200>;
+               reg-shift = <2>;
+               reg-io-width = <4>;
+               //dmas = <&pdma1 7>, <&pdma1 8>;
+               //#dma-cells = <2>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&uart3_xfer &uart3_cts &uart3_rts>;
+               status = "disabled";
+       };
+
+       uart_exp: serial@ff1c0000 {
+               compatible = "rockchip,serial";
+               reg = <0x0 0xff1c0000 0x0 0x100>;
+               interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+               clock-frequency = <24000000>;
+               clocks = <&xin24m>, <&xin24m>;
+               clock-names = "sclk_uart", "pclk_uart";
+               reg-shift = <2>;
+               reg-io-width = <4>;
+               //dmas = <&pdma1 9>, <&pdma1 10>;
+               //#dma-cells = <2>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&uart4_xfer &uart4_cts &uart4_rts>;
+               status = "disabled";
+       };
+
+       /*
+        * SPI controllers (rockchip,rockchip-spi). rockchip,spi-src-clk
+        * selects the source-clock index; clock and DMA wiring is commented
+        * out. num-cs gives the chip-select count (spi0 muxes cs0+cs1).
+        * All three default to disabled.
+        */
+       spi0: spi@ff110000 {
+               compatible = "rockchip,rockchip-spi";
+               reg = <0x0 0xff110000 0x0 0x1000>;
+               interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&spi0_clk &spi0_tx &spi0_rx &spi0_cs0 &spi0_cs1>;
+               rockchip,spi-src-clk = <0>;
+               num-cs = <2>;
+               //clocks =<&clk_spi0>, <&clk_gates6 4>;
+               //clock-names = "spi","pclk_spi0";
+               //dmas = <&pdma1 11>, <&pdma1 12>;
+               //#dma-cells = <2>;
+               //dma-names = "tx", "rx";
+               status = "disabled";
+       };
+
+       spi1: spi@ff120000 {
+               compatible = "rockchip,rockchip-spi";
+               reg = <0x0 0xff120000 0x0 0x1000>;
+               interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&spi1_clk &spi1_tx &spi1_rx &spi1_cs0>;
+               rockchip,spi-src-clk = <1>;
+               num-cs = <1>;
+               //clocks = <&clk_spi1>, <&clk_gates6 5>;
+               //clock-names = "spi","pclk_spi1";
+               //dmas = <&pdma1 13>, <&pdma1 14>;
+               //#dma-cells = <2>;
+               //dma-names = "tx", "rx";
+               status = "disabled";
+       };
+
+       /* NOTE(review): num-cs = <2> but only spi2_cs0 is muxed — confirm. */
+       spi2: spi@ff130000 {
+               compatible = "rockchip,rockchip-spi";
+               reg = <0x0 0xff130000 0x0 0x1000>;
+               interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&spi2_clk &spi2_tx &spi2_rx &spi2_cs0>;
+               rockchip,spi-src-clk = <2>;
+               num-cs = <2>;
+               //clocks = <&clk_spi2>, <&clk_gates6 6>;
+               //clock-names = "spi","pclk_spi2";
+               //dmas = <&pdma1 15>, <&pdma1 16>;
+               //#dma-cells = <2>;
+               //dma-names = "tx", "rx";
+               status = "disabled";
+       };
+
+
+       pinctrl: pinctrl {
+               compatible = "rockchip,rk3368-pinctrl";
+               rockchip,grf = <&grf>;
+               rockchip,pmu = <&pmu_grf>;
+               #address-cells = <2>;
+               #size-cells = <2>;
+               ranges;
+
+               /*
+                * GPIO banks 0-3 (rockchip,gpio-bank). Each bank is both a
+                * gpio-controller and an interrupt-controller with two-cell
+                * specifiers; bank clocks are commented out.
+                */
+               gpio0: gpio0@ff750000 {
+                       compatible = "rockchip,gpio-bank";
+                       reg =   <0x0 0xff750000 0x0 0x100>;
+                       interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+                       //clocks = <&clk_gates17 4>;
+
+                       gpio-controller;
+                       #gpio-cells = <2>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+               };
+
+               gpio1: gpio1@ff780000 {
+                       compatible = "rockchip,gpio-bank";
+                       reg = <0x0 0xff780000 0x0 0x100>;
+                       interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+                       //clocks = <&clk_gates14 1>;
+
+                       gpio-controller;
+                       #gpio-cells = <2>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+               };
+
+               gpio2: gpio2@ff790000 {
+                       compatible = "rockchip,gpio-bank";
+                       reg = <0x0 0xff790000 0x0 0x100>;
+                       interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+                       //clocks = <&clk_gates14 2>;
+
+                       gpio-controller;
+                       #gpio-cells = <2>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+               };
+
+               gpio3: gpio3@ff7a0000 {
+                       compatible = "rockchip,gpio-bank";
+                       reg = <0x0 0xff7a0000 0x0 0x100>;
+                       interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+                       //clocks = <&clk_gates14 3>;
+
+                       gpio-controller;
+                       #gpio-cells = <2>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+               };
+
+               /*
+                * Reusable pad configurations referenced from rockchip,pins
+                * entries: bias (pull-up / pull-down / none), drive strength
+                * (4 or 8 mA) and forced output level.
+                */
+               pcfg_pull_up: pcfg-pull-up {
+                       bias-pull-up;
+               };
+
+               pcfg_pull_down: pcfg-pull-down {
+                       bias-pull-down;
+               };
+
+               pcfg_pull_none: pcfg-pull-none {
+                       bias-disable;
+               };
+
+               pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
+                       drive-strength = <8>;
+               };
+
+               pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma {
+                       bias-pull-up;
+                       drive-strength = <8>;
+               };
+
+               pcfg_pull_none_drv_4ma: pcfg-pull-none-drv-4ma {
+                       drive-strength = <4>;
+               };
+
+               pcfg_pull_up_drv_4ma: pcfg-pull-up-drv-4ma {
+                       bias-pull-up;
+                       drive-strength = <4>;
+               };
+
+               pcfg_output_high: pcfg-output-high {
+                       output-high;
+               };
+
+               pcfg_output_low: pcfg-output-low {
+                       output-low;
+               };
+
+               /*
+                * I2C pinmux groups: each bus has an "xfer" group (pads muxed
+                * to the I2C controller function) and a "gpio" group (pads
+                * reclaimed as GPIOs), matching the i2cN controller nodes'
+                * pinctrl-0/pinctrl-1 references.
+                */
+               i2c0 {
+                       i2c0_xfer: i2c0-xfer {
+                               rockchip,pins = <0 GPIO_A6 RK_FUNC_1 &pcfg_pull_none>,
+                                               <0 GPIO_A7 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+                       i2c0_gpio: i2c0-gpio {
+                               rockchip,pins = <0 GPIO_A6 RK_FUNC_GPIO &pcfg_pull_none>,
+                                               <0 GPIO_A7 RK_FUNC_GPIO &pcfg_pull_none>;
+                       };
+               };
+
+               i2c1 {
+                       i2c1_xfer: i2c1-xfer {
+                               rockchip,pins = <2 GPIO_C5 RK_FUNC_1 &pcfg_pull_none>,
+                                               <2 GPIO_C6 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+                       i2c1_gpio: i2c1-gpio {
+                               rockchip,pins = <2 GPIO_C5 RK_FUNC_GPIO &pcfg_pull_none>,
+                                               <2 GPIO_C6 RK_FUNC_GPIO &pcfg_pull_none>;
+                       };
+               };
+
+               i2c2 {
+                       i2c2_xfer: i2c2-xfer {
+                               rockchip,pins = <3 GPIO_D7 RK_FUNC_2 &pcfg_pull_none>,
+                                               <0 GPIO_B1 RK_FUNC_2 &pcfg_pull_none>;
+                       };
+                       i2c2_gpio: i2c2-gpio {
+                               rockchip,pins = <3 GPIO_D7 RK_FUNC_GPIO &pcfg_pull_none>,
+                                               <0 GPIO_B1 RK_FUNC_GPIO &pcfg_pull_none>;
+                       };
+               };
+
+               i2c3 {
+                       i2c3_xfer: i2c3-xfer {
+                               rockchip,pins = <1 GPIO_C0 RK_FUNC_1 &pcfg_pull_none>,
+                                               <1 GPIO_C1 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+                       i2c3_gpio: i2c3-gpio {
+                               rockchip,pins = <1 GPIO_C0 RK_FUNC_GPIO &pcfg_pull_none>,
+                                               <1 GPIO_C1 RK_FUNC_GPIO &pcfg_pull_none>;
+                       };
+               };
+
+               i2c4 {
+                       i2c4_xfer: i2c4-xfer {
+                               rockchip,pins = <3 GPIO_D0 RK_FUNC_2 &pcfg_pull_none>,
+                                               <3 GPIO_D1 RK_FUNC_2 &pcfg_pull_none>;
+                       };
+                       i2c4_gpio: i2c4-gpio {
+                               rockchip,pins = <3 GPIO_D0 RK_FUNC_GPIO &pcfg_pull_none>,
+                                               <3 GPIO_D1 RK_FUNC_GPIO &pcfg_pull_none>;
+                       };
+               };
+
+               i2c5 {
+                       i2c5_xfer: i2c5-xfer {
+                               rockchip,pins = <3 GPIO_D2 RK_FUNC_2 &pcfg_pull_none>,
+                                               <3 GPIO_D3 RK_FUNC_2 &pcfg_pull_none>;
+                       };
+                       i2c5_gpio: i2c5-gpio {
+                               rockchip,pins = <3 GPIO_D2 RK_FUNC_GPIO &pcfg_pull_none>,
+                                               <3 GPIO_D3 RK_FUNC_GPIO &pcfg_pull_none>;
+                       };
+               };
+
+               /*
+                * UART pinmux groups. Each xfer group muxes RX with a
+                * pull-up and TX with no bias; CTS/RTS are separate groups
+                * so boards can opt out of flow control.
+                */
+               uart0 {
+                       uart0_xfer: uart0-xfer {
+                               rockchip,pins = <2 GPIO_D0 RK_FUNC_1 &pcfg_pull_up>,
+                                               <2 GPIO_D1 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+
+                       uart0_cts: uart0-cts {
+                               rockchip,pins = <2 GPIO_D2 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+
+                       uart0_rts: uart0-rts {
+                               rockchip,pins = <2 GPIO_D3 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+
+                       /* RTS pad as plain GPIO (e.g. for manual control). */
+                       uart0_rts_gpio: uart0-rts-gpio {
+                               rockchip,pins = <2 GPIO_D3 RK_FUNC_GPIO &pcfg_pull_none>;
+                       };
+               };
+
+               uart1 {
+                       uart1_xfer: uart1-xfer {
+                               rockchip,pins = <0 GPIO_C4 RK_FUNC_3 &pcfg_pull_up>,
+                                               <0 GPIO_C5 RK_FUNC_3 &pcfg_pull_none>;
+                       };
+
+                       uart1_cts: uart1-cts {
+                               rockchip,pins = <0 GPIO_C6 RK_FUNC_3 &pcfg_pull_none>;
+                       };
+
+                       uart1_rts: uart1-rts {
+                               rockchip,pins = <0 GPIO_C7 RK_FUNC_3 &pcfg_pull_none>;
+                       };
+               };
+
+               uart2 {
+                       uart2_xfer: uart2-xfer {
+                               rockchip,pins = <2 GPIO_A6 RK_FUNC_2 &pcfg_pull_up>,
+                                               <2 GPIO_A5 RK_FUNC_2 &pcfg_pull_none>;
+                       };
+               };
+
+               uart3 {
+                       uart3_xfer: uart3-xfer {
+                               rockchip,pins = <3 GPIO_D5 RK_FUNC_2 &pcfg_pull_up>,
+                                               <3 GPIO_D6 RK_FUNC_2 &pcfg_pull_none>;
+                       };
+
+                       uart3_cts: uart3-cts {
+                               rockchip,pins = <3 GPIO_C0 RK_FUNC_2 &pcfg_pull_none>;
+                       };
+
+                       uart3_rts: uart3-rts {
+                               rockchip,pins = <3 GPIO_C1 RK_FUNC_2 &pcfg_pull_none>;
+                       };
+               };
+
+               uart4 {
+                       uart4_xfer: uart4-xfer {
+                               rockchip,pins = <0 GPIO_D3 RK_FUNC_3 &pcfg_pull_up>,
+                                               <0 GPIO_D2 RK_FUNC_3 &pcfg_pull_none>;
+                       };
+
+                       uart4_cts: uart4-cts {
+                               rockchip,pins = <0 GPIO_D0 RK_FUNC_3 &pcfg_pull_none>;
+                       };
+
+                       uart4_rts: uart4-rts {
+                               rockchip,pins = <0 GPIO_D1 RK_FUNC_3 &pcfg_pull_none>;
+                       };
+               };
+
+               /* SPI pinmux groups: clk / tx / rx / chip-selects, all with
+                * pull-ups. spi0 additionally provides cs1. */
+               spi0 {
+                       spi0_clk: spi0-clk {
+                               rockchip,pins = <1 GPIO_D5 RK_FUNC_2 &pcfg_pull_up>;
+                       };
+                       spi0_cs0: spi0-cs0 {
+                               rockchip,pins = <1 GPIO_D0 RK_FUNC_3 &pcfg_pull_up>;
+                       };
+                       spi0_tx: spi0-tx {
+                               rockchip,pins = <1 GPIO_C7 RK_FUNC_3 &pcfg_pull_up>;
+                       };
+                       spi0_rx: spi0-rx {
+                               rockchip,pins = <1 GPIO_C6 RK_FUNC_3 &pcfg_pull_up>;
+                       };
+                       spi0_cs1: spi0-cs1 {
+                               rockchip,pins = <1 GPIO_D1 RK_FUNC_3 &pcfg_pull_up>;
+                       };
+               };
+
+               spi1 {
+                       spi1_clk: spi1-clk {
+                               rockchip,pins = <1 GPIO_B6 RK_FUNC_2 &pcfg_pull_up>;
+                       };
+                       spi1_cs0: spi1-cs0 {
+                               rockchip,pins = <1 GPIO_B7 RK_FUNC_2 &pcfg_pull_up>;
+                       };
+                       spi1_rx: spi1-rx {
+                               rockchip,pins = <1 GPIO_C0 RK_FUNC_2 &pcfg_pull_up>;
+                       };
+                       spi1_tx: spi1-tx {
+                               rockchip,pins = <1 GPIO_C1 RK_FUNC_2 &pcfg_pull_up>;
+                       };
+               };
+
+               spi2 {
+                       spi2_clk: spi2-clk {
+                               rockchip,pins = <0 GPIO_B4 RK_FUNC_2 &pcfg_pull_up>;
+                       };
+                       spi2_cs0: spi2-cs0 {
+                               rockchip,pins = <0 GPIO_B5 RK_FUNC_2 &pcfg_pull_up>;
+                       };
+                       spi2_rx: spi2-rx {
+                               rockchip,pins = <0 GPIO_B2 RK_FUNC_2 &pcfg_pull_up>;
+                       };
+                       spi2_tx: spi2-tx {
+                               rockchip,pins = <0 GPIO_B3 RK_FUNC_2 &pcfg_pull_up>;
+                       };
+               };
+
+               /* I2S pinmux: clocks (mclk/sclk), LR clocks, serial data in
+                * and four data-out lines; i2s_gpio reclaims all pads as
+                * GPIOs. */
+               i2s {
+                       i2s_mclk: i2s-mclk {
+                               rockchip,pins = <2 GPIO_C4 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+
+                       i2s_sclk:i2s-sclk {
+                               rockchip,pins = <2 GPIO_B4 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+
+                       i2s_lrckrx:i2s-lrckrx {
+                               rockchip,pins = <2 GPIO_B5 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+
+                       i2s_lrcktx:i2s-lrcktx {
+                               rockchip,pins = <2 GPIO_B6 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+
+                       i2s_sdi:i2s-sdi {
+                               rockchip,pins = <2 GPIO_B7 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+
+                       i2s_sdo0:i2s-sdo0 {
+                               rockchip,pins = <2 GPIO_C0 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+
+                       i2s_sdo1:i2s-sdo1 {
+                               rockchip,pins = <2 GPIO_C1 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+
+                       i2s_sdo2:i2s-sdo2 {
+                               rockchip,pins = <2 GPIO_C2 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+
+                       i2s_sdo3:i2s-sdo3 {
+                               rockchip,pins = <2 GPIO_C3 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+
+                       i2s_gpio: i2s-gpio {
+                               rockchip,pins = <2 GPIO_C4  RK_FUNC_GPIO &pcfg_pull_none>,
+                                               <2 GPIO_B4 RK_FUNC_GPIO &pcfg_pull_none>,
+                                               <2 GPIO_B5 RK_FUNC_GPIO &pcfg_pull_none>,
+                                               <2 GPIO_B6 RK_FUNC_GPIO &pcfg_pull_none>,
+                                               <2 GPIO_B7 RK_FUNC_GPIO &pcfg_pull_none>,
+                                               <2 GPIO_C0 RK_FUNC_GPIO &pcfg_pull_none>,
+                                               <2 GPIO_C1 RK_FUNC_GPIO &pcfg_pull_none>,
+                                               <2 GPIO_C2 RK_FUNC_GPIO &pcfg_pull_none>,
+                                               <2 GPIO_C3 RK_FUNC_GPIO &pcfg_pull_none>;
+                       };
+               };
+
+               /* SPDIF transmit pad. */
+               spdif {
+                       spdif_tx: spdif-tx {
+                               rockchip,pins = <2 GPIO_C7 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+               };
+
+               /*
+                * SD/MMC card pinmux: clk has no pull, cmd/detect/data use
+                * 4 mA pull-ups; bus1/bus4 select 1- or 4-bit data width.
+                * The *_gpio groups reclaim every pad as GPIO.
+                */
+               sdmmc {
+                       sdmmc_clk: sdmmc-clk {
+                               rockchip,pins = <2 GPIO_B1 RK_FUNC_1 &pcfg_pull_none_drv_4ma>;
+                       };
+
+                       sdmmc_cmd: sdmmc-cmd {
+                               rockchip,pins = <2 GPIO_B2 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+                       };
+
+                       sdmmc_dectn: sdmmc-dectn {
+                               rockchip,pins = <2 GPIO_B3 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+                       };
+
+                       sdmmc_bus1: sdmmc-bus1 {
+                               rockchip,pins = <2 GPIO_A5 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+                       };
+
+                       sdmmc_bus4: sdmmc-bus4 {
+                               rockchip,pins = <2 GPIO_A5 RK_FUNC_1 &pcfg_pull_up_drv_4ma>,
+                                               <2 GPIO_A6 RK_FUNC_1 &pcfg_pull_up_drv_4ma>,
+                                               <2 GPIO_A7 RK_FUNC_1 &pcfg_pull_up_drv_4ma>,
+                                               <2 GPIO_B0 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+                       };
+
+                       sdmmc_gpio: sdmmc-gpio {
+                               rockchip,pins = <2 GPIO_B1 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//CLK
+                                               <2 GPIO_B2 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//CMD
+                                               <2 GPIO_B3 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//DET
+                                               <2 GPIO_A5 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//D0
+                                               <2 GPIO_A6 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//D1
+                                               <2 GPIO_A7 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//D2
+                                               <2 GPIO_B0 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>;//D3
+                       };
+               };
+
+               /* SDIO0 pinmux (typically WiFi): data/cmd/clk plus detect,
+                * write-protect, power-enable, backup-power and interrupt
+                * pads; sdio0_gpio reclaims them all as GPIOs. */
+               sdio0 {
+                       sdio0_bus1: sdio0-bus1 {
+                               rockchip,pins = <2 GPIO_D4 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+                       };
+
+                       sdio0_bus4: sdio0-bus4 {
+                               rockchip,pins = <2 GPIO_D4 RK_FUNC_1 &pcfg_pull_up_drv_4ma>,
+                                               <2 GPIO_D5 RK_FUNC_1 &pcfg_pull_up_drv_4ma>,
+                                               <2 GPIO_D6 RK_FUNC_1 &pcfg_pull_up_drv_4ma>,
+                                               <2 GPIO_D7 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+                       };
+
+                       sdio0_cmd: sdio0-cmd {
+                               rockchip,pins = <3 GPIO_A0 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+                       };
+
+                       sdio0_clk: sdio0-clk {
+                               rockchip,pins = <3 GPIO_A1 RK_FUNC_1 &pcfg_pull_none_drv_4ma>;
+                       };
+
+                       sdio0_dectn: sdio0-dectn {
+                               rockchip,pins = <3 GPIO_A2 RK_FUNC_1 &pcfg_pull_up>;
+                       };
+
+                       sdio0_wrprt: sdio0-wrprt {
+                               rockchip,pins = <3 GPIO_A3 RK_FUNC_1 &pcfg_pull_up>;
+                       };
+
+                       sdio0_pwren: sdio0-pwren {
+                               rockchip,pins = <3 GPIO_A4 RK_FUNC_1 &pcfg_pull_up>;
+                       };
+
+                       sdio0_bkpwr: sdio0-bkpwr {
+                               rockchip,pins = <3 GPIO_A5 RK_FUNC_1 &pcfg_pull_up>;
+                       };
+
+                       sdio0_int: sdio0-int {
+                               rockchip,pins = <3 GPIO_A6 RK_FUNC_1 &pcfg_pull_up>;
+                       };
+
+                       sdio0_gpio: sdio0-gpio {
+                               rockchip,pins = <3 GPIO_A0 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//CMD
+                                               <3 GPIO_A1 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//CLK
+                                               <3 GPIO_A2 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//DET
+                                               <3 GPIO_A3 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//wrprt
+                                               <3 GPIO_A4 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//PWREN
+                                               <3 GPIO_A5 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//BKPWR
+                                               <3 GPIO_A6 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//INTN
+                                               <2 GPIO_D4 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//D0
+                                               <2 GPIO_D5 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//D1
+                                               <2 GPIO_D6 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>,//D2
+                                               <2 GPIO_D7 RK_FUNC_GPIO &pcfg_pull_up_drv_4ma>;//D3
+                       };
+               };
+
+               /* eMMC pinmux: 8 mA drive on clk/cmd/data; power-enable and
+                * reset pads without extra drive. Only 1-/4-bit data groups
+                * are defined here. */
+               emmc {
+                       emmc_clk: emmc-clk {
+                               rockchip,pins = <2 GPIO_A4 RK_FUNC_2 &pcfg_pull_none_drv_8ma>;
+                       };
+
+                       emmc_cmd: emmc-cmd {
+                               rockchip,pins = <1 GPIO_D2 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+                       };
+
+                       emmc_pwren: emmc-pwren {
+                               rockchip,pins = <1 GPIO_D3 RK_FUNC_2 &pcfg_pull_none>;
+                       };
+
+                       emmc_rstnout: emmc_rstnout {
+                               rockchip,pins = <2 GPIO_A3 RK_FUNC_2 &pcfg_pull_none>;
+                       };
+
+                       emmc_bus1: emmc-bus1 {
+                               rockchip,pins = <1 GPIO_C2 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;//D0
+                       };
+
+                       emmc_bus4: emmc-bus4 {
+                               rockchip,pins = <1 GPIO_C2 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,//D0
+                                               <1 GPIO_C3 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,//D1
+                                               <1 GPIO_C4 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,//D2
+                                               <1 GPIO_C5 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;//D3
+                       };
+               };
+
+               /* PWM pads. pwm0's pad doubles as the VOP PWM on function 3. */
+               pwm0 {
+                       pwm0_pin: pwm0-pin {
+                               rockchip,pins = <3 GPIO_B0 RK_FUNC_2 &pcfg_pull_none>;
+                       };
+
+                       vop_pwm_pin:vop-pwm {
+                               rockchip,pins = <3 GPIO_B0 RK_FUNC_3 &pcfg_pull_none>;
+                       };
+               };
+
+               pwm1 {
+                       pwm1_pin: pwm1-pin {
+                               rockchip,pins = <0 GPIO_B0 RK_FUNC_2 &pcfg_pull_none>;
+                       };
+               };
+
+               pwm3 {
+                       pwm3_pin: pwm3-pin {
+                               rockchip,pins = <3 GPIO_D6 RK_FUNC_3 &pcfg_pull_none>;
+                       };
+               };
+
+               /* LCD controller sync pads (DCLK/DEN/HSYNC/VSYNC); the gpio
+                * group reclaims them as GPIOs. */
+               lcdc {
+                       lcdc_lcdc: lcdc-lcdc {
+                               rockchip,pins = <0 GPIO_D7 RK_FUNC_1 &pcfg_pull_none>,//DCLK
+                                               <0 GPIO_D6 RK_FUNC_1 &pcfg_pull_none>,//DEN
+                                               <0 GPIO_D4 RK_FUNC_1 &pcfg_pull_none>,//HSYNC
+                                               <0 GPIO_D5 RK_FUNC_1 &pcfg_pull_none>;//VSYNC
+                       };
+
+                       lcdc_gpio: lcdc-gpio {
+                               rockchip,pins = <0 GPIO_D7 RK_FUNC_GPIO &pcfg_pull_none>,//DCLK
+                                               <0 GPIO_D6 RK_FUNC_GPIO &pcfg_pull_none>,//DEN
+                                               <0 GPIO_D4 RK_FUNC_GPIO &pcfg_pull_none>,//HSYNC
+                                               <0 GPIO_D5 RK_FUNC_GPIO &pcfg_pull_none>;//VSYNC
+                       };
+               };
+
+               isp {
+                       cif_clkout: cif-clkout {
+                               rockchip,pins = <1 GPIO_B3 RK_FUNC_1 &pcfg_pull_none>;//cif_clkout              
+                       };
+
+                       isp_dvp_d2d9: isp-dvp-d2d9 {
+                               rockchip,pins = <1 GPIO_A0 RK_FUNC_1 &pcfg_pull_none>,//cif_data2
+                                               <1 GPIO_A1 RK_FUNC_1 &pcfg_pull_none>,//cif_data3
+                                               <1 GPIO_A2 RK_FUNC_1 &pcfg_pull_none>,//cif_data4
+                                               <1 GPIO_A3 RK_FUNC_1 &pcfg_pull_none>,//cif_data5
+                                               <1 GPIO_A4 RK_FUNC_1 &pcfg_pull_none>,//cif_data6
+                                               <1 GPIO_A5 RK_FUNC_1 &pcfg_pull_none>,//cif_data7
+                                               <1 GPIO_A6 RK_FUNC_1 &pcfg_pull_none>,//cif_data8
+                                               <1 GPIO_A7 RK_FUNC_1 &pcfg_pull_none>,//cif_data9
+                                               <1 GPIO_B0 RK_FUNC_1 &pcfg_pull_none>,//cif_sync
+                                               <1 GPIO_B1 RK_FUNC_1 &pcfg_pull_none>,//cif_href
+                                               <1 GPIO_B2 RK_FUNC_1 &pcfg_pull_none>,//cif_clkin
+                                               <1 GPIO_B3 RK_FUNC_1 &pcfg_pull_none>;//cif_clkout
+                       };
+                       
+                       isp_dvp_d0d1: isp-dvp-d0d1 {
+                               rockchip,pins = <1 GPIO_B4 RK_FUNC_1 &pcfg_pull_none>,//cif_data0
+                                               <1 GPIO_B5 RK_FUNC_1 &pcfg_pull_none>;//cif_data1
+                       };
+
+                       isp_dvp_d10d11:isp_d10d11       {
+                               rockchip,pins = <1 GPIO_B6 RK_FUNC_1 &pcfg_pull_none>,//cif_data10
+                                               <1 GPIO_B7 RK_FUNC_1 &pcfg_pull_none>;//cif_data11
+                       };
+                       
+                       isp_dvp_d0d7: isp-dvp-d0d7 {
+                               rockchip,pins = <1 GPIO_B4 RK_FUNC_1 &pcfg_pull_none>,//cif_data0
+                                               <1 GPIO_B5 RK_FUNC_1 &pcfg_pull_none>,//cif_data1
+                                               <1 GPIO_A0 RK_FUNC_1 &pcfg_pull_none>,//cif_data2
+                                               <1 GPIO_A1 RK_FUNC_1 &pcfg_pull_none>,//cif_data3
+                                               <1 GPIO_A2 RK_FUNC_1 &pcfg_pull_none>,//cif_data4
+                                               <1 GPIO_A3 RK_FUNC_1 &pcfg_pull_none>,//cif_data5
+                                               <1 GPIO_A4 RK_FUNC_1 &pcfg_pull_none>,//cif_data6
+                                               <1 GPIO_A5 RK_FUNC_1 &pcfg_pull_none>;//cif_data7
+                       };
+
+                       isp_shutter: isp-shutter {
+                               rockchip,pins = <3 GPIO_C3 RK_FUNC_2 &pcfg_pull_none>, //SHUTTEREN
+                                               <3 GPIO_C6 RK_FUNC_2 &pcfg_pull_none>;//SHUTTERTRIG
+                       };
+
+                       isp_flash_trigger: isp-flash-trigger {
+                               rockchip,pins = <3 GPIO_C4 RK_FUNC_2 &pcfg_pull_none>; //ISP_FLASHTRIGOU
+                       };
+
+                       isp_prelight: isp-prelight {
+                               rockchip,pins = <3 GPIO_C5 RK_FUNC_2 &pcfg_pull_none>;//ISP_PRELIGHTTRIG
+                       };
+
+                       isp_flash_trigger_as_gpio: isp_flash_trigger_as_gpio {
+                               rockchip,pins = <3 GPIO_C4 RK_FUNC_GPIO &pcfg_pull_none>;//ISP_FLASHTRIGOU
+                       };
+               };
+
+               gps {
+                       gps_mag: gps-mag {
+                               rockchip,pins = <3 GPIO_B6 RK_FUNC_2 &pcfg_pull_none>;
+                       };
+
+                       gps_sig: gps-sig {
+                               rockchip,pins = <3 GPIO_B7 RK_FUNC_2 &pcfg_pull_none>;
+
+                       };
+
+                       gps_rfclk: gps-rfclk {
+                               rockchip,pins = <3 GPIO_C0 RK_FUNC_3 &pcfg_pull_none>;
+                       };
+               };
+
+               gmac {
+                       mac_clk: mac-clk {
+                               rockchip,pins = <3 GPIO_C6 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+                       
+                       mac_txpins: mac-txpins {
+                               rockchip,pins = <3 GPIO_B0 RK_FUNC_1 &pcfg_pull_none>,//TXD0
+                                               <3 GPIO_B1 RK_FUNC_1 &pcfg_pull_none>,//TXD1
+                                               <3 GPIO_B2 RK_FUNC_1 &pcfg_pull_none>,//TXD2
+                                               <3 GPIO_B6 RK_FUNC_1 &pcfg_pull_none>,//TXD3
+                                               <3 GPIO_B5 RK_FUNC_1 &pcfg_pull_none>,//TXEN
+                                               <3 GPIO_D4 RK_FUNC_1 &pcfg_pull_none>;//TXCLK
+                       };
+                       
+                       mac_rxpins: mac-rxpins {
+                               rockchip,pins = <3 GPIO_B7 RK_FUNC_1 &pcfg_pull_none>,//RXD0
+                                               <3 GPIO_C0 RK_FUNC_1 &pcfg_pull_none>,//RXD1
+                                               <3 GPIO_C1 RK_FUNC_1 &pcfg_pull_none>,//RXD2
+                                               <3 GPIO_C2 RK_FUNC_1 &pcfg_pull_none>,//RXD3
+                                               <3 GPIO_C4 RK_FUNC_1 &pcfg_pull_none>,//RXDV
+                                               <3 GPIO_C5 RK_FUNC_1 &pcfg_pull_none>,//RXER
+                                               <3 GPIO_D1 RK_FUNC_1 &pcfg_pull_none>,//RXCLK
+                                               <3 GPIO_B4 RK_FUNC_1 &pcfg_pull_none>;//COL
+                       };
+                       
+                       mac_crs: mac-crs {
+                               rockchip,pins = <3 GPIO_B3 RK_FUNC_1 &pcfg_pull_none>; //CRS
+                       };
+                       
+                       mac_mdpins: mac-mdpins {
+                               rockchip,pins = <3 GPIO_D0 RK_FUNC_1 &pcfg_pull_none>,//MDIO
+                                               <3 GPIO_C3 RK_FUNC_1 &pcfg_pull_none>;//MDC
+                       };
+               };
+
+               tsadc_pin {
+                       tsadc_int: tsadc-int {
+                               rockchip,pins = <0 GPIO_A3 RK_FUNC_1 &pcfg_pull_none>;
+                       };
+                       tsadc_gpio: tsadc-gpio {
+                               rockchip,pins = <0 GPIO_A3 RK_FUNC_GPIO &pcfg_pull_none>;
+                       };
+               };
+       };
+
+       lvds: lvds@ff968000 {
+               compatible = "rockchip,rk3368-lvds";
+               reg = <0x0 0xff968000 0x0 0x4000>, <0x0 0xff9600b0 0x0 0x01>;
+               reg-names = "mipi_lvds_phy", "mipi_lvds_ctl";
+               clocks = <&dummy>, <&dummy>;
+               clock-names = "pclk_lvds", "pclk_lvds_ctl";
+               status = "disabled";
+       };
+
+       lcdc: lcdc@ff930000 {
+               compatible = "rockchip,rk3368-lcdc";
+               rockchip,prop = <PRMRY>;
+               rockchip,pwr18 = <0>;
+               rockchip,iommu-enabled = <0>;
+               reg = <0x0 0xff930000 0x0 0x10000>;
+               interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+               pinctrl-names = "default", "gpio";
+               pinctrl-0 = <&lcdc_lcdc>;
+               pinctrl-1 = <&lcdc_gpio>;
+               status = "disabled";
+               clocks = <&dummy>, <&dummy>, <&dummy>, <&dummy>, <&dummy>;
+               clock-names = "aclk_lcdc", "dclk_lcdc", "hclk_lcdc", "pd_lcdc", "sclk_pll";
+       };
+};
diff --git a/arch/arm64/configs/rockchip_defconfig b/arch/arm64/configs/rockchip_defconfig
new file mode 100644 (file)
index 0000000..a362500
--- /dev/null
@@ -0,0 +1,587 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_LOG_BUF_SHIFT=19
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_PANIC_TIMEOUT=1
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CMDLINE_PARTITION=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_BLOCK_RKNAND=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_PREEMPT=y
+CONFIG_ARMV7_COMPAT=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+# CONFIG_COREDUMP is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_RUNTIME=y
+CONFIG_PM_DEBUG=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_SUSPEND_TIME=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_NF_NAT_IPV4=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_RK=y
+CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_UID_STAT=y
+CONFIG_SRAM=y
+CONFIG_MPU_SENSORS_TIMERIRQ=y
+CONFIG_INV_SENSORS=y
+CONFIG_MPU_SENSORS_MPU6050B1=y
+CONFIG_MPU_SENSORS_MPU6050_ACCEL=y
+CONFIG_MPU_SENSORS_AK8963=y
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_RK_GMAC_ETH=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_SLIP=y
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_USB_CATC=y
+CONFIG_USB_KAWETH=y
+CONFIG_USB_PEGASUS=y
+CONFIG_USB_RTL8150=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_CDC_EEM=y
+CONFIG_USB_NET_CDC_MBIM=y
+CONFIG_USB_NET_DM9601=y
+CONFIG_USB_NET_DM9620=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_USB_NET_GL620A=y
+CONFIG_USB_NET_PLUSB=y
+CONFIG_USB_NET_MCS7830=y
+CONFIG_USB_NET_RNDIS_HOST=y
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_CX82310_ETH=y
+CONFIG_USB_NET_KALMIA=y
+CONFIG_USB_NET_QMI_WWAN=y
+CONFIG_USB_HSO=y
+CONFIG_USB_NET_INT51X1=y
+CONFIG_USB_IPHETH=y
+CONFIG_USB_SIERRA_NET=y
+CONFIG_RTL8188EU=y
+CONFIG_ESP8089=y
+CONFIG_RKWIFI=y
+CONFIG_AP6335=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=y
+CONFIG_TABLET_USB_AIPTEK=y
+CONFIG_TABLET_USB_GTCO=y
+CONFIG_TABLET_USB_HANWANG=y
+CONFIG_TABLET_USB_KBTAB=y
+CONFIG_TABLET_USB_WACOM=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ZET62XX=y
+CONFIG_TOUCHSCREEN_GT8XX=y
+CONFIG_TOUCHSCREEN_VTL_CT36X=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_RICOH619_PWRKEY=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_GS_LIS3DH is not set
+# CONFIG_GS_MMA7660 is not set
+CONFIG_GS_MC3230=y
+# CONFIG_GS_LSM303D is not set
+# CONFIG_COMPASS_DEVICE is not set
+# CONFIG_GYROSCOPE_DEVICE is not set
+# CONFIG_HALL_DEVICE is not set
+CONFIG_ROCKCHIP_REMOTECTL=y
+CONFIG_ROCKCHIP_REMOTECTL_PWM=y
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_ROCKCHIP=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_ROCKCHIP=y
+CONFIG_I2C_ROCKCHIP_COMPAT=y
+CONFIG_SPI=y
+CONFIG_SPI_ROCKCHIP_CORE=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_ROCKCHIP_DMA=y
+CONFIG_SPI_ROCKCHIP_TEST=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_CHARGER_RT5025=y
+CONFIG_BATTERY_RT5025=y
+CONFIG_CHARGER_RT5036=y
+CONFIG_RT_POWER=y
+CONFIG_BATTERY_RICOH619=y
+CONFIG_BATTERY_BQ24296=y
+CONFIG_BATTERY_BQ27320=y
+CONFIG_BATTERY_RK30_ADC_FAC=y
+CONFIG_CW2015_BATTERY=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_SENSORS_ROCKCHIP_TSADC=y
+CONFIG_THERMAL=y
+CONFIG_MFD_RT5025=y
+CONFIG_MISC_RT5025=y
+CONFIG_IRQ_RT5025=y
+CONFIG_DEBUG_RT5025=y
+CONFIG_MFD_RT5036=y
+CONFIG_MISC_RT5036=y
+CONFIG_IRQ_RT5036=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_RK818=y
+CONFIG_MFD_RICOH619=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_ACT8846=y
+CONFIG_ACT8846_SUPPORT_RESET=y
+CONFIG_REGULATOR_ACT8931=y
+CONFIG_REGULATOR_RT5025=y
+CONFIG_REGULATOR_RT5036=y
+CONFIG_ROCKCHIP_PWM_REGULATOR=y
+CONFIG_REGULATOR_SYR82X=y
+CONFIG_REGULATOR_RICOH619=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_FB=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_FB_ROCKCHIP=y
+CONFIG_LCDC_RK3368=y
+CONFIG_RK_TRSM=y
+CONFIG_DP_ANX6345=y
+CONFIG_RK_HDMI=y
+# CONFIG_HDMI_RK3288 is not set
+# CONFIG_HDMI_RK3036 is not set
+CONFIG_RK_TVENCODER=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_VERBOSE_PRINTK=y
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_RK_SOC=y
+CONFIG_SND_RK_SOC_HDMI_I2S=y
+CONFIG_SND_RK_SOC_SPDIF_CARD=y
+CONFIG_SND_RK_SOC_RT5631=y
+CONFIG_SND_RK_SOC_RT3224=y
+CONFIG_SND_RK_SOC_RK312X=y
+CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_APPLEIR=y
+CONFIG_HID_AUREAL=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_HOLTEK=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_ICADE=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LENOVO_TPKBD=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_PS3REMOTE=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_STEELSERIES=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THINGM=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_RK=y
+CONFIG_USB_ACM=y
+CONFIG_USB_PRINTER=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_STORAGE_ENE_UB6250=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_OPTION=y
+CONFIG_USB_TRANCEVIBRATOR=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_G_ANDROID=y
+CONFIG_USB20_HOST=y
+CONFIG_USB20_OTG=y
+CONFIG_MMC=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_BLOCK_MINORS=32
+# CONFIG_MMC_BLOCK_BOUNCE is not set
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_IDMAC=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HYM8563=y
+CONFIG_RTC_RT5036=y
+CONFIG_RTC_DRV_RC5T619=y
+CONFIG_STAGING=y
+CONFIG_ZSMALLOC=y
+CONFIG_ZRAM=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ANDROID_INTF_ALARM_DEV=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_SW_SYNC_USER=y
+CONFIG_ION=y
+CONFIG_ION_ROCKCHIP=y
+CONFIG_ION_CMA_HIGHMEM=y
+CONFIG_ION_ROCKCHIP_SNAPSHOT=y
+CONFIG_ION_SNAPSHOT_BUF_SHIFT=15
+CONFIG_FIQ_DEBUGGER=y
+CONFIG_FIQ_DEBUGGER_NO_SLEEP=y
+CONFIG_FIQ_DEBUGGER_CONSOLE=y
+CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE=y
+CONFIG_COMMON_CLK_DEBUG=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_ROCKCHIP_IOVMM=y
+CONFIG_IIO=y
+CONFIG_ROCKCHIP_ADC=y
+CONFIG_PWM=y
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_RESET_CONTROLLER=y
+CONFIG_RK_HEADSET=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FUSE_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_CIFS=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_SCHED_TRACER=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_LSM_MMAP_MIN_ADDR=4096
+CONFIG_SECURITY_SELINUX=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
index 2070a56ecc468677f3b3109c387311a8615c1a57..a3f935fde97527d2189fb7f6afffc9dc5102ba57 100644 (file)
@@ -35,4 +35,4 @@ AFLAGS_aes-neon.o     := -DINTERLEAVE=4
 CFLAGS_aes-glue-ce.o   := -DUSE_V8_CRYPTO_EXTENSIONS
 
 $(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
-       $(call if_changed_dep,cc_o_c)
+       $(call if_changed_rule,cc_o_c)
index 60f2f4c122561fc1f0a85b83305e706c163db4d5..79cd911ef88c3e95806db16f55bffc904ff961e2 100644 (file)
@@ -106,7 +106,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
        for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
                aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_enc, rounds, blocks, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -128,7 +128,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
        for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
                aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_dec, rounds, blocks, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -151,7 +151,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
                                first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -174,7 +174,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_dec, rounds, blocks, walk.iv,
                                first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -243,7 +243,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key1.key_enc, rounds, blocks,
                                (u8 *)ctx->key2.key_enc, walk.iv, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
 
@@ -267,7 +267,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key1.key_dec, rounds, blocks,
                                (u8 *)ctx->key2.key_enc, walk.iv, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
 
index b9e6eaf41c9be14c5f5269477203e9eaf5565eda..607928ad1fdf04fc3d335bf9f972a486c8b8eea1 100644 (file)
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-       DATA    .req    v0
-       SHASH   .req    v1
-       IN1     .req    v2
+       SHASH   .req    v0
+       SHASH2  .req    v1
        T1      .req    v2
        T2      .req    v3
-       T3      .req    v4
-       VZR     .req    v5
+       MASK    .req    v4
+       XL      .req    v5
+       XM      .req    v6
+       XH      .req    v7
+       IN1     .req    v7
 
        .text
        .arch           armv8-a+crypto
         *                         struct ghash_key const *k, const char *head)
         */
 ENTRY(pmull_ghash_update)
-       ld1             {DATA.16b}, [x1]
        ld1             {SHASH.16b}, [x3]
-       eor             VZR.16b, VZR.16b, VZR.16b
+       ld1             {XL.16b}, [x1]
+       movi            MASK.16b, #0xe1
+       ext             SHASH2.16b, SHASH.16b, SHASH.16b, #8
+       shl             MASK.2d, MASK.2d, #57
+       eor             SHASH2.16b, SHASH2.16b, SHASH.16b
 
        /* do the head block first, if supplied */
        cbz             x4, 0f
-       ld1             {IN1.2d}, [x4]
+       ld1             {T1.2d}, [x4]
        b               1f
 
-0:     ld1             {IN1.2d}, [x2], #16
+0:     ld1             {T1.2d}, [x2], #16
        sub             w0, w0, #1
-1:     ext             IN1.16b, IN1.16b, IN1.16b, #8
-CPU_LE(        rev64           IN1.16b, IN1.16b        )
-       eor             DATA.16b, DATA.16b, IN1.16b
 
-       /* multiply DATA by SHASH in GF(2^128) */
-       ext             T2.16b, DATA.16b, DATA.16b, #8
-       ext             T3.16b, SHASH.16b, SHASH.16b, #8
-       eor             T2.16b, T2.16b, DATA.16b
-       eor             T3.16b, T3.16b, SHASH.16b
+1:     /* multiply XL by SHASH in GF(2^128) */
+CPU_LE(        rev64           T1.16b, T1.16b  )
 
-       pmull2          T1.1q, SHASH.2d, DATA.2d        // a1 * b1
-       pmull           DATA.1q, SHASH.1d, DATA.1d      // a0 * b0
-       pmull           T2.1q, T2.1d, T3.1d             // (a1 + a0)(b1 + b0)
-       eor             T2.16b, T2.16b, T1.16b          // (a0 * b1) + (a1 * b0)
-       eor             T2.16b, T2.16b, DATA.16b
+       ext             T2.16b, XL.16b, XL.16b, #8
+       ext             IN1.16b, T1.16b, T1.16b, #8
+       eor             T1.16b, T1.16b, T2.16b
+       eor             XL.16b, XL.16b, IN1.16b
 
-       ext             T3.16b, VZR.16b, T2.16b, #8
-       ext             T2.16b, T2.16b, VZR.16b, #8
-       eor             DATA.16b, DATA.16b, T3.16b
-       eor             T1.16b, T1.16b, T2.16b  // <T1:DATA> is result of
-                                               // carry-less multiplication
+       pmull2          XH.1q, SHASH.2d, XL.2d          // a1 * b1
+       eor             T1.16b, T1.16b, XL.16b
+       pmull           XL.1q, SHASH.1d, XL.1d          // a0 * b0
+       pmull           XM.1q, SHASH2.1d, T1.1d         // (a1 + a0)(b1 + b0)
 
-       /* first phase of the reduction */
-       shl             T3.2d, DATA.2d, #1
-       eor             T3.16b, T3.16b, DATA.16b
-       shl             T3.2d, T3.2d, #5
-       eor             T3.16b, T3.16b, DATA.16b
-       shl             T3.2d, T3.2d, #57
-       ext             T2.16b, VZR.16b, T3.16b, #8
-       ext             T3.16b, T3.16b, VZR.16b, #8
-       eor             DATA.16b, DATA.16b, T2.16b
-       eor             T1.16b, T1.16b, T3.16b
+       ext             T1.16b, XL.16b, XH.16b, #8
+       eor             T2.16b, XL.16b, XH.16b
+       eor             XM.16b, XM.16b, T1.16b
+       eor             XM.16b, XM.16b, T2.16b
+       pmull           T2.1q, XL.1d, MASK.1d
 
-       /* second phase of the reduction */
-       ushr            T2.2d, DATA.2d, #5
-       eor             T2.16b, T2.16b, DATA.16b
-       ushr            T2.2d, T2.2d, #1
-       eor             T2.16b, T2.16b, DATA.16b
-       ushr            T2.2d, T2.2d, #1
-       eor             T1.16b, T1.16b, T2.16b
-       eor             DATA.16b, DATA.16b, T1.16b
+       mov             XH.d[0], XM.d[1]
+       mov             XM.d[1], XL.d[0]
+
+       eor             XL.16b, XM.16b, T2.16b
+       ext             T2.16b, XL.16b, XL.16b, #8
+       pmull           XL.1q, XL.1d, MASK.1d
+       eor             T2.16b, T2.16b, XH.16b
+       eor             XL.16b, XL.16b, T2.16b
 
        cbnz            w0, 0b
 
-       st1             {DATA.16b}, [x1]
+       st1             {XL.16b}, [x1]
        ret
 ENDPROC(pmull_ghash_update)
index b92baf3f68c72b27453140e362d723b94ead4812..833ec1e3f3e9b7491cc26da24fc0ba386b73de81 100644 (file)
@@ -67,11 +67,12 @@ static int ghash_update(struct shash_desc *desc, const u8 *src,
                blocks = len / GHASH_BLOCK_SIZE;
                len %= GHASH_BLOCK_SIZE;
 
-               kernel_neon_begin_partial(6);
+               kernel_neon_begin_partial(8);
                pmull_ghash_update(blocks, ctx->digest, src, key,
                                   partial ? ctx->buf : NULL);
                kernel_neon_end();
                src += blocks * GHASH_BLOCK_SIZE;
+               partial = 0;
        }
        if (len)
                memcpy(ctx->buf + partial, src, len);
@@ -88,7 +89,7 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
 
                memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
 
-               kernel_neon_begin_partial(6);
+               kernel_neon_begin_partial(8);
                pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL);
                kernel_neon_end();
        }
index 736c5916d367ba00627f560905b03804adfe7acf..a049bf7f51507f10ad873ffd16ba8d7a3c9b28ff 100644 (file)
@@ -63,7 +63,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
        int result;
 
        asm volatile("// atomic_add_return\n"
-"1:    ldaxr   %w0, %2\n"
+"1:    ldxr    %w0, %2\n"
 "      add     %w0, %w0, %w3\n"
 "      stlxr   %w1, %w0, %2\n"
 "      cbnz    %w1, 1b"
@@ -71,6 +71,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
        : "Ir" (i)
        : "memory");
 
+       smp_mb();
        return result;
 }
 
@@ -94,7 +95,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
        int result;
 
        asm volatile("// atomic_sub_return\n"
-"1:    ldaxr   %w0, %2\n"
+"1:    ldxr    %w0, %2\n"
 "      sub     %w0, %w0, %w3\n"
 "      stlxr   %w1, %w0, %2\n"
 "      cbnz    %w1, 1b"
@@ -102,6 +103,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
        : "Ir" (i)
        : "memory");
 
+       smp_mb();
        return result;
 }
 
@@ -110,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
        unsigned long tmp;
        int oldval;
 
+       smp_mb();
+
        asm volatile("// atomic_cmpxchg\n"
-"1:    ldaxr   %w1, %2\n"
+"1:    ldxr    %w1, %2\n"
 "      cmp     %w1, %w3\n"
 "      b.ne    2f\n"
-"      stlxr   %w0, %w4, %2\n"
+"      stxr    %w0, %w4, %2\n"
 "      cbnz    %w0, 1b\n"
 "2:"
        : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
        : "Ir" (old), "r" (new)
        : "cc");
 
+       smp_mb();
        return oldval;
 }
 
@@ -194,7 +199,7 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
        unsigned long tmp;
 
        asm volatile("// atomic64_add_return\n"
-"1:    ldaxr   %0, %2\n"
+"1:    ldxr    %0, %2\n"
 "      add     %0, %0, %3\n"
 "      stlxr   %w1, %0, %2\n"
 "      cbnz    %w1, 1b"
@@ -202,6 +207,7 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
        : "Ir" (i)
        : "memory");
 
+       smp_mb();
        return result;
 }
 
@@ -225,7 +231,7 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
        unsigned long tmp;
 
        asm volatile("// atomic64_sub_return\n"
-"1:    ldaxr   %0, %2\n"
+"1:    ldxr    %0, %2\n"
 "      sub     %0, %0, %3\n"
 "      stlxr   %w1, %0, %2\n"
 "      cbnz    %w1, 1b"
@@ -233,6 +239,7 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
        : "Ir" (i)
        : "memory");
 
+       smp_mb();
        return result;
 }
 
@@ -241,17 +248,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
        long oldval;
        unsigned long res;
 
+       smp_mb();
+
        asm volatile("// atomic64_cmpxchg\n"
-"1:    ldaxr   %1, %2\n"
+"1:    ldxr    %1, %2\n"
 "      cmp     %1, %3\n"
 "      b.ne    2f\n"
-"      stlxr   %w0, %4, %2\n"
+"      stxr    %w0, %4, %2\n"
 "      cbnz    %w0, 1b\n"
 "2:"
        : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
        : "Ir" (old), "r" (new)
        : "cc");
 
+       smp_mb();
        return oldval;
 }
 
@@ -263,11 +273,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
        unsigned long tmp;
 
        asm volatile("// atomic64_dec_if_positive\n"
-"1:    ldaxr   %0, %2\n"
+"1:    ldxr    %0, %2\n"
 "      subs    %0, %0, #1\n"
 "      b.mi    2f\n"
 "      stlxr   %w1, %0, %2\n"
 "      cbnz    %w1, 1b\n"
+"      dmb     ish\n"
 "2:"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
index c98d0a88916a4bfd824f11a4014e9d719518a24b..709f1f6d6bbd8fb5623beb94523dce950433f94d 100644 (file)
 #define wfi()          asm volatile("wfi" : : : "memory")
 
 #define isb()          asm volatile("isb" : : : "memory")
-#define dmb(opt)       asm volatile("dmb sy" : : : "memory")
-#define dsb(opt)       asm volatile("dsb sy" : : : "memory")
+#define dmb(opt)       asm volatile("dmb " #opt : : : "memory")
+#define dsb(opt)       asm volatile("dsb " #opt : : : "memory")
 
 #define mb()           dsb(sy)
-#define rmb()          asm volatile("dsb ld" : : : "memory")
-#define wmb()          asm volatile("dsb st" : : : "memory")
+#define rmb()          dsb(ld)
+#define wmb()          dsb(st)
 
 #ifndef CONFIG_SMP
 #define smp_mb()       barrier()
 #define smp_rmb()      barrier()
 #define smp_wmb()      barrier()
+
+#define smp_store_release(p, v)                                                \
+do {                                                                   \
+       compiletime_assert_atomic_type(*p);                             \
+       barrier();                                                      \
+       ACCESS_ONCE(*p) = (v);                                          \
+} while (0)
+
+#define smp_load_acquire(p)                                            \
+({                                                                     \
+       typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
+       compiletime_assert_atomic_type(*p);                             \
+       barrier();                                                      \
+       ___p1;                                                          \
+})
+
 #else
-#define smp_mb()       asm volatile("dmb ish" : : : "memory")
-#define smp_rmb()      asm volatile("dmb ishld" : : : "memory")
-#define smp_wmb()      asm volatile("dmb ishst" : : : "memory")
+
+#define smp_mb()       dmb(ish)
+#define smp_rmb()      dmb(ishld)
+#define smp_wmb()      dmb(ishst)
+
+#define smp_store_release(p, v)                                                \
+do {                                                                   \
+       compiletime_assert_atomic_type(*p);                             \
+       switch (sizeof(*p)) {                                           \
+       case 4:                                                         \
+               asm volatile ("stlr %w1, %0"                            \
+                               : "=Q" (*p) : "r" (v) : "memory");      \
+               break;                                                  \
+       case 8:                                                         \
+               asm volatile ("stlr %1, %0"                             \
+                               : "=Q" (*p) : "r" (v) : "memory");      \
+               break;                                                  \
+       }                                                               \
+} while (0)
+
+#define smp_load_acquire(p)                                            \
+({                                                                     \
+       typeof(*p) ___p1;                                               \
+       compiletime_assert_atomic_type(*p);                             \
+       switch (sizeof(*p)) {                                           \
+       case 4:                                                         \
+               asm volatile ("ldar %w0, %1"                            \
+                       : "=r" (___p1) : "Q" (*p) : "memory");          \
+               break;                                                  \
+       case 8:                                                         \
+               asm volatile ("ldar %0, %1"                             \
+                       : "=r" (___p1) : "Q" (*p) : "memory");          \
+               break;                                                  \
+       }                                                               \
+       ___p1;                                                          \
+})
+
 #endif
 
 #define read_barrier_depends()         do { } while(0)
index 73eb0736613e7d75cb0f3b0b98d221438c45a170..ddb9d7830558f0840bf7b71fac33a20388c4a1f6 100644 (file)
@@ -29,7 +29,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
        switch (size) {
        case 1:
                asm volatile("//        __xchg1\n"
-               "1:     ldaxrb  %w0, %2\n"
+               "1:     ldxrb   %w0, %2\n"
                "       stlxrb  %w1, %w3, %2\n"
                "       cbnz    %w1, 1b\n"
                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
@@ -38,7 +38,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
                break;
        case 2:
                asm volatile("//        __xchg2\n"
-               "1:     ldaxrh  %w0, %2\n"
+               "1:     ldxrh   %w0, %2\n"
                "       stlxrh  %w1, %w3, %2\n"
                "       cbnz    %w1, 1b\n"
                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
@@ -47,7 +47,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
                break;
        case 4:
                asm volatile("//        __xchg4\n"
-               "1:     ldaxr   %w0, %2\n"
+               "1:     ldxr    %w0, %2\n"
                "       stlxr   %w1, %w3, %2\n"
                "       cbnz    %w1, 1b\n"
                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
@@ -56,7 +56,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
                break;
        case 8:
                asm volatile("//        __xchg8\n"
-               "1:     ldaxr   %0, %2\n"
+               "1:     ldxr    %0, %2\n"
                "       stlxr   %w1, %3, %2\n"
                "       cbnz    %w1, 1b\n"
                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
@@ -67,6 +67,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
                BUILD_BUG();
        }
 
+       smp_mb();
        return ret;
 }
 
index 253e33bc94fb5e4e4a33b39be0151070cda825db..e94e8dde78b432021905c9f2b995e2ae07fac701 100644 (file)
@@ -37,8 +37,8 @@ typedef s32           compat_ssize_t;
 typedef s32            compat_time_t;
 typedef s32            compat_clock_t;
 typedef s32            compat_pid_t;
-typedef u32            __compat_uid_t;
-typedef u32            __compat_gid_t;
+typedef u16            __compat_uid_t;
+typedef u16            __compat_gid_t;
 typedef u16            __compat_uid16_t;
 typedef u16            __compat_gid16_t;
 typedef u32            __compat_uid32_t;
@@ -205,6 +205,13 @@ typedef struct compat_siginfo {
                        compat_long_t _band;    /* POLL_IN, POLL_OUT, POLL_MSG */
                        int _fd;
                } _sigpoll;
+
+               /* SIGSYS */
+               struct {
+                       compat_uptr_t _call_addr; /* calling user insn */
+                       int _syscall;   /* triggering system call number */
+                       unsigned int _arch;     /* AUDIT_ARCH_* of syscall */
+               } _sigsys;
        } _sifields;
 } compat_siginfo_t;
 
index c404fb0df3a673710285603c8ba7571fa42a86f8..27f54a7cc81b3b0d524b33afa20ef69f25b6ae29 100644 (file)
@@ -41,6 +41,7 @@
 
 #define ARM_CPU_PART_AEM_V8    0xD0F0
 #define ARM_CPU_PART_FOUNDATION        0xD000
+#define ARM_CPU_PART_CORTEX_A53        0xD030
 #define ARM_CPU_PART_CORTEX_A57        0xD070
 
 #define APM_CPU_PART_POTENZA   0x0000
index 7c951a510b5443a49ffa139f64066fecf54c89d3..aab72ce22348a9a7b09372576d7f917c03c0a5e2 100644 (file)
 
 #ifdef __KERNEL__
 
+/* Low-level stepping controls. */
+#define DBG_MDSCR_SS           (1 << 0)
+#define DBG_SPSR_SS            (1 << 21)
+
+/* MDSCR_EL1 enabling bits */
+#define DBG_MDSCR_KDE          (1 << 13)
+#define DBG_MDSCR_MDE          (1 << 15)
+#define DBG_MDSCR_MASK         ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
+
 #define        DBG_ESR_EVT(x)          (((x) >> 27) & 0x7)
 
 /* AArch64 */
 
 #define CACHE_FLUSH_IS_SAFE            1
 
-enum debug_el {
-       DBG_ACTIVE_EL0 = 0,
-       DBG_ACTIVE_EL1,
-};
-
 /* AArch32 */
 #define DBG_ESR_EVT_BKPT       0x4
 #define DBG_ESR_EVT_VECC       0x5
@@ -115,6 +119,11 @@ void unregister_break_hook(struct break_hook *hook);
 
 u8 debug_monitors_arch(void);
 
+enum debug_el {
+       DBG_ACTIVE_EL0 = 0,
+       DBG_ACTIVE_EL1,
+};
+
 void enable_debug_monitors(enum debug_el el);
 void disable_debug_monitors(enum debug_el el);
 
index c43b4ac13008ffec8f0b39101e5cfd77e43fd3b5..50f559f574fe53b4d25995bd465c7b90f0a4b8be 100644 (file)
@@ -37,8 +37,21 @@ struct fpsimd_state {
                        u32 fpcr;
                };
        };
+       /* the id of the last cpu to have restored this state */
+       unsigned int cpu;
 };
 
+/*
+ * Struct for stacking the bottom 'n' FP/SIMD registers.
+ */
+struct fpsimd_partial_state {
+       u32             fpsr;
+       u32             fpcr;
+       u32             num_regs;
+       __uint128_t     vregs[32];
+};
+
+
 #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 /* Masks for extracting the FPSR and FPCR from the FPSCR */
 #define VFP_FPSCR_STAT_MASK    0xf800009f
@@ -58,6 +71,16 @@ extern void fpsimd_load_state(struct fpsimd_state *state);
 extern void fpsimd_thread_switch(struct task_struct *next);
 extern void fpsimd_flush_thread(void);
 
+extern void fpsimd_preserve_current_state(void);
+extern void fpsimd_restore_current_state(void);
+extern void fpsimd_update_current_state(struct fpsimd_state *state);
+
+extern void fpsimd_flush_task_state(struct task_struct *target);
+
+extern void fpsimd_save_partial_state(struct fpsimd_partial_state *state,
+                                     u32 num_regs);
+extern void fpsimd_load_partial_state(struct fpsimd_partial_state *state);
+
 #endif
 
 #endif
index bbec599c96bd61df88740e7e1295d81540189edc..768414d55e642f461788c0a031e1749c83cfac86 100644 (file)
        ldr     w\tmpnr, [\state, #16 * 2 + 4]
        msr     fpcr, x\tmpnr
 .endm
+
+.altmacro
+.macro fpsimd_save_partial state, numnr, tmpnr1, tmpnr2
+       mrs     x\tmpnr1, fpsr
+       str     w\numnr, [\state, #8]
+       mrs     x\tmpnr2, fpcr
+       stp     w\tmpnr1, w\tmpnr2, [\state]
+       adr     x\tmpnr1, 0f
+       add     \state, \state, x\numnr, lsl #4
+       sub     x\tmpnr1, x\tmpnr1, x\numnr, lsl #1
+       br      x\tmpnr1
+       .irp    qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
+       .irp    qb, %(qa + 1)
+       stp     q\qa, q\qb, [\state, # -16 * \qa - 16]
+       .endr
+       .endr
+0:
+.endm
+
+.macro fpsimd_restore_partial state, tmpnr1, tmpnr2
+       ldp     w\tmpnr1, w\tmpnr2, [\state]
+       msr     fpsr, x\tmpnr1
+       msr     fpcr, x\tmpnr2
+       adr     x\tmpnr1, 0f
+       ldr     w\tmpnr2, [\state, #8]
+       add     \state, \state, x\tmpnr2, lsl #4
+       sub     x\tmpnr1, x\tmpnr1, x\tmpnr2, lsl #1
+       br      x\tmpnr1
+       .irp    qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
+       .irp    qb, %(qa + 1)
+       ldp     q\qa, q\qb, [\state, # -16 * \qa - 16]
+       .endr
+       .endr
+0:
+.endm
index 6230baba7869530064b686dc46e97ba24f1fa2dd..5f750dc96e0fd64123851ac787659f5953bc71e5 100644 (file)
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)                \
        asm volatile(                                                   \
-"1:    ldaxr   %w1, %2\n"                                              \
+"1:    ldxr    %w1, %2\n"                                              \
        insn "\n"                                                       \
 "2:    stlxr   %w3, %w0, %2\n"                                         \
 "      cbnz    %w3, 1b\n"                                              \
+"      dmb     ish\n"                                                  \
 "3:\n"                                                                 \
 "      .pushsection .fixup,\"ax\"\n"                                   \
 "      .align  2\n"                                                    \
@@ -111,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                return -EFAULT;
 
        asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"1:    ldaxr   %w1, %2\n"
+"1:    ldxr    %w1, %2\n"
 "      sub     %w3, %w1, %w4\n"
 "      cbnz    %w3, 3f\n"
 "2:    stlxr   %w3, %w5, %2\n"
 "      cbnz    %w3, 1b\n"
+"      dmb     ish\n"
 "3:\n"
 "      .pushsection .fixup,\"ax\"\n"
 "4:    mov     %w0, %w6\n"
index d064047612b12acb2668927c4b3cedf504d22c11..52b484b6aa1a7fec251a72b028f655523c74236a 100644 (file)
@@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg,
  */
 #define ARM_MAX_BRP            16
 #define ARM_MAX_WRP            16
-#define ARM_MAX_HBP_SLOTS      (ARM_MAX_BRP + ARM_MAX_WRP)
 
 /* Virtual debug register bases. */
 #define AARCH64_DBG_REG_BVR    0
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
new file mode 100644 (file)
index 0000000..7fd3e27
--- /dev/null
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_ARM_H__
+#define __ARM64_KVM_ARM_H__
+
+#include <asm/types.h>
+
+/* Hyp Configuration Register (HCR) bits */
+#define HCR_ID         (UL(1) << 33)
+#define HCR_CD         (UL(1) << 32)
+#define HCR_RW_SHIFT   31
+#define HCR_RW         (UL(1) << HCR_RW_SHIFT)
+#define HCR_TRVM       (UL(1) << 30)
+#define HCR_HCD                (UL(1) << 29)
+#define HCR_TDZ                (UL(1) << 28)
+#define HCR_TGE                (UL(1) << 27)
+#define HCR_TVM                (UL(1) << 26)
+#define HCR_TTLB       (UL(1) << 25)
+#define HCR_TPU                (UL(1) << 24)
+#define HCR_TPC                (UL(1) << 23)
+#define HCR_TSW                (UL(1) << 22)
+#define HCR_TAC                (UL(1) << 21)
+#define HCR_TIDCP      (UL(1) << 20)
+#define HCR_TSC                (UL(1) << 19)
+#define HCR_TID3       (UL(1) << 18)
+#define HCR_TID2       (UL(1) << 17)
+#define HCR_TID1       (UL(1) << 16)
+#define HCR_TID0       (UL(1) << 15)
+#define HCR_TWE                (UL(1) << 14)
+#define HCR_TWI                (UL(1) << 13)
+#define HCR_DC         (UL(1) << 12)
+#define HCR_BSU                (3 << 10)
+#define HCR_BSU_IS     (UL(1) << 10)
+#define HCR_FB         (UL(1) << 9)
+#define HCR_VA         (UL(1) << 8)
+#define HCR_VI         (UL(1) << 7)
+#define HCR_VF         (UL(1) << 6)
+#define HCR_AMO                (UL(1) << 5)
+#define HCR_IMO                (UL(1) << 4)
+#define HCR_FMO                (UL(1) << 3)
+#define HCR_PTW                (UL(1) << 2)
+#define HCR_SWIO       (UL(1) << 1)
+#define HCR_VM         (UL(1) << 0)
+
+/*
+ * The bits we set in HCR:
+ * RW:         64bit by default, can be overriden for 32bit VMs
+ * TAC:                Trap ACTLR
+ * TSC:                Trap SMC
+ * TVM:                Trap VM ops (until M+C set in SCTLR_EL1)
+ * TSW:                Trap cache operations by set/way
+ * TWE:                Trap WFE
+ * TWI:                Trap WFI
+ * TIDCP:      Trap L2CTLR/L2ECTLR
+ * BSU_IS:     Upgrade barriers to the inner shareable domain
+ * FB:         Force broadcast of all maintainance operations
+ * AMO:                Override CPSR.A and enable signaling with VA
+ * IMO:                Override CPSR.I and enable signaling with VI
+ * FMO:                Override CPSR.F and enable signaling with VF
+ * SWIO:       Turn set/way invalidates into set/way clean+invalidate
+ */
+#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
+                        HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
+                        HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
+#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+#define HCR_INT_OVERRIDE   (HCR_FMO | HCR_IMO)
+
+
+/* Hyp System Control Register (SCTLR_EL2) bits */
+#define SCTLR_EL2_EE   (1 << 25)
+#define SCTLR_EL2_WXN  (1 << 19)
+#define SCTLR_EL2_I    (1 << 12)
+#define SCTLR_EL2_SA   (1 << 3)
+#define SCTLR_EL2_C    (1 << 2)
+#define SCTLR_EL2_A    (1 << 1)
+#define SCTLR_EL2_M    1
+#define SCTLR_EL2_FLAGS        (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C |      \
+                        SCTLR_EL2_SA | SCTLR_EL2_I)
+
+/* TCR_EL2 Registers bits */
+#define TCR_EL2_TBI    (1 << 20)
+#define TCR_EL2_PS     (7 << 16)
+#define TCR_EL2_PS_40B (2 << 16)
+#define TCR_EL2_TG0    (1 << 14)
+#define TCR_EL2_SH0    (3 << 12)
+#define TCR_EL2_ORGN0  (3 << 10)
+#define TCR_EL2_IRGN0  (3 << 8)
+#define TCR_EL2_T0SZ   0x3f
+#define TCR_EL2_MASK   (TCR_EL2_TG0 | TCR_EL2_SH0 | \
+                        TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
+
+#define TCR_EL2_FLAGS  (TCR_EL2_PS_40B)
+
+/* VTCR_EL2 Registers bits */
+#define VTCR_EL2_PS_MASK       (7 << 16)
+#define VTCR_EL2_TG0_MASK      (1 << 14)
+#define VTCR_EL2_TG0_4K                (0 << 14)
+#define VTCR_EL2_TG0_64K       (1 << 14)
+#define VTCR_EL2_SH0_MASK      (3 << 12)
+#define VTCR_EL2_SH0_INNER     (3 << 12)
+#define VTCR_EL2_ORGN0_MASK    (3 << 10)
+#define VTCR_EL2_ORGN0_WBWA    (1 << 10)
+#define VTCR_EL2_IRGN0_MASK    (3 << 8)
+#define VTCR_EL2_IRGN0_WBWA    (1 << 8)
+#define VTCR_EL2_SL0_MASK      (3 << 6)
+#define VTCR_EL2_SL0_LVL1      (1 << 6)
+#define VTCR_EL2_T0SZ_MASK     0x3f
+#define VTCR_EL2_T0SZ_40B      24
+
+/*
+ * We configure the Stage-2 page tables to always restrict the IPA space to be
+ * 40 bits wide (T0SZ = 24).  Systems with a PARange smaller than 40 bits are
+ * not known to exist and will break with this configuration.
+ *
+ * Note that when using 4K pages, we concatenate two first level page tables
+ * together.
+ *
+ * The magic numbers used for VTTBR_X in this patch can be found in Tables
+ * D4-23 and D4-25 in ARM DDI 0487A.b.
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+/*
+ * Stage2 translation configuration:
+ * 40bits output (PS = 2)
+ * 40bits input  (T0SZ = 24)
+ * 64kB pages (TG0 = 1)
+ * 2 level page tables (SL = 1)
+ */
+#define VTCR_EL2_FLAGS         (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
+                                VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
+                                VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
+#define VTTBR_X                (38 - VTCR_EL2_T0SZ_40B)
+#else
+/*
+ * Stage2 translation configuration:
+ * 40bits output (PS = 2)
+ * 40bits input  (T0SZ = 24)
+ * 4kB pages (TG0 = 0)
+ * 3 level page tables (SL = 1)
+ */
+#define VTCR_EL2_FLAGS         (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
+                                VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
+                                VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
+#define VTTBR_X                (37 - VTCR_EL2_T0SZ_40B)
+#endif
+
+#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
+#define VTTBR_BADDR_MASK  (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT  (48LLU)
+#define VTTBR_VMID_MASK          (0xffLLU << VTTBR_VMID_SHIFT)
+
+/* Hyp System Trap Register */
+#define HSTR_EL2_TTEE  (1 << 16)
+#define HSTR_EL2_T(x)  (1 << x)
+
+/* Hyp Coprocessor Trap Register */
+#define CPTR_EL2_TCPAC (1 << 31)
+#define CPTR_EL2_TTA   (1 << 20)
+#define CPTR_EL2_TFP   (1 << 10)
+
+/* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_TDRA          (1 << 11)
+#define MDCR_EL2_TDOSA         (1 << 10)
+#define MDCR_EL2_TDA           (1 << 9)
+#define MDCR_EL2_TDE           (1 << 8)
+#define MDCR_EL2_HPME          (1 << 7)
+#define MDCR_EL2_TPM           (1 << 6)
+#define MDCR_EL2_TPMCR         (1 << 5)
+#define MDCR_EL2_HPMN_MASK     (0x1F)
+
+/* Exception Syndrome Register (ESR) bits */
+#define ESR_EL2_EC_SHIFT       (26)
+#define ESR_EL2_EC             (0x3fU << ESR_EL2_EC_SHIFT)
+#define ESR_EL2_IL             (1U << 25)
+#define ESR_EL2_ISS            (ESR_EL2_IL - 1)
+#define ESR_EL2_ISV_SHIFT      (24)
+#define ESR_EL2_ISV            (1U << ESR_EL2_ISV_SHIFT)
+#define ESR_EL2_SAS_SHIFT      (22)
+#define ESR_EL2_SAS            (3U << ESR_EL2_SAS_SHIFT)
+#define ESR_EL2_SSE            (1 << 21)
+#define ESR_EL2_SRT_SHIFT      (16)
+#define ESR_EL2_SRT_MASK       (0x1f << ESR_EL2_SRT_SHIFT)
+#define ESR_EL2_SF             (1 << 15)
+#define ESR_EL2_AR             (1 << 14)
+#define ESR_EL2_EA             (1 << 9)
+#define ESR_EL2_CM             (1 << 8)
+#define ESR_EL2_S1PTW          (1 << 7)
+#define ESR_EL2_WNR            (1 << 6)
+#define ESR_EL2_FSC            (0x3f)
+#define ESR_EL2_FSC_TYPE       (0x3c)
+
+#define ESR_EL2_CV_SHIFT       (24)
+#define ESR_EL2_CV             (1U << ESR_EL2_CV_SHIFT)
+#define ESR_EL2_COND_SHIFT     (20)
+#define ESR_EL2_COND           (0xfU << ESR_EL2_COND_SHIFT)
+
+
+#define FSC_FAULT      (0x04)
+#define FSC_PERM       (0x0c)
+
+/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
+#define HPFAR_MASK     (~0xFUL)
+
+#define ESR_EL2_EC_UNKNOWN     (0x00)
+#define ESR_EL2_EC_WFI         (0x01)
+#define ESR_EL2_EC_CP15_32     (0x03)
+#define ESR_EL2_EC_CP15_64     (0x04)
+#define ESR_EL2_EC_CP14_MR     (0x05)
+#define ESR_EL2_EC_CP14_LS     (0x06)
+#define ESR_EL2_EC_FP_ASIMD    (0x07)
+#define ESR_EL2_EC_CP10_ID     (0x08)
+#define ESR_EL2_EC_CP14_64     (0x0C)
+#define ESR_EL2_EC_ILL_ISS     (0x0E)
+#define ESR_EL2_EC_SVC32       (0x11)
+#define ESR_EL2_EC_HVC32       (0x12)
+#define ESR_EL2_EC_SMC32       (0x13)
+#define ESR_EL2_EC_SVC64       (0x15)
+#define ESR_EL2_EC_HVC64       (0x16)
+#define ESR_EL2_EC_SMC64       (0x17)
+#define ESR_EL2_EC_SYS64       (0x18)
+#define ESR_EL2_EC_IABT                (0x20)
+#define ESR_EL2_EC_IABT_HYP    (0x21)
+#define ESR_EL2_EC_PC_ALIGN    (0x22)
+#define ESR_EL2_EC_DABT                (0x24)
+#define ESR_EL2_EC_DABT_HYP    (0x25)
+#define ESR_EL2_EC_SP_ALIGN    (0x26)
+#define ESR_EL2_EC_FP_EXC32    (0x28)
+#define ESR_EL2_EC_FP_EXC64    (0x2C)
+#define ESR_EL2_EC_SERROR      (0x2F)
+#define ESR_EL2_EC_BREAKPT     (0x30)
+#define ESR_EL2_EC_BREAKPT_HYP (0x31)
+#define ESR_EL2_EC_SOFTSTP     (0x32)
+#define ESR_EL2_EC_SOFTSTP_HYP (0x33)
+#define ESR_EL2_EC_WATCHPT     (0x34)
+#define ESR_EL2_EC_WATCHPT_HYP (0x35)
+#define ESR_EL2_EC_BKPT32      (0x38)
+#define ESR_EL2_EC_VECTOR32    (0x3A)
+#define ESR_EL2_EC_BRK64       (0x3C)
+
+#define ESR_EL2_EC_xABT_xFSR_EXTABT    0x10
+
+#define ESR_EL2_EC_WFI_ISS_WFE (1 << 0)
+
+#endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
new file mode 100644 (file)
index 0000000..4838421
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_ASM_H__
+#define __ARM_KVM_ASM_H__
+
+#include <asm/virt.h>
+
+/*
+ * 0 is reserved as an invalid value.
+ * Order *must* be kept in sync with the hyp switch code.
+ */
+#define        MPIDR_EL1       1       /* MultiProcessor Affinity Register */
+#define        CSSELR_EL1      2       /* Cache Size Selection Register */
+#define        SCTLR_EL1       3       /* System Control Register */
+#define        ACTLR_EL1       4       /* Auxilliary Control Register */
+#define        CPACR_EL1       5       /* Coprocessor Access Control */
+#define        TTBR0_EL1       6       /* Translation Table Base Register 0 */
+#define        TTBR1_EL1       7       /* Translation Table Base Register 1 */
+#define        TCR_EL1         8       /* Translation Control Register */
+#define        ESR_EL1         9       /* Exception Syndrome Register */
+#define        AFSR0_EL1       10      /* Auxilary Fault Status Register 0 */
+#define        AFSR1_EL1       11      /* Auxilary Fault Status Register 1 */
+#define        FAR_EL1         12      /* Fault Address Register */
+#define        MAIR_EL1        13      /* Memory Attribute Indirection Register */
+#define        VBAR_EL1        14      /* Vector Base Address Register */
+#define        CONTEXTIDR_EL1  15      /* Context ID Register */
+#define        TPIDR_EL0       16      /* Thread ID, User R/W */
+#define        TPIDRRO_EL0     17      /* Thread ID, User R/O */
+#define        TPIDR_EL1       18      /* Thread ID, Privileged */
+#define        AMAIR_EL1       19      /* Aux Memory Attribute Indirection Register */
+#define        CNTKCTL_EL1     20      /* Timer Control Register (EL1) */
+#define        PAR_EL1         21      /* Physical Address Register */
+#define MDSCR_EL1      22      /* Monitor Debug System Control Register */
+#define DBGBCR0_EL1    23      /* Debug Breakpoint Control Registers (0-15) */
+#define DBGBCR15_EL1   38
+#define DBGBVR0_EL1    39      /* Debug Breakpoint Value Registers (0-15) */
+#define DBGBVR15_EL1   54
+#define DBGWCR0_EL1    55      /* Debug Watchpoint Control Registers (0-15) */
+#define DBGWCR15_EL1   70
+#define DBGWVR0_EL1    71      /* Debug Watchpoint Value Registers (0-15) */
+#define DBGWVR15_EL1   86
+#define MDCCINT_EL1    87      /* Monitor Debug Comms Channel Interrupt Enable Reg */
+
+/* 32bit specific registers. Keep them at the end of the range */
+#define        DACR32_EL2      88      /* Domain Access Control Register */
+#define        IFSR32_EL2      89      /* Instruction Fault Status Register */
+#define        FPEXC32_EL2     90      /* Floating-Point Exception Control Register */
+#define        DBGVCR32_EL2    91      /* Debug Vector Catch Register */
+#define        TEECR32_EL1     92      /* ThumbEE Configuration Register */
+#define        TEEHBR32_EL1    93      /* ThumbEE Handler Base Register */
+#define        NR_SYS_REGS     94
+
+/* 32bit mapping */
+#define c0_MPIDR       (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
+#define c0_CSSELR      (CSSELR_EL1 * 2)/* Cache Size Selection Register */
+#define c1_SCTLR       (SCTLR_EL1 * 2) /* System Control Register */
+#define c1_ACTLR       (ACTLR_EL1 * 2) /* Auxiliary Control Register */
+#define c1_CPACR       (CPACR_EL1 * 2) /* Coprocessor Access Control */
+#define c2_TTBR0       (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
+#define c2_TTBR0_high  (c2_TTBR0 + 1)  /* TTBR0 top 32 bits */
+#define c2_TTBR1       (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
+#define c2_TTBR1_high  (c2_TTBR1 + 1)  /* TTBR1 top 32 bits */
+#define c2_TTBCR       (TCR_EL1 * 2)   /* Translation Table Base Control R. */
+#define c3_DACR                (DACR32_EL2 * 2)/* Domain Access Control Register */
+#define c5_DFSR                (ESR_EL1 * 2)   /* Data Fault Status Register */
+#define c5_IFSR                (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
+#define c5_ADFSR       (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
+#define c5_AIFSR       (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
+#define c6_DFAR                (FAR_EL1 * 2)   /* Data Fault Address Register */
+#define c6_IFAR                (c6_DFAR + 1)   /* Instruction Fault Address Register */
+#define c7_PAR         (PAR_EL1 * 2)   /* Physical Address Register */
+#define c7_PAR_high    (c7_PAR + 1)    /* PAR top 32 bits */
+#define c10_PRRR       (MAIR_EL1 * 2)  /* Primary Region Remap Register */
+#define c10_NMRR       (c10_PRRR + 1)  /* Normal Memory Remap Register */
+#define c12_VBAR       (VBAR_EL1 * 2)  /* Vector Base Address Register */
+#define c13_CID                (CONTEXTIDR_EL1 * 2)    /* Context ID Register */
+#define c13_TID_URW    (TPIDR_EL0 * 2) /* Thread ID, User R/W */
+#define c13_TID_URO    (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
+#define c13_TID_PRIV   (TPIDR_EL1 * 2) /* Thread ID, Privileged */
+#define c10_AMAIR0     (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
+#define c10_AMAIR1     (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
+#define c14_CNTKCTL    (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
+
+#define cp14_DBGDSCRext        (MDSCR_EL1 * 2)
+#define cp14_DBGBCR0   (DBGBCR0_EL1 * 2)
+#define cp14_DBGBVR0   (DBGBVR0_EL1 * 2)
+#define cp14_DBGBXVR0  (cp14_DBGBVR0 + 1)
+#define cp14_DBGWCR0   (DBGWCR0_EL1 * 2)
+#define cp14_DBGWVR0   (DBGWVR0_EL1 * 2)
+#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
+
+#define NR_COPRO_REGS  (NR_SYS_REGS * 2)
+
+#define ARM_EXCEPTION_IRQ        0
+#define ARM_EXCEPTION_TRAP       1
+
+#define KVM_ARM64_DEBUG_DIRTY_SHIFT    0
+#define KVM_ARM64_DEBUG_DIRTY          (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+
+#ifndef __ASSEMBLY__
+struct kvm;
+struct kvm_vcpu;
+
+extern char __kvm_hyp_init[];
+extern char __kvm_hyp_init_end[];
+
+extern char __kvm_hyp_vector[];
+
+#define        __kvm_hyp_code_start    __hyp_text_start
+#define        __kvm_hyp_code_end      __hyp_text_end
+
+extern void __kvm_flush_vm_context(void);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+extern u64 __vgic_v3_get_ich_vtr_el2(void);
+
+extern char __save_vgic_v2_state[];
+extern char __restore_vgic_v2_state[];
+extern char __save_vgic_v3_state[];
+extern char __restore_vgic_v3_state[];
+
+#endif
+
+#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h
new file mode 100644 (file)
index 0000000..0b52377
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/include/asm/kvm_coproc.h
+ * Copyright (C) 2012 Rusty Russell IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_COPROC_H__
+#define __ARM64_KVM_COPROC_H__
+
+#include <linux/kvm_host.h>
+
+void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
+
+struct kvm_sys_reg_table {
+       const struct sys_reg_desc *table;
+       size_t num;
+};
+
+struct kvm_sys_reg_target_table {
+       struct kvm_sys_reg_table table64;
+       struct kvm_sys_reg_table table32;
+};
+
+void kvm_register_target_sys_reg_table(unsigned int target,
+                                      struct kvm_sys_reg_target_table *table);
+
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+#define kvm_coproc_table_init kvm_sys_reg_table_init
+void kvm_sys_reg_table_init(void);
+
+struct kvm_one_reg;
+int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
+
+#endif /* __ARM64_KVM_COPROC_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
new file mode 100644 (file)
index 0000000..5674a55
--- /dev/null
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/include/kvm_emulate.h
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_EMULATE_H__
+#define __ARM64_KVM_EMULATE_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmio.h>
+#include <asm/ptrace.h>
+
+unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
+
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
+
+void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+
+static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+{
+       return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
+}
+
+static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
+{
+       return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
+}
+
+static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
+{
+       return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
+}
+
+static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+{
+       return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
+}
+
+static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+{
+       if (vcpu_mode_is_32bit(vcpu))
+               return kvm_condition_valid32(vcpu);
+
+       return true;
+}
+
+static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+       if (vcpu_mode_is_32bit(vcpu))
+               kvm_skip_instr32(vcpu, is_wide_instr);
+       else
+               *vcpu_pc(vcpu) += 4;
+}
+
+static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
+{
+       *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
+}
+
+static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+{
+       if (vcpu_mode_is_32bit(vcpu))
+               return vcpu_reg32(vcpu, reg_num);
+
+       return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+}
+
+/* Get vcpu SPSR for current mode */
+static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
+{
+       if (vcpu_mode_is_32bit(vcpu))
+               return vcpu_spsr32(vcpu);
+
+       return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
+}
+
+static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
+{
+       u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
+
+       if (vcpu_mode_is_32bit(vcpu))
+               return mode > COMPAT_PSR_MODE_USR;
+
+       return mode != PSR_MODE_EL0t;
+}
+
+static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.fault.esr_el2;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.fault.far_el2;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+{
+       return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+{
+       return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+{
+       return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
+}
+
+static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
+{
+       return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
+}
+
+static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+{
+       return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+{
+       return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+{
+       return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
+}
+
+static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+{
+       return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
+{
+       return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
+}
+
+static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
+}
+
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+       return vcpu_sys_reg(vcpu, MPIDR_EL1);
+}
+
+static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
+{
+       if (vcpu_mode_is_32bit(vcpu))
+               *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
+       else
+               vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
+}
+
+static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
+{
+       if (vcpu_mode_is_32bit(vcpu))
+               return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
+
+       return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
+}
+
+static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
+                                                   unsigned long data,
+                                                   unsigned int len)
+{
+       if (kvm_vcpu_is_be(vcpu)) {
+               switch (len) {
+               case 1:
+                       return data & 0xff;
+               case 2:
+                       return be16_to_cpu(data & 0xffff);
+               case 4:
+                       return be32_to_cpu(data & 0xffffffff);
+               default:
+                       return be64_to_cpu(data);
+               }
+       } else {
+               switch (len) {
+               case 1:
+                       return data & 0xff;
+               case 2:
+                       return le16_to_cpu(data & 0xffff);
+               case 4:
+                       return le32_to_cpu(data & 0xffffffff);
+               default:
+                       return le64_to_cpu(data);
+               }
+       }
+
+       return data;            /* Leave LE untouched */
+}
+
+static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
+                                                   unsigned long data,
+                                                   unsigned int len)
+{
+       if (kvm_vcpu_is_be(vcpu)) {
+               switch (len) {
+               case 1:
+                       return data & 0xff;
+               case 2:
+                       return cpu_to_be16(data & 0xffff);
+               case 4:
+                       return cpu_to_be32(data & 0xffffffff);
+               default:
+                       return cpu_to_be64(data);
+               }
+       } else {
+               switch (len) {
+               case 1:
+                       return data & 0xff;
+               case 2:
+                       return cpu_to_le16(data & 0xffff);
+               case 4:
+                       return cpu_to_le32(data & 0xffffffff);
+               default:
+                       return cpu_to_le64(data);
+               }
+       }
+
+       return data;            /* Leave LE untouched */
+}
+
+#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
new file mode 100644 (file)
index 0000000..bcde419
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/include/asm/kvm_host.h:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_HOST_H__
+#define __ARM64_KVM_HOST_H__
+
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmio.h>
+
+#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
+#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
+#else
+#define KVM_MAX_VCPUS 0
+#endif
+
+#define KVM_USER_MEM_SLOTS 32
+#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+#include <kvm/arm_vgic.h>
+#include <kvm/arm_arch_timer.h>
+
+#define KVM_VCPU_MAX_FEATURES 3
+
+int __attribute_const__ kvm_target_cpu(void);
+int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+int kvm_arch_dev_ioctl_check_extension(long ext);
+
+struct kvm_arch {
+       /* The VMID generation used for the virt. memory system */
+       u64    vmid_gen;
+       u32    vmid;
+
+       /* 1-level 2nd stage table and lock */
+       spinlock_t pgd_lock;
+       pgd_t *pgd;
+
+       /* VTTBR value associated with above pgd and vmid */
+       u64    vttbr;
+
+       /* Interrupt controller */
+       struct vgic_dist        vgic;
+
+       /* Timer */
+       struct arch_timer_kvm   timer;
+};
+
+#define KVM_NR_MEM_OBJS     40
+
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
+struct kvm_mmu_memory_cache {
+       int nobjs;
+       void *objects[KVM_NR_MEM_OBJS];
+};
+
+struct kvm_vcpu_fault_info {
+       u32 esr_el2;            /* Hyp Syndrom Register */
+       u64 far_el2;            /* Hyp Fault Address Register */
+       u64 hpfar_el2;          /* Hyp IPA Fault Address Register */
+};
+
+struct kvm_cpu_context {
+       struct kvm_regs gp_regs;
+       union {
+               u64 sys_regs[NR_SYS_REGS];
+               u32 copro[NR_COPRO_REGS];
+       };
+};
+
+typedef struct kvm_cpu_context kvm_cpu_context_t;
+
+struct kvm_vcpu_arch {
+       struct kvm_cpu_context ctxt;
+
+       /* HYP configuration */
+       u64 hcr_el2;
+
+       /* Exception Information */
+       struct kvm_vcpu_fault_info fault;
+
+       /* Debug state */
+       u64 debug_flags;
+
+       /* Pointer to host CPU context */
+       kvm_cpu_context_t *host_cpu_context;
+
+       /* VGIC state */
+       struct vgic_cpu vgic_cpu;
+       struct arch_timer_cpu timer_cpu;
+
+       /*
+        * Anything that is not used directly from assembly code goes
+        * here.
+        */
+       /* dcache set/way operation pending */
+       int last_pcpu;
+       cpumask_t require_dcache_flush;
+
+       /* Don't run the guest */
+       bool pause;
+
+       /* IO related fields */
+       struct kvm_decode mmio_decode;
+
+       /* Interrupt related fields */
+       u64 irq_lines;          /* IRQ and FIQ levels */
+
+       /* Cache some mmu pages needed inside spinlock regions */
+       struct kvm_mmu_memory_cache mmu_page_cache;
+
+       /* Target CPU and feature flags */
+       int target;
+       DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
+
+       /* Detect first run of a vcpu */
+       bool has_run_once;
+};
+
+#define vcpu_gp_regs(v)                (&(v)->arch.ctxt.gp_regs)
+#define vcpu_sys_reg(v,r)      ((v)->arch.ctxt.sys_regs[(r)])
+/*
+ * CP14 and CP15 live in the same array, as they are backed by the
+ * same system registers.
+ */
+#define vcpu_cp14(v,r)         ((v)->arch.ctxt.copro[(r)])
+#define vcpu_cp15(v,r)         ((v)->arch.ctxt.copro[(r)])
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r))
+#define vcpu_cp15_64_low(v,r)  vcpu_cp15((v),(r) + 1)
+#else
+#define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r) + 1)
+#define vcpu_cp15_64_low(v,r)  vcpu_cp15((v),(r))
+#endif
+
+struct kvm_vm_stat {
+       u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+       u32 halt_wakeup;
+};
+
+int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+                       const struct kvm_vcpu_init *init);
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_unmap_hva_range(struct kvm *kvm,
+                       unsigned long start, unsigned long end);
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+/* We do not have shadow page tables, hence the empty hooks */
+static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       return 0;
+}
+
+static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       return 0;
+}
+
+struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
+
+u64 kvm_call_hyp(void *hypfn, ...);
+
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+               int exception_index);
+
+int kvm_perf_init(void);
+int kvm_perf_teardown(void);
+
+static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
+                                      phys_addr_t pgd_ptr,
+                                      unsigned long hyp_stack_ptr,
+                                      unsigned long vector_ptr)
+{
+       /*
+        * Call initialization code, and switch to the full blown
+        * HYP code.
+        */
+       kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
+                    hyp_stack_ptr, vector_ptr);
+}
+
+struct vgic_sr_vectors {
+       void    *save_vgic;
+       void    *restore_vgic;
+};
+
+static inline void vgic_arch_setup(const struct vgic_params *vgic)
+{
+       extern struct vgic_sr_vectors __vgic_sr_vectors;
+
+       switch(vgic->type)
+       {
+       case VGIC_V2:
+               __vgic_sr_vectors.save_vgic     = __save_vgic_v2_state;
+               __vgic_sr_vectors.restore_vgic  = __restore_vgic_v2_state;
+               break;
+
+#ifdef CONFIG_ARM_GIC_V3
+       case VGIC_V3:
+               __vgic_sr_vectors.save_vgic     = __save_vgic_v3_state;
+               __vgic_sr_vectors.restore_vgic  = __restore_vgic_v3_state;
+               break;
+#endif
+
+       default:
+               BUG();
+       }
+}
+
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+
+#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
new file mode 100644 (file)
index 0000000..fc2f689
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_MMIO_H__
+#define __ARM64_KVM_MMIO_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+/*
+ * This is annoying. The mmio code requires this, even if we don't
+ * need any decoding. To be fixed.
+ */
+struct kvm_decode {
+       unsigned long rt;
+       bool sign_extend;
+};
+
+/*
+ * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
+ * which is an anonymous type. Use our own type instead.
+ */
+struct kvm_exit_mmio {
+       phys_addr_t     phys_addr;
+       u8              data[8];
+       u32             len;
+       bool            is_write;
+};
+
+static inline void kvm_prepare_mmio(struct kvm_run *run,
+                                   struct kvm_exit_mmio *mmio)
+{
+       run->mmio.phys_addr     = mmio->phys_addr;
+       run->mmio.len           = mmio->len;
+       run->mmio.is_write      = mmio->is_write;
+       memcpy(run->mmio.data, mmio->data, mmio->len);
+       run->exit_reason        = KVM_EXIT_MMIO;
+}
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                phys_addr_t fault_ipa);
+
+#endif /* __ARM64_KVM_MMIO_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
new file mode 100644 (file)
index 0000000..a030d16
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_MMU_H__
+#define __ARM64_KVM_MMU_H__
+
+#include <asm/page.h>
+#include <asm/memory.h>
+
+/*
+ * As we only have the TTBR0_EL2 register, we cannot express
+ * "negative" addresses. This makes it impossible to directly share
+ * mappings with the kernel.
+ *
+ * Instead, give the HYP mode its own VA region at a fixed offset from
+ * the kernel by just masking the top bits (which are all ones for a
+ * kernel address).
+ */
+#define HYP_PAGE_OFFSET_SHIFT  VA_BITS
+#define HYP_PAGE_OFFSET_MASK   ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
+#define HYP_PAGE_OFFSET                (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
+
+/*
+ * Our virtual mapping for the idmap-ed MMU-enable code. Must be
+ * shared across all the page-tables. Conveniently, we use the last
+ * possible page, where no kernel mapping will ever exist.
+ */
+#define TRAMPOLINE_VA          (HYP_PAGE_OFFSET_MASK & PAGE_MASK)
+
+#ifdef __ASSEMBLY__
+
+/*
+ * Convert a kernel VA into a HYP VA.
+ * reg: VA to be converted.
+ */
+.macro kern_hyp_va     reg
+       and     \reg, \reg, #HYP_PAGE_OFFSET_MASK
+.endm
+
+#else
+
+#include <asm/cachetype.h>
+#include <asm/cacheflush.h>
+
+#define KERN_TO_HYP(kva)       ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
+
+/*
+ * We currently only support a 40bit IPA.
+ */
+#define KVM_PHYS_SHIFT (40)
+#define KVM_PHYS_SIZE  (1UL << KVM_PHYS_SHIFT)
+#define KVM_PHYS_MASK  (KVM_PHYS_SIZE - 1UL)
+
+/* Make sure we get the right size, and thus the right alignment */
+#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
+#define S2_PGD_ORDER   get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+
+int create_hyp_mappings(void *from, void *to);
+int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
+void free_boot_hyp_pgd(void);
+void free_hyp_pgds(void);
+
+int kvm_alloc_stage2_pgd(struct kvm *kvm);
+void kvm_free_stage2_pgd(struct kvm *kvm);
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+                         phys_addr_t pa, unsigned long size);
+
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+
+phys_addr_t kvm_mmu_get_httbr(void);
+phys_addr_t kvm_mmu_get_boot_httbr(void);
+phys_addr_t kvm_get_idmap_vector(void);
+int kvm_mmu_init(void);
+void kvm_clear_hyp_idmap(void);
+
+#define        kvm_set_pte(ptep, pte)          set_pte(ptep, pte)
+#define        kvm_set_pmd(pmdp, pmd)          set_pmd(pmdp, pmd)
+
+static inline void kvm_clean_pgd(pgd_t *pgd) {}
+static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
+static inline void kvm_clean_pte(pte_t *pte) {}
+static inline void kvm_clean_pte_entry(pte_t *pte) {}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+       pte_val(*pte) |= PTE_S2_RDWR;
+}
+
+static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+{
+       pmd_val(*pmd) |= PMD_S2_RDWR;
+}
+
+#define kvm_pgd_addr_end(addr, end)    pgd_addr_end(addr, end)
+#define kvm_pud_addr_end(addr, end)    pud_addr_end(addr, end)
+#define kvm_pmd_addr_end(addr, end)    pmd_addr_end(addr, end)
+
+static inline bool kvm_page_empty(void *ptr)
+{
+       struct page *ptr_page = virt_to_page(ptr);
+       return page_count(ptr_page) == 1;
+}
+
+#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
+#ifndef CONFIG_ARM64_64K_PAGES
+#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+#else
+#define kvm_pmd_table_empty(pmdp) (0)
+#endif
+#define kvm_pud_table_empty(pudp) (0)
+
+
+struct kvm;
+
+#define kvm_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))
+
+static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+{
+       return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
+}
+
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+                                            unsigned long size)
+{
+       if (!vcpu_has_cache_enabled(vcpu))
+               kvm_flush_dcache_to_poc((void *)hva, size);
+
+       if (!icache_is_aliasing()) {            /* PIPT */
+               flush_icache_range(hva, hva + size);
+       } else if (!icache_is_aivivt()) {       /* non ASID-tagged VIVT */
+               /* any kind of VIPT cache */
+               __flush_icache_all();
+       }
+}
+
+#define kvm_virt_to_phys(x)            __virt_to_phys((unsigned long)(x))
+
+void stage2_flush_vm(struct kvm *kvm);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h
new file mode 100644 (file)
index 0000000..bc39e55
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_PSCI_H__
+#define __ARM64_KVM_PSCI_H__
+
+#define KVM_ARM_PSCI_0_1       1
+#define KVM_ARM_PSCI_0_2       2
+
+int kvm_psci_version(struct kvm_vcpu *vcpu);
+int kvm_psci_call(struct kvm_vcpu *vcpu);
+
+#endif /* __ARM64_KVM_PSCI_H__ */
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
new file mode 100644 (file)
index 0000000..13ce4cc
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * linux/arch/arm64/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+
+#define cpu_has_neon()         (1)
+
+#define kernel_neon_begin()    kernel_neon_begin_partial(32)
+
+void kernel_neon_begin_partial(u32 num_regs);
+void kernel_neon_end(void);
diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
new file mode 100644 (file)
index 0000000..fd189a5
--- /dev/null
@@ -0,0 +1,231 @@
+/*
+ *  Copied from arch/arm/include/asm/opcodes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_OPCODES_H
+#define __ASM_ARM_OPCODES_H
+
+#ifndef __ASSEMBLY__
+#include <linux/linkage.h>
+extern asmlinkage unsigned int arm_check_condition(u32 opcode, u64 psr);
+#endif
+
+#define ARM_OPCODE_CONDTEST_FAIL   0
+#define ARM_OPCODE_CONDTEST_PASS   1
+#define ARM_OPCODE_CONDTEST_UNCOND 2
+
+
+/*
+ * Assembler opcode byteswap helpers.
+ * These are only intended for use by this header: don't use them directly,
+ * because they will be suboptimal in most cases.
+ */
+#define ___asm_opcode_swab32(x) (      \
+         (((x) << 24) & 0xFF000000)    \
+       | (((x) <<  8) & 0x00FF0000)    \
+       | (((x) >>  8) & 0x0000FF00)    \
+       | (((x) >> 24) & 0x000000FF)    \
+)
+#define ___asm_opcode_swab16(x) (      \
+         (((x) << 8) & 0xFF00)         \
+       | (((x) >> 8) & 0x00FF)         \
+)
+#define ___asm_opcode_swahb32(x) (     \
+         (((x) << 8) & 0xFF00FF00)     \
+       | (((x) >> 8) & 0x00FF00FF)     \
+)
+#define ___asm_opcode_swahw32(x) (     \
+         (((x) << 16) & 0xFFFF0000)    \
+       | (((x) >> 16) & 0x0000FFFF)    \
+)
+#define ___asm_opcode_identity32(x) ((x) & 0xFFFFFFFF)
+#define ___asm_opcode_identity16(x) ((x) & 0xFFFF)
+
+
+/*
+ * Opcode byteswap helpers
+ *
+ * These macros help with converting instructions between a canonical integer
+ * format and in-memory representation, in an endianness-agnostic manner.
+ *
+ * __mem_to_opcode_*() convert from in-memory representation to canonical form.
+ * __opcode_to_mem_*() convert from canonical form to in-memory representation.
+ *
+ *
+ * Canonical instruction representation:
+ *
+ *     ARM:            0xKKLLMMNN
+ *     Thumb 16-bit:   0x0000KKLL, where KK < 0xE8
+ *     Thumb 32-bit:   0xKKLLMMNN, where KK >= 0xE8
+ *
+ * There is no way to distinguish an ARM instruction in canonical representation
+ * from a Thumb instruction (just as these cannot be distinguished in memory).
+ * Where this distinction is important, it needs to be tracked separately.
+ *
+ * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not
+ * represent any valid Thumb-2 instruction.  For this range,
+ * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false.
+ *
+ * The ___asm variants are intended only for use by this header, in situations
+ * involving inline assembler.  For .S files, the normal __opcode_*() macros
+ * should do the right thing.
+ */
+#ifdef __ASSEMBLY__
+
+#define ___opcode_swab32(x) ___asm_opcode_swab32(x)
+#define ___opcode_swab16(x) ___asm_opcode_swab16(x)
+#define ___opcode_swahb32(x) ___asm_opcode_swahb32(x)
+#define ___opcode_swahw32(x) ___asm_opcode_swahw32(x)
+#define ___opcode_identity32(x) ___asm_opcode_identity32(x)
+#define ___opcode_identity16(x) ___asm_opcode_identity16(x)
+
+#else /* ! __ASSEMBLY__ */
+
+#include <linux/types.h>
+#include <linux/swab.h>
+
+#define ___opcode_swab32(x) swab32(x)
+#define ___opcode_swab16(x) swab16(x)
+#define ___opcode_swahb32(x) swahb32(x)
+#define ___opcode_swahw32(x) swahw32(x)
+#define ___opcode_identity32(x) ((u32)(x))
+#define ___opcode_identity16(x) ((u16)(x))
+
+#endif /* ! __ASSEMBLY__ */
+
+
+#ifdef CONFIG_CPU_ENDIAN_BE8
+
+#define __opcode_to_mem_arm(x) ___opcode_swab32(x)
+#define __opcode_to_mem_thumb16(x) ___opcode_swab16(x)
+#define __opcode_to_mem_thumb32(x) ___opcode_swahb32(x)
+#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_swab32(x)
+#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_swab16(x)
+#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahb32(x)
+
+#else /* ! CONFIG_CPU_ENDIAN_BE8 */
+
+#define __opcode_to_mem_arm(x) ___opcode_identity32(x)
+#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x)
+#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x)
+#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x)
+#ifndef CONFIG_CPU_ENDIAN_BE32
+/*
+ * On BE32 systems, using 32-bit accesses to store Thumb instructions will not
+ * work in all cases, due to alignment constraints.  For now, a correct
+ * version is not provided for BE32.
+ */
+#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x)
+#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x)
+#endif
+
+#endif /* ! CONFIG_CPU_ENDIAN_BE8 */
+
+#define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x)
+#define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x)
+#ifndef CONFIG_CPU_ENDIAN_BE32
+#define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x)
+#endif
+
+/* Operations specific to Thumb opcodes */
+
+/* Instruction size checks: */
+#define __opcode_is_thumb32(x) (               \
+          ((x) & 0xF8000000) == 0xE8000000     \
+       || ((x) & 0xF0000000) == 0xF0000000     \
+)
+#define __opcode_is_thumb16(x) (                                       \
+          ((x) & 0xFFFF0000) == 0                                      \
+       && !(((x) & 0xF800) == 0xE800 || ((x) & 0xF000) == 0xF000)      \
+)
+
+/* Operations to construct or split 32-bit Thumb instructions: */
+#define __opcode_thumb32_first(x) (___opcode_identity16((x) >> 16))
+#define __opcode_thumb32_second(x) (___opcode_identity16(x))
+#define __opcode_thumb32_compose(first, second) (                      \
+         (___opcode_identity32(___opcode_identity16(first)) << 16)     \
+       | ___opcode_identity32(___opcode_identity16(second))            \
+)
+#define ___asm_opcode_thumb32_first(x) (___asm_opcode_identity16((x) >> 16))
+#define ___asm_opcode_thumb32_second(x) (___asm_opcode_identity16(x))
+#define ___asm_opcode_thumb32_compose(first, second) (                     \
+         (___asm_opcode_identity32(___asm_opcode_identity16(first)) << 16) \
+       | ___asm_opcode_identity32(___asm_opcode_identity16(second))        \
+)
+
+/*
+ * Opcode injection helpers
+ *
+ * In rare cases it is necessary to assemble an opcode which the
+ * assembler does not support directly, or which would normally be
+ * rejected because of the CFLAGS or AFLAGS used to build the affected
+ * file.
+ *
+ * Before using these macros, consider carefully whether it is feasible
+ * instead to change the build flags for your file, or whether it really
+ * makes sense to support old assembler versions when building that
+ * particular kernel feature.
+ *
+ * The macros defined here should only be used where there is no viable
+ * alternative.
+ *
+ *
+ * __inst_arm(x): emit the specified ARM opcode
+ * __inst_thumb16(x): emit the specified 16-bit Thumb opcode
+ * __inst_thumb32(x): emit the specified 32-bit Thumb opcode
+ *
+ * __inst_arm_thumb16(arm, thumb): emit either the specified arm or
+ *     16-bit Thumb opcode, depending on whether an ARM or Thumb-2
+ *     kernel is being built
+ *
+ * __inst_arm_thumb32(arm, thumb): emit either the specified arm or
+ *     32-bit Thumb opcode, depending on whether an ARM or Thumb-2
+ *     kernel is being built
+ *
+ *
+ * Note that using these macros directly is poor practice.  Instead, you
+ * should use them to define human-readable wrapper macros to encode the
+ * instructions that you care about.  In code which might run on ARMv7 or
+ * above, you can usually use the __inst_arm_thumb{16,32} macros to
+ * specify the ARM and Thumb alternatives at the same time.  This ensures
+ * that the correct opcode gets emitted depending on the instruction set
+ * used for the kernel build.
+ *
+ * Look at opcodes-virt.h for an example of how to use these macros.
+ */
+#include <linux/stringify.h>
+
+#define __inst_arm(x) ___inst_arm(___asm_opcode_to_mem_arm(x))
+#define __inst_thumb32(x) ___inst_thumb32(                             \
+       ___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_first(x)),   \
+       ___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_second(x))   \
+)
+#define __inst_thumb16(x) ___inst_thumb16(___asm_opcode_to_mem_thumb16(x))
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define __inst_arm_thumb16(arm_opcode, thumb_opcode) \
+       __inst_thumb16(thumb_opcode)
+#define __inst_arm_thumb32(arm_opcode, thumb_opcode) \
+       __inst_thumb32(thumb_opcode)
+#else
+#define __inst_arm_thumb16(arm_opcode, thumb_opcode) __inst_arm(arm_opcode)
+#define __inst_arm_thumb32(arm_opcode, thumb_opcode) __inst_arm(arm_opcode)
+#endif
+
+/* Helpers for the helpers.  Don't use these directly. */
+#ifdef __ASSEMBLY__
+#define ___inst_arm(x) .long x
+#define ___inst_thumb16(x) .short x
+#define ___inst_thumb32(first, second) .short first, second
+#else
+#define ___inst_arm(x) ".long " __stringify(x) "\n\t"
+#define ___inst_thumb16(x) ".short " __stringify(x) "\n\t"
+#define ___inst_thumb32(first, second) \
+       ".short " __stringify(first) ", " __stringify(second) "\n\t"
+#endif
+
+#endif /* __ASM_ARM_OPCODES_H */
index 2e9d83673ef623afe4a08d58805f05b5f47f2641..f7af66b54cb216931f23718cc75754d269acefc4 100644 (file)
@@ -85,6 +85,8 @@
 #define PTE_S2_RDONLY          (_AT(pteval_t, 1) << 6)   /* HAP[2:1] */
 #define PTE_S2_RDWR            (_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 
+#define PMD_S2_RDWR            (_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
+
 /*
  * Memory Attribute override for Stage-2 (MemAttr[3:0])
  */
 #define PTE_HYP                        PTE_USER
 
 /*
- * 40-bit physical address supported.
+ * Highest possible physical address supported.
  */
-#define PHYS_MASK_SHIFT                (40)
+#define PHYS_MASK_SHIFT                (48)
 #define PHYS_MASK              ((UL(1) << PHYS_MASK_SHIFT) - 1)
 
 /*
 #define TCR_SHARED             ((UL(3) << 12) | (UL(3) << 28))
 #define TCR_TG0_64K            (UL(1) << 14)
 #define TCR_TG1_64K            (UL(1) << 30)
-#define TCR_IPS_40BIT          (UL(2) << 32)
 #define TCR_ASID16             (UL(1) << 36)
 #define TCR_TBI0               (UL(1) << 37)
 
index 45b20cd6cbca3ff1bc33df88f46858be36df8e15..001b764fffa16ca75063a1076cabbf7e4bac79a6 100644 (file)
@@ -136,8 +136,8 @@ extern struct task_struct *cpu_switch_to(struct task_struct *prev,
 #define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
 
-#define KSTK_EIP(tsk)  task_pt_regs(tsk)->pc
-#define KSTK_ESP(tsk)  task_pt_regs(tsk)->sp
+#define KSTK_EIP(tsk)  ((unsigned long)task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk)  user_stack_pointer(task_pt_regs(tsk))
 
 /*
  * Prefetching support
index a429b5940be2e614a8149fcc7c7acac9d2b4fad0..c29d88526405f77683765ed8b11d02f911e2e4fd 100644 (file)
 #define COMPAT_PT_TEXT_ADDR            0x10000
 #define COMPAT_PT_DATA_ADDR            0x10004
 #define COMPAT_PT_TEXT_END_ADDR                0x10008
+
+/*
+ * used to skip a system call when tracer changes its number to -1
+ * with ptrace(PTRACE_SET_SYSCALL)
+ */
+#define RET_SKIP_SYSCALL       -1
+#define RET_SKIP_SYSCALL_TRACE -2
+#define IS_SKIP_SYSCALL(no)    ((int)(no & 0xffffffff) == -1)
+
 #ifndef __ASSEMBLY__
 
 /* sizeof(struct user) for AArch32 */
@@ -133,7 +142,7 @@ struct pt_regs {
        (!((regs)->pstate & PSR_F_BIT))
 
 #define user_stack_pointer(regs) \
-       (!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp)
+       (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
 
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
@@ -178,5 +187,13 @@ extern unsigned long profile_pc(struct pt_regs *regs);
 #define profile_pc(regs) instruction_pointer(regs)
 #endif
 
+/*
+ * True if instr is a 32-bit thumb instruction. This works if instr
+ * is the first or only half-word of a thumb instruction. It also works
+ * when instr holds all 32-bits of a wide thumb instruction if stored
+ * in the form (first_half<<16)|(second_half)
+ */
+#define is_wide_instruction(instr)     ((unsigned)(instr) >= 0xe800)
+
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/arm64/include/asm/seccomp.h b/arch/arm64/include/asm/seccomp.h
new file mode 100644 (file)
index 0000000..bec3a43
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * arch/arm64/include/asm/seccomp.h
+ *
+ * Copyright (C) 2014 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi <at> linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm/unistd.h>
+
+#ifdef CONFIG_COMPAT
+#define __NR_seccomp_read_32           __NR_compat_read
+#define __NR_seccomp_write_32          __NR_compat_write
+#define __NR_seccomp_exit_32           __NR_compat_exit
+#define __NR_seccomp_sigreturn_32      __NR_compat_rt_sigreturn
+#endif /* CONFIG_COMPAT */
+
+#include <asm-generic/seccomp.h>
+
+#endif /* _ASM_SECCOMP_H */
index 383771eb0b87b5c540b49bf0553ff99e4ff907b8..709a574468f01a7fab6f20683310aee5f1689e3c 100644 (file)
@@ -16,6 +16,8 @@
 #ifndef __ASM_SYSCALL_H
 #define __ASM_SYSCALL_H
 
+#include <uapi/linux/audit.h>
+#include <linux/compat.h>
 #include <linux/err.h>
 
 extern const void *sys_call_table[];
@@ -105,4 +107,16 @@ static inline void syscall_set_arguments(struct task_struct *task,
        memcpy(&regs->regs[i], args, n * sizeof(args[0]));
 }
 
+/*
+ * We don't care about endianness (__AUDIT_ARCH_LE bit) here because
+ * AArch64 has the same system calls both on little- and big- endian.
+ */
+static inline int syscall_get_arch(void)
+{
+       if (is_compat_task())
+               return AUDIT_ARCH_ARM;
+
+       return AUDIT_ARCH_AARCH64;
+}
+
 #endif /* __ASM_SYSCALL_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
new file mode 100644 (file)
index 0000000..5c89df0
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Macros for accessing system registers with older binutils.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_SYSREG_H
+#define __ASM_SYSREG_H
+
+#define sys_reg(op0, op1, crn, crm, op2) \
+       ((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+
+#ifdef __ASSEMBLY__
+
+       .irp    num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
+       .equ    __reg_num_x\num, \num
+       .endr
+       .equ    __reg_num_xzr, 31
+
+       .macro  mrs_s, rt, sreg
+       .inst   0xd5300000|(\sreg)|(__reg_num_\rt)
+       .endm
+
+       .macro  msr_s, sreg, rt
+       .inst   0xd5100000|(\sreg)|(__reg_num_\rt)
+       .endm
+
+#else
+
+asm(
+"      .irp    num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
+"      .equ    __reg_num_x\\num, \\num\n"
+"      .endr\n"
+"      .equ    __reg_num_xzr, 31\n"
+"\n"
+"      .macro  mrs_s, rt, sreg\n"
+"      .inst   0xd5300000|(\\sreg)|(__reg_num_\\rt)\n"
+"      .endm\n"
+"\n"
+"      .macro  msr_s, sreg, rt\n"
+"      .inst   0xd5100000|(\\sreg)|(__reg_num_\\rt)\n"
+"      .endm\n"
+);
+
+#endif
+
+#endif /* __ASM_SYSREG_H */
index 59f151f8241d599724d0317dad48fc3437c76080..205d81b170235f3583856fc248405e66cbc90b5b 100644 (file)
@@ -109,6 +109,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SIGPENDING         0
 #define TIF_NEED_RESCHED       1
 #define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
+#define TIF_FOREIGN_FPSTATE    3       /* CPU's FP state is not current's */
 #define TIF_SYSCALL_TRACE      8
 #define TIF_SYSCALL_AUDIT      9
 #define TIF_SYSCALL_TRACEPOINT 10
@@ -128,10 +129,15 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT        (1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
+#define _TIF_FOREIGN_FPSTATE   (1 << TIF_FOREIGN_FPSTATE)
+#define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEPOINT        (1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SECCOMP           (1 << TIF_SECCOMP)
 #define _TIF_32BIT             (1 << TIF_32BIT)
 
 #define _TIF_WORK_MASK         (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-                                _TIF_NOTIFY_RESUME)
+                                _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
 
 #define _TIF_SYSCALL_WORK      (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
                                 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
index 10ca8ff93cc25ca99a00b0bd3ed6993daea633a1..75f51eadb9eb01bcfbc4e98fc19c5f5e5fdac4d3 100644 (file)
 #ifndef __ASM_TRAP_H
 #define __ASM_TRAP_H
 
+#include <linux/list.h>
+
+struct undef_hook {
+       struct list_head node;
+       u32 instr_mask;
+       u32 instr_val;
+       u32 pstate_mask;
+       u32 pstate_val;
+       int (*fn)(struct pt_regs *regs, unsigned int instr);
+};
+
+void register_undef_hook(struct undef_hook *hook);
+
 static inline int in_exception_text(unsigned long ptr)
 {
        extern char __exception_text_start[];
index c335479c26381ccb32370a5e01c25471717ba653..f67d0ec20f946987ef2b8c025b2936c43e3c169f 100644 (file)
 #define __ARCH_WANT_COMPAT_SYS_SENDFILE
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_SYS_VFORK
+
+/*
+ * Compat syscall numbers used by the AArch64 kernel.
+ */
+#define __NR_compat_restart_syscall    0
+#define __NR_compat_exit               1
+#define __NR_compat_read               3
+#define __NR_compat_write              4
+#define __NR_compat_sigreturn          119
+#define __NR_compat_rt_sigreturn       173
+
+/*
+ * The following SVCs are ARM private.
+ */
+#define __ARM_NR_COMPAT_BASE           0x0f0000
+#define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
+#define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
+
+#define __NR_compat_syscalls           384
 #endif
+
 #define __ARCH_WANT_SYS_CLONE
 #include <uapi/asm/unistd.h>
 
index 58125bf008d3e647e5c89e6ba534827f0a0d5629..76d094565090582ebfb35527db95786dc3019fcf 100644 (file)
 #define __SYSCALL(x, y)
 #endif
 
-__SYSCALL(0,   sys_restart_syscall)
-__SYSCALL(1,   sys_exit)
-__SYSCALL(2,   sys_fork)
-__SYSCALL(3,   sys_read)
-__SYSCALL(4,   sys_write)
-__SYSCALL(5,   compat_sys_open)
-__SYSCALL(6,   sys_close)
-__SYSCALL(7,   sys_ni_syscall)                 /* 7 was sys_waitpid */
-__SYSCALL(8,   sys_creat)
-__SYSCALL(9,   sys_link)
-__SYSCALL(10,  sys_unlink)
-__SYSCALL(11,  compat_sys_execve)
-__SYSCALL(12,  sys_chdir)
-__SYSCALL(13,  sys_ni_syscall)                 /* 13 was sys_time */
-__SYSCALL(14,  sys_mknod)
-__SYSCALL(15,  sys_chmod)
-__SYSCALL(16,  sys_lchown16)
-__SYSCALL(17,  sys_ni_syscall)                 /* 17 was sys_break */
-__SYSCALL(18,  sys_ni_syscall)                 /* 18 was sys_stat */
-__SYSCALL(19,  compat_sys_lseek)
-__SYSCALL(20,  sys_getpid)
-__SYSCALL(21,  compat_sys_mount)
-__SYSCALL(22,  sys_ni_syscall)                 /* 22 was sys_umount */
-__SYSCALL(23,  sys_setuid16)
-__SYSCALL(24,  sys_getuid16)
-__SYSCALL(25,  sys_ni_syscall)                 /* 25 was sys_stime */
-__SYSCALL(26,  compat_sys_ptrace)
-__SYSCALL(27,  sys_ni_syscall)                 /* 27 was sys_alarm */
-__SYSCALL(28,  sys_ni_syscall)                 /* 28 was sys_fstat */
-__SYSCALL(29,  sys_pause)
-__SYSCALL(30,  sys_ni_syscall)                 /* 30 was sys_utime */
-__SYSCALL(31,  sys_ni_syscall)                 /* 31 was sys_stty */
-__SYSCALL(32,  sys_ni_syscall)                 /* 32 was sys_gtty */
-__SYSCALL(33,  sys_access)
-__SYSCALL(34,  sys_nice)
-__SYSCALL(35,  sys_ni_syscall)                 /* 35 was sys_ftime */
-__SYSCALL(36,  sys_sync)
-__SYSCALL(37,  sys_kill)
-__SYSCALL(38,  sys_rename)
-__SYSCALL(39,  sys_mkdir)
-__SYSCALL(40,  sys_rmdir)
-__SYSCALL(41,  sys_dup)
-__SYSCALL(42,  sys_pipe)
-__SYSCALL(43,  compat_sys_times)
-__SYSCALL(44,  sys_ni_syscall)                 /* 44 was sys_prof */
-__SYSCALL(45,  sys_brk)
-__SYSCALL(46,  sys_setgid16)
-__SYSCALL(47,  sys_getgid16)
-__SYSCALL(48,  sys_ni_syscall)                 /* 48 was sys_signal */
-__SYSCALL(49,  sys_geteuid16)
-__SYSCALL(50,  sys_getegid16)
-__SYSCALL(51,  sys_acct)
-__SYSCALL(52,  sys_umount)
-__SYSCALL(53,  sys_ni_syscall)                 /* 53 was sys_lock */
-__SYSCALL(54,  compat_sys_ioctl)
-__SYSCALL(55,  compat_sys_fcntl)
-__SYSCALL(56,  sys_ni_syscall)                 /* 56 was sys_mpx */
-__SYSCALL(57,  sys_setpgid)
-__SYSCALL(58,  sys_ni_syscall)                 /* 58 was sys_ulimit */
-__SYSCALL(59,  sys_ni_syscall)                 /* 59 was sys_olduname */
-__SYSCALL(60,  sys_umask)
-__SYSCALL(61,  sys_chroot)
-__SYSCALL(62,  compat_sys_ustat)
-__SYSCALL(63,  sys_dup2)
-__SYSCALL(64,  sys_getppid)
-__SYSCALL(65,  sys_getpgrp)
-__SYSCALL(66,  sys_setsid)
-__SYSCALL(67,  compat_sys_sigaction)
-__SYSCALL(68,  sys_ni_syscall)                 /* 68 was sys_sgetmask */
-__SYSCALL(69,  sys_ni_syscall)                 /* 69 was sys_ssetmask */
-__SYSCALL(70,  sys_setreuid16)
-__SYSCALL(71,  sys_setregid16)
-__SYSCALL(72,  sys_sigsuspend)
-__SYSCALL(73,  compat_sys_sigpending)
-__SYSCALL(74,  sys_sethostname)
-__SYSCALL(75,  compat_sys_setrlimit)
-__SYSCALL(76,  sys_ni_syscall)                 /* 76 was compat_sys_getrlimit */
-__SYSCALL(77,  compat_sys_getrusage)
-__SYSCALL(78,  compat_sys_gettimeofday)
-__SYSCALL(79,  compat_sys_settimeofday)
-__SYSCALL(80,  sys_getgroups16)
-__SYSCALL(81,  sys_setgroups16)
-__SYSCALL(82,  sys_ni_syscall)                 /* 82 was compat_sys_select */
-__SYSCALL(83,  sys_symlink)
-__SYSCALL(84,  sys_ni_syscall)                 /* 84 was sys_lstat */
-__SYSCALL(85,  sys_readlink)
-__SYSCALL(86,  sys_uselib)
-__SYSCALL(87,  sys_swapon)
-__SYSCALL(88,  sys_reboot)
-__SYSCALL(89,  sys_ni_syscall)                 /* 89 was sys_readdir */
-__SYSCALL(90,  sys_ni_syscall)                 /* 90 was sys_mmap */
-__SYSCALL(91,  sys_munmap)
-__SYSCALL(92,  compat_sys_truncate)
-__SYSCALL(93,  compat_sys_ftruncate)
-__SYSCALL(94,  sys_fchmod)
-__SYSCALL(95,  sys_fchown16)
-__SYSCALL(96,  sys_getpriority)
-__SYSCALL(97,  sys_setpriority)
-__SYSCALL(98,  sys_ni_syscall)                 /* 98 was sys_profil */
-__SYSCALL(99,  compat_sys_statfs)
-__SYSCALL(100, compat_sys_fstatfs)
-__SYSCALL(101, sys_ni_syscall)                 /* 101 was sys_ioperm */
-__SYSCALL(102, sys_ni_syscall)                 /* 102 was sys_socketcall */
-__SYSCALL(103, sys_syslog)
-__SYSCALL(104, compat_sys_setitimer)
-__SYSCALL(105, compat_sys_getitimer)
-__SYSCALL(106, compat_sys_newstat)
-__SYSCALL(107, compat_sys_newlstat)
-__SYSCALL(108, compat_sys_newfstat)
-__SYSCALL(109, sys_ni_syscall)                 /* 109 was sys_uname */
-__SYSCALL(110, sys_ni_syscall)                 /* 110 was sys_iopl */
-__SYSCALL(111, sys_vhangup)
-__SYSCALL(112, sys_ni_syscall)                 /* 112 was sys_idle */
-__SYSCALL(113, sys_ni_syscall)                 /* 113 was sys_syscall */
-__SYSCALL(114, compat_sys_wait4)
-__SYSCALL(115, sys_swapoff)
-__SYSCALL(116, compat_sys_sysinfo)
-__SYSCALL(117, sys_ni_syscall)                 /* 117 was sys_ipc */
-__SYSCALL(118, sys_fsync)
-__SYSCALL(119, compat_sys_sigreturn_wrapper)
-__SYSCALL(120, sys_clone)
-__SYSCALL(121, sys_setdomainname)
-__SYSCALL(122, sys_newuname)
-__SYSCALL(123, sys_ni_syscall)                 /* 123 was sys_modify_ldt */
-__SYSCALL(124, compat_sys_adjtimex)
-__SYSCALL(125, sys_mprotect)
-__SYSCALL(126, compat_sys_sigprocmask)
-__SYSCALL(127, sys_ni_syscall)                 /* 127 was sys_create_module */
-__SYSCALL(128, sys_init_module)
-__SYSCALL(129, sys_delete_module)
-__SYSCALL(130, sys_ni_syscall)                 /* 130 was sys_get_kernel_syms */
-__SYSCALL(131, sys_quotactl)
-__SYSCALL(132, sys_getpgid)
-__SYSCALL(133, sys_fchdir)
-__SYSCALL(134, sys_bdflush)
-__SYSCALL(135, sys_sysfs)
-__SYSCALL(136, sys_personality)
-__SYSCALL(137, sys_ni_syscall)                 /* 137 was sys_afs_syscall */
-__SYSCALL(138, sys_setfsuid16)
-__SYSCALL(139, sys_setfsgid16)
-__SYSCALL(140, sys_llseek)
-__SYSCALL(141, compat_sys_getdents)
-__SYSCALL(142, compat_sys_select)
-__SYSCALL(143, sys_flock)
-__SYSCALL(144, sys_msync)
-__SYSCALL(145, compat_sys_readv)
-__SYSCALL(146, compat_sys_writev)
-__SYSCALL(147, sys_getsid)
-__SYSCALL(148, sys_fdatasync)
-__SYSCALL(149, compat_sys_sysctl)
-__SYSCALL(150, sys_mlock)
-__SYSCALL(151, sys_munlock)
-__SYSCALL(152, sys_mlockall)
-__SYSCALL(153, sys_munlockall)
-__SYSCALL(154, sys_sched_setparam)
-__SYSCALL(155, sys_sched_getparam)
-__SYSCALL(156, sys_sched_setscheduler)
-__SYSCALL(157, sys_sched_getscheduler)
-__SYSCALL(158, sys_sched_yield)
-__SYSCALL(159, sys_sched_get_priority_max)
-__SYSCALL(160, sys_sched_get_priority_min)
-__SYSCALL(161, compat_sys_sched_rr_get_interval)
-__SYSCALL(162, compat_sys_nanosleep)
-__SYSCALL(163, sys_mremap)
-__SYSCALL(164, sys_setresuid16)
-__SYSCALL(165, sys_getresuid16)
-__SYSCALL(166, sys_ni_syscall)                 /* 166 was sys_vm86 */
-__SYSCALL(167, sys_ni_syscall)                 /* 167 was sys_query_module */
-__SYSCALL(168, sys_poll)
-__SYSCALL(169, sys_ni_syscall)
-__SYSCALL(170, sys_setresgid16)
-__SYSCALL(171, sys_getresgid16)
-__SYSCALL(172, sys_prctl)
-__SYSCALL(173, compat_sys_rt_sigreturn_wrapper)
-__SYSCALL(174, compat_sys_rt_sigaction)
-__SYSCALL(175, compat_sys_rt_sigprocmask)
-__SYSCALL(176, compat_sys_rt_sigpending)
-__SYSCALL(177, compat_sys_rt_sigtimedwait)
-__SYSCALL(178, compat_sys_rt_sigqueueinfo)
-__SYSCALL(179, compat_sys_rt_sigsuspend)
-__SYSCALL(180, compat_sys_pread64_wrapper)
-__SYSCALL(181, compat_sys_pwrite64_wrapper)
-__SYSCALL(182, sys_chown16)
-__SYSCALL(183, sys_getcwd)
-__SYSCALL(184, sys_capget)
-__SYSCALL(185, sys_capset)
-__SYSCALL(186, compat_sys_sigaltstack)
-__SYSCALL(187, compat_sys_sendfile)
-__SYSCALL(188, sys_ni_syscall)                 /* 188 reserved */
-__SYSCALL(189, sys_ni_syscall)                 /* 189 reserved */
-__SYSCALL(190, sys_vfork)
-__SYSCALL(191, compat_sys_getrlimit)           /* SuS compliant getrlimit */
-__SYSCALL(192, sys_mmap_pgoff)
-__SYSCALL(193, compat_sys_truncate64_wrapper)
-__SYSCALL(194, compat_sys_ftruncate64_wrapper)
-__SYSCALL(195, sys_stat64)
-__SYSCALL(196, sys_lstat64)
-__SYSCALL(197, sys_fstat64)
-__SYSCALL(198, sys_lchown)
-__SYSCALL(199, sys_getuid)
-__SYSCALL(200, sys_getgid)
-__SYSCALL(201, sys_geteuid)
-__SYSCALL(202, sys_getegid)
-__SYSCALL(203, sys_setreuid)
-__SYSCALL(204, sys_setregid)
-__SYSCALL(205, sys_getgroups)
-__SYSCALL(206, sys_setgroups)
-__SYSCALL(207, sys_fchown)
-__SYSCALL(208, sys_setresuid)
-__SYSCALL(209, sys_getresuid)
-__SYSCALL(210, sys_setresgid)
-__SYSCALL(211, sys_getresgid)
-__SYSCALL(212, sys_chown)
-__SYSCALL(213, sys_setuid)
-__SYSCALL(214, sys_setgid)
-__SYSCALL(215, sys_setfsuid)
-__SYSCALL(216, sys_setfsgid)
-__SYSCALL(217, compat_sys_getdents64)
-__SYSCALL(218, sys_pivot_root)
-__SYSCALL(219, sys_mincore)
-__SYSCALL(220, sys_madvise)
-__SYSCALL(221, compat_sys_fcntl64)
-__SYSCALL(222, sys_ni_syscall)                 /* 222 for tux */
-__SYSCALL(223, sys_ni_syscall)                 /* 223 is unused */
-__SYSCALL(224, sys_gettid)
-__SYSCALL(225, compat_sys_readahead_wrapper)
-__SYSCALL(226, sys_setxattr)
-__SYSCALL(227, sys_lsetxattr)
-__SYSCALL(228, sys_fsetxattr)
-__SYSCALL(229, sys_getxattr)
-__SYSCALL(230, sys_lgetxattr)
-__SYSCALL(231, sys_fgetxattr)
-__SYSCALL(232, sys_listxattr)
-__SYSCALL(233, sys_llistxattr)
-__SYSCALL(234, sys_flistxattr)
-__SYSCALL(235, sys_removexattr)
-__SYSCALL(236, sys_lremovexattr)
-__SYSCALL(237, sys_fremovexattr)
-__SYSCALL(238, sys_tkill)
-__SYSCALL(239, sys_sendfile64)
-__SYSCALL(240, compat_sys_futex)
-__SYSCALL(241, compat_sys_sched_setaffinity)
-__SYSCALL(242, compat_sys_sched_getaffinity)
-__SYSCALL(243, compat_sys_io_setup)
-__SYSCALL(244, sys_io_destroy)
-__SYSCALL(245, compat_sys_io_getevents)
-__SYSCALL(246, compat_sys_io_submit)
-__SYSCALL(247, sys_io_cancel)
-__SYSCALL(248, sys_exit_group)
-__SYSCALL(249, compat_sys_lookup_dcookie)
-__SYSCALL(250, sys_epoll_create)
-__SYSCALL(251, sys_epoll_ctl)
-__SYSCALL(252, sys_epoll_wait)
-__SYSCALL(253, sys_remap_file_pages)
-__SYSCALL(254, sys_ni_syscall)                 /* 254 for set_thread_area */
-__SYSCALL(255, sys_ni_syscall)                 /* 255 for get_thread_area */
-__SYSCALL(256, sys_set_tid_address)
-__SYSCALL(257, compat_sys_timer_create)
-__SYSCALL(258, compat_sys_timer_settime)
-__SYSCALL(259, compat_sys_timer_gettime)
-__SYSCALL(260, sys_timer_getoverrun)
-__SYSCALL(261, sys_timer_delete)
-__SYSCALL(262, compat_sys_clock_settime)
-__SYSCALL(263, compat_sys_clock_gettime)
-__SYSCALL(264, compat_sys_clock_getres)
-__SYSCALL(265, compat_sys_clock_nanosleep)
-__SYSCALL(266, compat_sys_statfs64_wrapper)
-__SYSCALL(267, compat_sys_fstatfs64_wrapper)
-__SYSCALL(268, sys_tgkill)
-__SYSCALL(269, compat_sys_utimes)
-__SYSCALL(270, compat_sys_fadvise64_64_wrapper)
-__SYSCALL(271, sys_pciconfig_iobase)
-__SYSCALL(272, sys_pciconfig_read)
-__SYSCALL(273, sys_pciconfig_write)
-__SYSCALL(274, compat_sys_mq_open)
-__SYSCALL(275, sys_mq_unlink)
-__SYSCALL(276, compat_sys_mq_timedsend)
-__SYSCALL(277, compat_sys_mq_timedreceive)
-__SYSCALL(278, compat_sys_mq_notify)
-__SYSCALL(279, compat_sys_mq_getsetattr)
-__SYSCALL(280, compat_sys_waitid)
-__SYSCALL(281, sys_socket)
-__SYSCALL(282, sys_bind)
-__SYSCALL(283, sys_connect)
-__SYSCALL(284, sys_listen)
-__SYSCALL(285, sys_accept)
-__SYSCALL(286, sys_getsockname)
-__SYSCALL(287, sys_getpeername)
-__SYSCALL(288, sys_socketpair)
-__SYSCALL(289, sys_send)
-__SYSCALL(290, sys_sendto)
-__SYSCALL(291, compat_sys_recv)
-__SYSCALL(292, compat_sys_recvfrom)
-__SYSCALL(293, sys_shutdown)
-__SYSCALL(294, compat_sys_setsockopt)
-__SYSCALL(295, compat_sys_getsockopt)
-__SYSCALL(296, compat_sys_sendmsg)
-__SYSCALL(297, compat_sys_recvmsg)
-__SYSCALL(298, sys_semop)
-__SYSCALL(299, sys_semget)
-__SYSCALL(300, compat_sys_semctl)
-__SYSCALL(301, compat_sys_msgsnd)
-__SYSCALL(302, compat_sys_msgrcv)
-__SYSCALL(303, sys_msgget)
-__SYSCALL(304, compat_sys_msgctl)
-__SYSCALL(305, compat_sys_shmat)
-__SYSCALL(306, sys_shmdt)
-__SYSCALL(307, sys_shmget)
-__SYSCALL(308, compat_sys_shmctl)
-__SYSCALL(309, sys_add_key)
-__SYSCALL(310, sys_request_key)
-__SYSCALL(311, compat_sys_keyctl)
-__SYSCALL(312, compat_sys_semtimedop)
-__SYSCALL(313, sys_ni_syscall)
-__SYSCALL(314, sys_ioprio_set)
-__SYSCALL(315, sys_ioprio_get)
-__SYSCALL(316, sys_inotify_init)
-__SYSCALL(317, sys_inotify_add_watch)
-__SYSCALL(318, sys_inotify_rm_watch)
-__SYSCALL(319, compat_sys_mbind)
-__SYSCALL(320, compat_sys_get_mempolicy)
-__SYSCALL(321, compat_sys_set_mempolicy)
-__SYSCALL(322, compat_sys_openat)
-__SYSCALL(323, sys_mkdirat)
-__SYSCALL(324, sys_mknodat)
-__SYSCALL(325, sys_fchownat)
-__SYSCALL(326, compat_sys_futimesat)
-__SYSCALL(327, sys_fstatat64)
-__SYSCALL(328, sys_unlinkat)
-__SYSCALL(329, sys_renameat)
-__SYSCALL(330, sys_linkat)
-__SYSCALL(331, sys_symlinkat)
-__SYSCALL(332, sys_readlinkat)
-__SYSCALL(333, sys_fchmodat)
-__SYSCALL(334, sys_faccessat)
-__SYSCALL(335, compat_sys_pselect6)
-__SYSCALL(336, compat_sys_ppoll)
-__SYSCALL(337, sys_unshare)
-__SYSCALL(338, compat_sys_set_robust_list)
-__SYSCALL(339, compat_sys_get_robust_list)
-__SYSCALL(340, sys_splice)
-__SYSCALL(341, compat_sys_sync_file_range2_wrapper)
-__SYSCALL(342, sys_tee)
-__SYSCALL(343, compat_sys_vmsplice)
-__SYSCALL(344, compat_sys_move_pages)
-__SYSCALL(345, sys_getcpu)
-__SYSCALL(346, compat_sys_epoll_pwait)
-__SYSCALL(347, compat_sys_kexec_load)
-__SYSCALL(348, compat_sys_utimensat)
-__SYSCALL(349, compat_sys_signalfd)
-__SYSCALL(350, sys_timerfd_create)
-__SYSCALL(351, sys_eventfd)
-__SYSCALL(352, compat_sys_fallocate_wrapper)
-__SYSCALL(353, compat_sys_timerfd_settime)
-__SYSCALL(354, compat_sys_timerfd_gettime)
-__SYSCALL(355, compat_sys_signalfd4)
-__SYSCALL(356, sys_eventfd2)
-__SYSCALL(357, sys_epoll_create1)
-__SYSCALL(358, sys_dup3)
-__SYSCALL(359, sys_pipe2)
-__SYSCALL(360, sys_inotify_init1)
-__SYSCALL(361, compat_sys_preadv)
-__SYSCALL(362, compat_sys_pwritev)
-__SYSCALL(363, compat_sys_rt_tgsigqueueinfo)
-__SYSCALL(364, sys_perf_event_open)
-__SYSCALL(365, compat_sys_recvmmsg)
-__SYSCALL(366, sys_accept4)
-__SYSCALL(367, sys_fanotify_init)
-__SYSCALL(368, compat_sys_fanotify_mark)
-__SYSCALL(369, sys_prlimit64)
-__SYSCALL(370, sys_name_to_handle_at)
-__SYSCALL(371, compat_sys_open_by_handle_at)
-__SYSCALL(372, compat_sys_clock_adjtime)
-__SYSCALL(373, sys_syncfs)
-__SYSCALL(374, compat_sys_sendmmsg)
-__SYSCALL(375, sys_setns)
-__SYSCALL(376, compat_sys_process_vm_readv)
-__SYSCALL(377, compat_sys_process_vm_writev)
-__SYSCALL(378, sys_ni_syscall)                 /* 378 for kcmp */
-
-#define __NR_compat_syscalls           379
-
-/*
- * Compat syscall numbers used by the AArch64 kernel.
- */
-#define __NR_compat_restart_syscall    0
-#define __NR_compat_sigreturn          119
-#define __NR_compat_rt_sigreturn       173
-
-
-/*
- * The following SVCs are ARM private.
- */
-#define __ARM_NR_COMPAT_BASE           0x0f0000
-#define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
-#define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
+#define __NR_restart_syscall 0
+__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
+#define __NR_exit 1
+__SYSCALL(__NR_exit, sys_exit)
+#define __NR_fork 2
+__SYSCALL(__NR_fork, sys_fork)
+#define __NR_read 3
+__SYSCALL(__NR_read, sys_read)
+#define __NR_write 4
+__SYSCALL(__NR_write, sys_write)
+#define __NR_open 5
+__SYSCALL(__NR_open, compat_sys_open)
+#define __NR_close 6
+__SYSCALL(__NR_close, sys_close)
+                       /* 7 was sys_waitpid */
+__SYSCALL(7, sys_ni_syscall)
+#define __NR_creat 8
+__SYSCALL(__NR_creat, sys_creat)
+#define __NR_link 9
+__SYSCALL(__NR_link, sys_link)
+#define __NR_unlink 10
+__SYSCALL(__NR_unlink, sys_unlink)
+#define __NR_execve 11
+__SYSCALL(__NR_execve, compat_sys_execve)
+#define __NR_chdir 12
+__SYSCALL(__NR_chdir, sys_chdir)
+                       /* 13 was sys_time */
+__SYSCALL(13, sys_ni_syscall)
+#define __NR_mknod 14
+__SYSCALL(__NR_mknod, sys_mknod)
+#define __NR_chmod 15
+__SYSCALL(__NR_chmod, sys_chmod)
+#define __NR_lchown 16
+__SYSCALL(__NR_lchown, sys_lchown16)
+                       /* 17 was sys_break */
+__SYSCALL(17, sys_ni_syscall)
+                       /* 18 was sys_stat */
+__SYSCALL(18, sys_ni_syscall)
+#define __NR_lseek 19
+__SYSCALL(__NR_lseek, compat_sys_lseek)
+#define __NR_getpid 20
+__SYSCALL(__NR_getpid, sys_getpid)
+#define __NR_mount 21
+__SYSCALL(__NR_mount, compat_sys_mount)
+                       /* 22 was sys_umount */
+__SYSCALL(22, sys_ni_syscall)
+#define __NR_setuid 23
+__SYSCALL(__NR_setuid, sys_setuid16)
+#define __NR_getuid 24
+__SYSCALL(__NR_getuid, sys_getuid16)
+                       /* 25 was sys_stime */
+__SYSCALL(25, sys_ni_syscall)
+#define __NR_ptrace 26
+__SYSCALL(__NR_ptrace, compat_sys_ptrace)
+                       /* 27 was sys_alarm */
+__SYSCALL(27, sys_ni_syscall)
+                       /* 28 was sys_fstat */
+__SYSCALL(28, sys_ni_syscall)
+#define __NR_pause 29
+__SYSCALL(__NR_pause, sys_pause)
+                       /* 30 was sys_utime */
+__SYSCALL(30, sys_ni_syscall)
+                       /* 31 was sys_stty */
+__SYSCALL(31, sys_ni_syscall)
+                       /* 32 was sys_gtty */
+__SYSCALL(32, sys_ni_syscall)
+#define __NR_access 33
+__SYSCALL(__NR_access, sys_access)
+#define __NR_nice 34
+__SYSCALL(__NR_nice, sys_nice)
+                       /* 35 was sys_ftime */
+__SYSCALL(35, sys_ni_syscall)
+#define __NR_sync 36
+__SYSCALL(__NR_sync, sys_sync)
+#define __NR_kill 37
+__SYSCALL(__NR_kill, sys_kill)
+#define __NR_rename 38
+__SYSCALL(__NR_rename, sys_rename)
+#define __NR_mkdir 39
+__SYSCALL(__NR_mkdir, sys_mkdir)
+#define __NR_rmdir 40
+__SYSCALL(__NR_rmdir, sys_rmdir)
+#define __NR_dup 41
+__SYSCALL(__NR_dup, sys_dup)
+#define __NR_pipe 42
+__SYSCALL(__NR_pipe, sys_pipe)
+#define __NR_times 43
+__SYSCALL(__NR_times, compat_sys_times)
+                       /* 44 was sys_prof */
+__SYSCALL(44, sys_ni_syscall)
+#define __NR_brk 45
+__SYSCALL(__NR_brk, sys_brk)
+#define __NR_setgid 46
+__SYSCALL(__NR_setgid, sys_setgid16)
+#define __NR_getgid 47
+__SYSCALL(__NR_getgid, sys_getgid16)
+                       /* 48 was sys_signal */
+__SYSCALL(48, sys_ni_syscall)
+#define __NR_geteuid 49
+__SYSCALL(__NR_geteuid, sys_geteuid16)
+#define __NR_getegid 50
+__SYSCALL(__NR_getegid, sys_getegid16)
+#define __NR_acct 51
+__SYSCALL(__NR_acct, sys_acct)
+#define __NR_umount2 52
+__SYSCALL(__NR_umount2, sys_umount)
+                       /* 53 was sys_lock */
+__SYSCALL(53, sys_ni_syscall)
+#define __NR_ioctl 54
+__SYSCALL(__NR_ioctl, compat_sys_ioctl)
+#define __NR_fcntl 55
+__SYSCALL(__NR_fcntl, compat_sys_fcntl)
+                       /* 56 was sys_mpx */
+__SYSCALL(56, sys_ni_syscall)
+#define __NR_setpgid 57
+__SYSCALL(__NR_setpgid, sys_setpgid)
+                       /* 58 was sys_ulimit */
+__SYSCALL(58, sys_ni_syscall)
+                       /* 59 was sys_olduname */
+__SYSCALL(59, sys_ni_syscall)
+#define __NR_umask 60
+__SYSCALL(__NR_umask, sys_umask)
+#define __NR_chroot 61
+__SYSCALL(__NR_chroot, sys_chroot)
+#define __NR_ustat 62
+__SYSCALL(__NR_ustat, compat_sys_ustat)
+#define __NR_dup2 63
+__SYSCALL(__NR_dup2, sys_dup2)
+#define __NR_getppid 64
+__SYSCALL(__NR_getppid, sys_getppid)
+#define __NR_getpgrp 65
+__SYSCALL(__NR_getpgrp, sys_getpgrp)
+#define __NR_setsid 66
+__SYSCALL(__NR_setsid, sys_setsid)
+#define __NR_sigaction 67
+__SYSCALL(__NR_sigaction, compat_sys_sigaction)
+                       /* 68 was sys_sgetmask */
+__SYSCALL(68, sys_ni_syscall)
+                       /* 69 was sys_ssetmask */
+__SYSCALL(69, sys_ni_syscall)
+#define __NR_setreuid 70
+__SYSCALL(__NR_setreuid, sys_setreuid16)
+#define __NR_setregid 71
+__SYSCALL(__NR_setregid, sys_setregid16)
+#define __NR_sigsuspend 72
+__SYSCALL(__NR_sigsuspend, sys_sigsuspend)
+#define __NR_sigpending 73
+__SYSCALL(__NR_sigpending, compat_sys_sigpending)
+#define __NR_sethostname 74
+__SYSCALL(__NR_sethostname, sys_sethostname)
+#define __NR_setrlimit 75
+__SYSCALL(__NR_setrlimit, compat_sys_setrlimit)
+                       /* 76 was compat_sys_getrlimit */
+__SYSCALL(76, sys_ni_syscall)
+#define __NR_getrusage 77
+__SYSCALL(__NR_getrusage, compat_sys_getrusage)
+#define __NR_gettimeofday 78
+__SYSCALL(__NR_gettimeofday, compat_sys_gettimeofday)
+#define __NR_settimeofday 79
+__SYSCALL(__NR_settimeofday, compat_sys_settimeofday)
+#define __NR_getgroups 80
+__SYSCALL(__NR_getgroups, sys_getgroups16)
+#define __NR_setgroups 81
+__SYSCALL(__NR_setgroups, sys_setgroups16)
+                       /* 82 was compat_sys_select */
+__SYSCALL(82, sys_ni_syscall)
+#define __NR_symlink 83
+__SYSCALL(__NR_symlink, sys_symlink)
+                       /* 84 was sys_lstat */
+__SYSCALL(84, sys_ni_syscall)
+#define __NR_readlink 85
+__SYSCALL(__NR_readlink, sys_readlink)
+#define __NR_uselib 86
+__SYSCALL(__NR_uselib, sys_uselib)
+#define __NR_swapon 87
+__SYSCALL(__NR_swapon, sys_swapon)
+#define __NR_reboot 88
+__SYSCALL(__NR_reboot, sys_reboot)
+                       /* 89 was sys_readdir */
+__SYSCALL(89, sys_ni_syscall)
+                       /* 90 was sys_mmap */
+__SYSCALL(90, sys_ni_syscall)
+#define __NR_munmap 91
+__SYSCALL(__NR_munmap, sys_munmap)
+#define __NR_truncate 92
+__SYSCALL(__NR_truncate, compat_sys_truncate)
+#define __NR_ftruncate 93
+__SYSCALL(__NR_ftruncate, compat_sys_ftruncate)
+#define __NR_fchmod 94
+__SYSCALL(__NR_fchmod, sys_fchmod)
+#define __NR_fchown 95
+__SYSCALL(__NR_fchown, sys_fchown16)
+#define __NR_getpriority 96
+__SYSCALL(__NR_getpriority, sys_getpriority)
+#define __NR_setpriority 97
+__SYSCALL(__NR_setpriority, sys_setpriority)
+                       /* 98 was sys_profil */
+__SYSCALL(98, sys_ni_syscall)
+#define __NR_statfs 99
+__SYSCALL(__NR_statfs, compat_sys_statfs)
+#define __NR_fstatfs 100
+__SYSCALL(__NR_fstatfs, compat_sys_fstatfs)
+                       /* 101 was sys_ioperm */
+__SYSCALL(101, sys_ni_syscall)
+                       /* 102 was sys_socketcall */
+__SYSCALL(102, sys_ni_syscall)
+#define __NR_syslog 103
+__SYSCALL(__NR_syslog, sys_syslog)
+#define __NR_setitimer 104
+__SYSCALL(__NR_setitimer, compat_sys_setitimer)
+#define __NR_getitimer 105
+__SYSCALL(__NR_getitimer, compat_sys_getitimer)
+#define __NR_stat 106
+__SYSCALL(__NR_stat, compat_sys_newstat)
+#define __NR_lstat 107
+__SYSCALL(__NR_lstat, compat_sys_newlstat)
+#define __NR_fstat 108
+__SYSCALL(__NR_fstat, compat_sys_newfstat)
+                       /* 109 was sys_uname */
+__SYSCALL(109, sys_ni_syscall)
+                       /* 110 was sys_iopl */
+__SYSCALL(110, sys_ni_syscall)
+#define __NR_vhangup 111
+__SYSCALL(__NR_vhangup, sys_vhangup)
+                       /* 112 was sys_idle */
+__SYSCALL(112, sys_ni_syscall)
+                       /* 113 was sys_syscall */
+__SYSCALL(113, sys_ni_syscall)
+#define __NR_wait4 114
+__SYSCALL(__NR_wait4, compat_sys_wait4)
+#define __NR_swapoff 115
+__SYSCALL(__NR_swapoff, sys_swapoff)
+#define __NR_sysinfo 116
+__SYSCALL(__NR_sysinfo, compat_sys_sysinfo)
+                       /* 117 was sys_ipc */
+__SYSCALL(117, sys_ni_syscall)
+#define __NR_fsync 118
+__SYSCALL(__NR_fsync, sys_fsync)
+#define __NR_sigreturn 119
+__SYSCALL(__NR_sigreturn, compat_sys_sigreturn_wrapper)
+#define __NR_clone 120
+__SYSCALL(__NR_clone, sys_clone)
+#define __NR_setdomainname 121
+__SYSCALL(__NR_setdomainname, sys_setdomainname)
+#define __NR_uname 122
+__SYSCALL(__NR_uname, sys_newuname)
+                       /* 123 was sys_modify_ldt */
+__SYSCALL(123, sys_ni_syscall)
+#define __NR_adjtimex 124
+__SYSCALL(__NR_adjtimex, compat_sys_adjtimex)
+#define __NR_mprotect 125
+__SYSCALL(__NR_mprotect, sys_mprotect)
+#define __NR_sigprocmask 126
+__SYSCALL(__NR_sigprocmask, compat_sys_sigprocmask)
+                       /* 127 was sys_create_module */
+__SYSCALL(127, sys_ni_syscall)
+#define __NR_init_module 128
+__SYSCALL(__NR_init_module, sys_init_module)
+#define __NR_delete_module 129
+__SYSCALL(__NR_delete_module, sys_delete_module)
+                       /* 130 was sys_get_kernel_syms */
+__SYSCALL(130, sys_ni_syscall)
+#define __NR_quotactl 131
+__SYSCALL(__NR_quotactl, sys_quotactl)
+#define __NR_getpgid 132
+__SYSCALL(__NR_getpgid, sys_getpgid)
+#define __NR_fchdir 133
+__SYSCALL(__NR_fchdir, sys_fchdir)
+#define __NR_bdflush 134
+__SYSCALL(__NR_bdflush, sys_bdflush)
+#define __NR_sysfs 135
+__SYSCALL(__NR_sysfs, sys_sysfs)
+#define __NR_personality 136
+__SYSCALL(__NR_personality, sys_personality)
+                       /* 137 was sys_afs_syscall */
+__SYSCALL(137, sys_ni_syscall)
+#define __NR_setfsuid 138
+__SYSCALL(__NR_setfsuid, sys_setfsuid16)
+#define __NR_setfsgid 139
+__SYSCALL(__NR_setfsgid, sys_setfsgid16)
+#define __NR__llseek 140
+__SYSCALL(__NR__llseek, sys_llseek)
+#define __NR_getdents 141
+__SYSCALL(__NR_getdents, compat_sys_getdents)
+#define __NR__newselect 142
+__SYSCALL(__NR__newselect, compat_sys_select)
+#define __NR_flock 143
+__SYSCALL(__NR_flock, sys_flock)
+#define __NR_msync 144
+__SYSCALL(__NR_msync, sys_msync)
+#define __NR_readv 145
+__SYSCALL(__NR_readv, compat_sys_readv)
+#define __NR_writev 146
+__SYSCALL(__NR_writev, compat_sys_writev)
+#define __NR_getsid 147
+__SYSCALL(__NR_getsid, sys_getsid)
+#define __NR_fdatasync 148
+__SYSCALL(__NR_fdatasync, sys_fdatasync)
+#define __NR__sysctl 149
+__SYSCALL(__NR__sysctl, compat_sys_sysctl)
+#define __NR_mlock 150
+__SYSCALL(__NR_mlock, sys_mlock)
+#define __NR_munlock 151
+__SYSCALL(__NR_munlock, sys_munlock)
+#define __NR_mlockall 152
+__SYSCALL(__NR_mlockall, sys_mlockall)
+#define __NR_munlockall 153
+__SYSCALL(__NR_munlockall, sys_munlockall)
+#define __NR_sched_setparam 154
+__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
+#define __NR_sched_getparam 155
+__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
+#define __NR_sched_setscheduler 156
+__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
+#define __NR_sched_getscheduler 157
+__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
+#define __NR_sched_yield 158
+__SYSCALL(__NR_sched_yield, sys_sched_yield)
+#define __NR_sched_get_priority_max 159
+__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
+#define __NR_sched_get_priority_min 160
+__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+#define __NR_sched_rr_get_interval 161
+__SYSCALL(__NR_sched_rr_get_interval, compat_sys_sched_rr_get_interval)
+#define __NR_nanosleep 162
+__SYSCALL(__NR_nanosleep, compat_sys_nanosleep)
+#define __NR_mremap 163
+__SYSCALL(__NR_mremap, sys_mremap)
+#define __NR_setresuid 164
+__SYSCALL(__NR_setresuid, sys_setresuid16)
+#define __NR_getresuid 165
+__SYSCALL(__NR_getresuid, sys_getresuid16)
+                       /* 166 was sys_vm86 */
+__SYSCALL(166, sys_ni_syscall)
+                       /* 167 was sys_query_module */
+__SYSCALL(167, sys_ni_syscall)
+#define __NR_poll 168
+__SYSCALL(__NR_poll, sys_poll)
+#define __NR_nfsservctl 169
+__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
+#define __NR_setresgid 170
+__SYSCALL(__NR_setresgid, sys_setresgid16)
+#define __NR_getresgid 171
+__SYSCALL(__NR_getresgid, sys_getresgid16)
+#define __NR_prctl 172
+__SYSCALL(__NR_prctl, sys_prctl)
+#define __NR_rt_sigreturn 173
+__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn_wrapper)
+#define __NR_rt_sigaction 174
+__SYSCALL(__NR_rt_sigaction, compat_sys_rt_sigaction)
+#define __NR_rt_sigprocmask 175
+__SYSCALL(__NR_rt_sigprocmask, compat_sys_rt_sigprocmask)
+#define __NR_rt_sigpending 176
+__SYSCALL(__NR_rt_sigpending, compat_sys_rt_sigpending)
+#define __NR_rt_sigtimedwait 177
+__SYSCALL(__NR_rt_sigtimedwait, compat_sys_rt_sigtimedwait)
+#define __NR_rt_sigqueueinfo 178
+__SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo)
+#define __NR_rt_sigsuspend 179
+__SYSCALL(__NR_rt_sigsuspend, compat_sys_rt_sigsuspend)
+#define __NR_pread64 180
+__SYSCALL(__NR_pread64, compat_sys_pread64_wrapper)
+#define __NR_pwrite64 181
+__SYSCALL(__NR_pwrite64, compat_sys_pwrite64_wrapper)
+#define __NR_chown 182
+__SYSCALL(__NR_chown, sys_chown16)
+#define __NR_getcwd 183
+__SYSCALL(__NR_getcwd, sys_getcwd)
+#define __NR_capget 184
+__SYSCALL(__NR_capget, sys_capget)
+#define __NR_capset 185
+__SYSCALL(__NR_capset, sys_capset)
+#define __NR_sigaltstack 186
+__SYSCALL(__NR_sigaltstack, compat_sys_sigaltstack)
+#define __NR_sendfile 187
+__SYSCALL(__NR_sendfile, compat_sys_sendfile)
+                       /* 188 reserved */
+__SYSCALL(188, sys_ni_syscall)
+                       /* 189 reserved */
+__SYSCALL(189, sys_ni_syscall)
+#define __NR_vfork 190
+__SYSCALL(__NR_vfork, sys_vfork)
+#define __NR_ugetrlimit 191    /* SuS compliant getrlimit */
+__SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit)               /* SuS compliant getrlimit */
+#define __NR_mmap2 192
+__SYSCALL(__NR_mmap2, sys_mmap_pgoff)
+#define __NR_truncate64 193
+__SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper)
+#define __NR_ftruncate64 194
+__SYSCALL(__NR_ftruncate64, compat_sys_ftruncate64_wrapper)
+#define __NR_stat64 195
+__SYSCALL(__NR_stat64, sys_stat64)
+#define __NR_lstat64 196
+__SYSCALL(__NR_lstat64, sys_lstat64)
+#define __NR_fstat64 197
+__SYSCALL(__NR_fstat64, sys_fstat64)
+#define __NR_lchown32 198
+__SYSCALL(__NR_lchown32, sys_lchown)
+#define __NR_getuid32 199
+__SYSCALL(__NR_getuid32, sys_getuid)
+#define __NR_getgid32 200
+__SYSCALL(__NR_getgid32, sys_getgid)
+#define __NR_geteuid32 201
+__SYSCALL(__NR_geteuid32, sys_geteuid)
+#define __NR_getegid32 202
+__SYSCALL(__NR_getegid32, sys_getegid)
+#define __NR_setreuid32 203
+__SYSCALL(__NR_setreuid32, sys_setreuid)
+#define __NR_setregid32 204
+__SYSCALL(__NR_setregid32, sys_setregid)
+#define __NR_getgroups32 205
+__SYSCALL(__NR_getgroups32, sys_getgroups)
+#define __NR_setgroups32 206
+__SYSCALL(__NR_setgroups32, sys_setgroups)
+#define __NR_fchown32 207
+__SYSCALL(__NR_fchown32, sys_fchown)
+#define __NR_setresuid32 208
+__SYSCALL(__NR_setresuid32, sys_setresuid)
+#define __NR_getresuid32 209
+__SYSCALL(__NR_getresuid32, sys_getresuid)
+#define __NR_setresgid32 210
+__SYSCALL(__NR_setresgid32, sys_setresgid)
+#define __NR_getresgid32 211
+__SYSCALL(__NR_getresgid32, sys_getresgid)
+#define __NR_chown32 212
+__SYSCALL(__NR_chown32, sys_chown)
+#define __NR_setuid32 213
+__SYSCALL(__NR_setuid32, sys_setuid)
+#define __NR_setgid32 214
+__SYSCALL(__NR_setgid32, sys_setgid)
+#define __NR_setfsuid32 215
+__SYSCALL(__NR_setfsuid32, sys_setfsuid)
+#define __NR_setfsgid32 216
+__SYSCALL(__NR_setfsgid32, sys_setfsgid)
+#define __NR_getdents64 217
+__SYSCALL(__NR_getdents64, compat_sys_getdents64)
+#define __NR_pivot_root 218
+__SYSCALL(__NR_pivot_root, sys_pivot_root)
+#define __NR_mincore 219
+__SYSCALL(__NR_mincore, sys_mincore)
+#define __NR_madvise 220
+__SYSCALL(__NR_madvise, sys_madvise)
+#define __NR_fcntl64 221
+__SYSCALL(__NR_fcntl64, compat_sys_fcntl64)
+                       /* 222 for tux */
+__SYSCALL(222, sys_ni_syscall)
+                       /* 223 is unused */
+__SYSCALL(223, sys_ni_syscall)
+#define __NR_gettid 224
+__SYSCALL(__NR_gettid, sys_gettid)
+#define __NR_readahead 225
+__SYSCALL(__NR_readahead, compat_sys_readahead_wrapper)
+#define __NR_setxattr 226
+__SYSCALL(__NR_setxattr, sys_setxattr)
+#define __NR_lsetxattr 227
+__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
+#define __NR_fsetxattr 228
+__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
+#define __NR_getxattr 229
+__SYSCALL(__NR_getxattr, sys_getxattr)
+#define __NR_lgetxattr 230
+__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
+#define __NR_fgetxattr 231
+__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
+#define __NR_listxattr 232
+__SYSCALL(__NR_listxattr, sys_listxattr)
+#define __NR_llistxattr 233
+__SYSCALL(__NR_llistxattr, sys_llistxattr)
+#define __NR_flistxattr 234
+__SYSCALL(__NR_flistxattr, sys_flistxattr)
+#define __NR_removexattr 235
+__SYSCALL(__NR_removexattr, sys_removexattr)
+#define __NR_lremovexattr 236
+__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
+#define __NR_fremovexattr 237
+__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
+#define __NR_tkill 238
+__SYSCALL(__NR_tkill, sys_tkill)
+#define __NR_sendfile64 239
+__SYSCALL(__NR_sendfile64, sys_sendfile64)
+#define __NR_futex 240
+__SYSCALL(__NR_futex, compat_sys_futex)
+#define __NR_sched_setaffinity 241
+__SYSCALL(__NR_sched_setaffinity, compat_sys_sched_setaffinity)
+#define __NR_sched_getaffinity 242
+__SYSCALL(__NR_sched_getaffinity, compat_sys_sched_getaffinity)
+#define __NR_io_setup 243
+__SYSCALL(__NR_io_setup, compat_sys_io_setup)
+#define __NR_io_destroy 244
+__SYSCALL(__NR_io_destroy, sys_io_destroy)
+#define __NR_io_getevents 245
+__SYSCALL(__NR_io_getevents, compat_sys_io_getevents)
+#define __NR_io_submit 246
+__SYSCALL(__NR_io_submit, compat_sys_io_submit)
+#define __NR_io_cancel 247
+__SYSCALL(__NR_io_cancel, sys_io_cancel)
+#define __NR_exit_group 248
+__SYSCALL(__NR_exit_group, sys_exit_group)
+#define __NR_lookup_dcookie 249
+__SYSCALL(__NR_lookup_dcookie, compat_sys_lookup_dcookie)
+#define __NR_epoll_create 250
+__SYSCALL(__NR_epoll_create, sys_epoll_create)
+#define __NR_epoll_ctl 251
+__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
+#define __NR_epoll_wait 252
+__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
+#define __NR_remap_file_pages 253
+__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
+                       /* 254 for set_thread_area */
+__SYSCALL(254, sys_ni_syscall)
+                       /* 255 for get_thread_area */
+__SYSCALL(255, sys_ni_syscall)
+#define __NR_set_tid_address 256
+__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
+#define __NR_timer_create 257
+__SYSCALL(__NR_timer_create, compat_sys_timer_create)
+#define __NR_timer_settime 258
+__SYSCALL(__NR_timer_settime, compat_sys_timer_settime)
+#define __NR_timer_gettime 259
+__SYSCALL(__NR_timer_gettime, compat_sys_timer_gettime)
+#define __NR_timer_getoverrun 260
+__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+#define __NR_timer_delete 261
+__SYSCALL(__NR_timer_delete, sys_timer_delete)
+#define __NR_clock_settime 262
+__SYSCALL(__NR_clock_settime, compat_sys_clock_settime)
+#define __NR_clock_gettime 263
+__SYSCALL(__NR_clock_gettime, compat_sys_clock_gettime)
+#define __NR_clock_getres 264
+__SYSCALL(__NR_clock_getres, compat_sys_clock_getres)
+#define __NR_clock_nanosleep 265
+__SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep)
+#define __NR_statfs64 266
+__SYSCALL(__NR_statfs64, compat_sys_statfs64_wrapper)
+#define __NR_fstatfs64 267
+__SYSCALL(__NR_fstatfs64, compat_sys_fstatfs64_wrapper)
+#define __NR_tgkill 268
+__SYSCALL(__NR_tgkill, sys_tgkill)
+#define __NR_utimes 269
+__SYSCALL(__NR_utimes, compat_sys_utimes)
+#define __NR_arm_fadvise64_64 270
+__SYSCALL(__NR_arm_fadvise64_64, compat_sys_fadvise64_64_wrapper)
+#define __NR_pciconfig_iobase 271
+__SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase)
+#define __NR_pciconfig_read 272
+__SYSCALL(__NR_pciconfig_read, sys_pciconfig_read)
+#define __NR_pciconfig_write 273
+__SYSCALL(__NR_pciconfig_write, sys_pciconfig_write)
+#define __NR_mq_open 274
+__SYSCALL(__NR_mq_open, compat_sys_mq_open)
+#define __NR_mq_unlink 275
+__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+#define __NR_mq_timedsend 276
+__SYSCALL(__NR_mq_timedsend, compat_sys_mq_timedsend)
+#define __NR_mq_timedreceive 277
+__SYSCALL(__NR_mq_timedreceive, compat_sys_mq_timedreceive)
+#define __NR_mq_notify 278
+__SYSCALL(__NR_mq_notify, compat_sys_mq_notify)
+#define __NR_mq_getsetattr 279
+__SYSCALL(__NR_mq_getsetattr, compat_sys_mq_getsetattr)
+#define __NR_waitid 280
+__SYSCALL(__NR_waitid, compat_sys_waitid)
+#define __NR_socket 281
+__SYSCALL(__NR_socket, sys_socket)
+#define __NR_bind 282
+__SYSCALL(__NR_bind, sys_bind)
+#define __NR_connect 283
+__SYSCALL(__NR_connect, sys_connect)
+#define __NR_listen 284
+__SYSCALL(__NR_listen, sys_listen)
+#define __NR_accept 285
+__SYSCALL(__NR_accept, sys_accept)
+#define __NR_getsockname 286
+__SYSCALL(__NR_getsockname, sys_getsockname)
+#define __NR_getpeername 287
+__SYSCALL(__NR_getpeername, sys_getpeername)
+#define __NR_socketpair 288
+__SYSCALL(__NR_socketpair, sys_socketpair)
+#define __NR_send 289
+__SYSCALL(__NR_send, sys_send)
+#define __NR_sendto 290
+__SYSCALL(__NR_sendto, sys_sendto)
+#define __NR_recv 291
+__SYSCALL(__NR_recv, compat_sys_recv)
+#define __NR_recvfrom 292
+__SYSCALL(__NR_recvfrom, compat_sys_recvfrom)
+#define __NR_shutdown 293
+__SYSCALL(__NR_shutdown, sys_shutdown)
+#define __NR_setsockopt 294
+__SYSCALL(__NR_setsockopt, compat_sys_setsockopt)
+#define __NR_getsockopt 295
+__SYSCALL(__NR_getsockopt, compat_sys_getsockopt)
+#define __NR_sendmsg 296
+__SYSCALL(__NR_sendmsg, compat_sys_sendmsg)
+#define __NR_recvmsg 297
+__SYSCALL(__NR_recvmsg, compat_sys_recvmsg)
+#define __NR_semop 298
+__SYSCALL(__NR_semop, sys_semop)
+#define __NR_semget 299
+__SYSCALL(__NR_semget, sys_semget)
+#define __NR_semctl 300
+__SYSCALL(__NR_semctl, compat_sys_semctl)
+#define __NR_msgsnd 301
+__SYSCALL(__NR_msgsnd, compat_sys_msgsnd)
+#define __NR_msgrcv 302
+__SYSCALL(__NR_msgrcv, compat_sys_msgrcv)
+#define __NR_msgget 303
+__SYSCALL(__NR_msgget, sys_msgget)
+#define __NR_msgctl 304
+__SYSCALL(__NR_msgctl, compat_sys_msgctl)
+#define __NR_shmat 305
+__SYSCALL(__NR_shmat, compat_sys_shmat)
+#define __NR_shmdt 306
+__SYSCALL(__NR_shmdt, sys_shmdt)
+#define __NR_shmget 307
+__SYSCALL(__NR_shmget, sys_shmget)
+#define __NR_shmctl 308
+__SYSCALL(__NR_shmctl, compat_sys_shmctl)
+#define __NR_add_key 309
+__SYSCALL(__NR_add_key, sys_add_key)
+#define __NR_request_key 310
+__SYSCALL(__NR_request_key, sys_request_key)
+#define __NR_keyctl 311
+__SYSCALL(__NR_keyctl, compat_sys_keyctl)
+#define __NR_semtimedop 312
+__SYSCALL(__NR_semtimedop, compat_sys_semtimedop)
+#define __NR_vserver 313
+__SYSCALL(__NR_vserver, sys_ni_syscall)
+#define __NR_ioprio_set 314
+__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
+#define __NR_ioprio_get 315
+__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
+#define __NR_inotify_init 316
+__SYSCALL(__NR_inotify_init, sys_inotify_init)
+#define __NR_inotify_add_watch 317
+__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
+#define __NR_inotify_rm_watch 318
+__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
+#define __NR_mbind 319
+__SYSCALL(__NR_mbind, compat_sys_mbind)
+#define __NR_get_mempolicy 320
+__SYSCALL(__NR_get_mempolicy, compat_sys_get_mempolicy)
+#define __NR_set_mempolicy 321
+__SYSCALL(__NR_set_mempolicy, compat_sys_set_mempolicy)
+#define __NR_openat 322
+__SYSCALL(__NR_openat, compat_sys_openat)
+#define __NR_mkdirat 323
+__SYSCALL(__NR_mkdirat, sys_mkdirat)
+#define __NR_mknodat 324
+__SYSCALL(__NR_mknodat, sys_mknodat)
+#define __NR_fchownat 325
+__SYSCALL(__NR_fchownat, sys_fchownat)
+#define __NR_futimesat 326
+__SYSCALL(__NR_futimesat, compat_sys_futimesat)
+#define __NR_fstatat64 327
+__SYSCALL(__NR_fstatat64, sys_fstatat64)
+#define __NR_unlinkat 328
+__SYSCALL(__NR_unlinkat, sys_unlinkat)
+#define __NR_renameat 329
+__SYSCALL(__NR_renameat, sys_renameat)
+#define __NR_linkat 330
+__SYSCALL(__NR_linkat, sys_linkat)
+#define __NR_symlinkat 331
+__SYSCALL(__NR_symlinkat, sys_symlinkat)
+#define __NR_readlinkat 332
+__SYSCALL(__NR_readlinkat, sys_readlinkat)
+#define __NR_fchmodat 333
+__SYSCALL(__NR_fchmodat, sys_fchmodat)
+#define __NR_faccessat 334
+__SYSCALL(__NR_faccessat, sys_faccessat)
+#define __NR_pselect6 335
+__SYSCALL(__NR_pselect6, compat_sys_pselect6)
+#define __NR_ppoll 336
+__SYSCALL(__NR_ppoll, compat_sys_ppoll)
+#define __NR_unshare 337
+__SYSCALL(__NR_unshare, sys_unshare)
+#define __NR_set_robust_list 338
+__SYSCALL(__NR_set_robust_list, compat_sys_set_robust_list)
+#define __NR_get_robust_list 339
+__SYSCALL(__NR_get_robust_list, compat_sys_get_robust_list)
+#define __NR_splice 340
+__SYSCALL(__NR_splice, sys_splice)
+#define __NR_sync_file_range2 341
+__SYSCALL(__NR_sync_file_range2, compat_sys_sync_file_range2_wrapper)
+#define __NR_tee 342
+__SYSCALL(__NR_tee, sys_tee)
+#define __NR_vmsplice 343
+__SYSCALL(__NR_vmsplice, compat_sys_vmsplice)
+#define __NR_move_pages 344
+__SYSCALL(__NR_move_pages, compat_sys_move_pages)
+#define __NR_getcpu 345
+__SYSCALL(__NR_getcpu, sys_getcpu)
+#define __NR_epoll_pwait 346
+__SYSCALL(__NR_epoll_pwait, compat_sys_epoll_pwait)
+#define __NR_kexec_load 347
+__SYSCALL(__NR_kexec_load, compat_sys_kexec_load)
+#define __NR_utimensat 348
+__SYSCALL(__NR_utimensat, compat_sys_utimensat)
+#define __NR_signalfd 349
+__SYSCALL(__NR_signalfd, compat_sys_signalfd)
+#define __NR_timerfd_create 350
+__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+#define __NR_eventfd 351
+__SYSCALL(__NR_eventfd, sys_eventfd)
+#define __NR_fallocate 352
+__SYSCALL(__NR_fallocate, compat_sys_fallocate_wrapper)
+#define __NR_timerfd_settime 353
+__SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime)
+#define __NR_timerfd_gettime 354
+__SYSCALL(__NR_timerfd_gettime, compat_sys_timerfd_gettime)
+#define __NR_signalfd4 355
+__SYSCALL(__NR_signalfd4, compat_sys_signalfd4)
+#define __NR_eventfd2 356
+__SYSCALL(__NR_eventfd2, sys_eventfd2)
+#define __NR_epoll_create1 357
+__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
+#define __NR_dup3 358
+__SYSCALL(__NR_dup3, sys_dup3)
+#define __NR_pipe2 359
+__SYSCALL(__NR_pipe2, sys_pipe2)
+#define __NR_inotify_init1 360
+__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
+#define __NR_preadv 361
+__SYSCALL(__NR_preadv, compat_sys_preadv)
+#define __NR_pwritev 362
+__SYSCALL(__NR_pwritev, compat_sys_pwritev)
+#define __NR_rt_tgsigqueueinfo 363
+__SYSCALL(__NR_rt_tgsigqueueinfo, compat_sys_rt_tgsigqueueinfo)
+#define __NR_perf_event_open 364
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
+#define __NR_recvmmsg 365
+__SYSCALL(__NR_recvmmsg, compat_sys_recvmmsg)
+#define __NR_accept4 366
+__SYSCALL(__NR_accept4, sys_accept4)
+#define __NR_fanotify_init 367
+__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
+#define __NR_fanotify_mark 368
+__SYSCALL(__NR_fanotify_mark, compat_sys_fanotify_mark)
+#define __NR_prlimit64 369
+__SYSCALL(__NR_prlimit64, sys_prlimit64)
+#define __NR_name_to_handle_at 370
+__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
+#define __NR_open_by_handle_at 371
+__SYSCALL(__NR_open_by_handle_at, compat_sys_open_by_handle_at)
+#define __NR_clock_adjtime 372
+__SYSCALL(__NR_clock_adjtime, compat_sys_clock_adjtime)
+#define __NR_syncfs 373
+__SYSCALL(__NR_syncfs, sys_syncfs)
+#define __NR_sendmmsg 374
+__SYSCALL(__NR_sendmmsg, compat_sys_sendmmsg)
+#define __NR_setns 375
+__SYSCALL(__NR_setns, sys_setns)
+#define __NR_process_vm_readv 376
+__SYSCALL(__NR_process_vm_readv, compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 377
+__SYSCALL(__NR_process_vm_writev, compat_sys_process_vm_writev)
+#define __NR_kcmp 378
+__SYSCALL(__NR_kcmp, sys_kcmp)
+#define __NR_finit_module 379
+__SYSCALL(__NR_finit_module, sys_finit_module)
+/* #define __NR_sched_setattr 380 */
+__SYSCALL(380, sys_ni_syscall)
+/* #define __NR_sched_getattr 381 */
+__SYSCALL(381, sys_ni_syscall)
+/* #define __NR_renameat2 382 */
+__SYSCALL(382, sys_ni_syscall)
+#define __NR_seccomp 383
+__SYSCALL(__NR_seccomp, sys_seccomp)
index 215ad4649dd7d7492c7d566fda85146359291c20..7a5df5252dd736e4038e04e40510351820056183 100644 (file)
@@ -50,6 +50,10 @@ static inline bool is_hyp_mode_mismatched(void)
        return __boot_cpu_mode[0] != __boot_cpu_mode[1];
 }
 
+/* The section containing the hypervisor text */
+extern char __hyp_text_start[];
+extern char __hyp_text_end[];
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* ! __ASM__VIRT_H */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
new file mode 100644 (file)
index 0000000..8e38878
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/include/uapi/asm/kvm.h:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_H__
+#define __ARM_KVM_H__
+
+#define KVM_SPSR_EL1   0
+#define KVM_SPSR_SVC   KVM_SPSR_EL1
+#define KVM_SPSR_ABT   1
+#define KVM_SPSR_UND   2
+#define KVM_SPSR_IRQ   3
+#define KVM_SPSR_FIQ   4
+#define KVM_NR_SPSR    5
+
+#ifndef __ASSEMBLY__
+#include <linux/psci.h>
+#include <asm/types.h>
+#include <asm/ptrace.h>
+
+#define __KVM_HAVE_GUEST_DEBUG
+#define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_READONLY_MEM
+
+#define KVM_REG_SIZE(id)                                               \
+       (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
+struct kvm_regs {
+       struct user_pt_regs regs;       /* sp = sp_el0 */
+
+       __u64   sp_el1;
+       __u64   elr_el1;
+
+       __u64   spsr[KVM_NR_SPSR];
+
+       struct user_fpsimd_state fp_regs;
+};
+
+/* Supported Processor Types */
+#define KVM_ARM_TARGET_AEM_V8          0
+#define KVM_ARM_TARGET_FOUNDATION_V8   1
+#define KVM_ARM_TARGET_CORTEX_A57      2
+#define KVM_ARM_TARGET_XGENE_POTENZA   3
+#define KVM_ARM_TARGET_CORTEX_A53      4
+
+#define KVM_ARM_NUM_TARGETS            5
+
+/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
+#define KVM_ARM_DEVICE_TYPE_SHIFT      0
+#define KVM_ARM_DEVICE_TYPE_MASK       (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_ID_SHIFT                16
+#define KVM_ARM_DEVICE_ID_MASK         (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
+
+/* Supported device IDs */
+#define KVM_ARM_DEVICE_VGIC_V2         0
+
+/* Supported VGIC address types  */
+#define KVM_VGIC_V2_ADDR_TYPE_DIST     0
+#define KVM_VGIC_V2_ADDR_TYPE_CPU      1
+
+#define KVM_VGIC_V2_DIST_SIZE          0x1000
+#define KVM_VGIC_V2_CPU_SIZE           0x2000
+
+#define KVM_ARM_VCPU_POWER_OFF         0 /* CPU is started in OFF state */
+#define KVM_ARM_VCPU_EL1_32BIT         1 /* CPU running a 32bit VM */
+#define KVM_ARM_VCPU_PSCI_0_2          2 /* CPU uses PSCI v0.2 */
+
+struct kvm_vcpu_init {
+       __u32 target;
+       __u32 features[7];
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
+
+struct kvm_guest_debug_arch {
+};
+
+struct kvm_debug_exit_arch {
+};
+
+struct kvm_sync_regs {
+};
+
+struct kvm_arch_memory_slot {
+};
+
+/* If you need to interpret the index values, here is the key: */
+#define KVM_REG_ARM_COPROC_MASK                0x000000000FFF0000
+#define KVM_REG_ARM_COPROC_SHIFT       16
+
+/* Normal registers are mapped as coprocessor 16. */
+#define KVM_REG_ARM_CORE               (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_CORE_REG(name)     (offsetof(struct kvm_regs, name) / sizeof(__u32))
+
+/* Some registers need more space to represent values. */
+#define KVM_REG_ARM_DEMUX              (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_DEMUX_ID_MASK      0x000000000000FF00
+#define KVM_REG_ARM_DEMUX_ID_SHIFT     8
+#define KVM_REG_ARM_DEMUX_ID_CCSIDR    (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
+#define KVM_REG_ARM_DEMUX_VAL_MASK     0x00000000000000FF
+#define KVM_REG_ARM_DEMUX_VAL_SHIFT    0
+
+/* AArch64 system registers */
+#define KVM_REG_ARM64_SYSREG           (0x0013 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM64_SYSREG_OP0_MASK  0x000000000000c000
+#define KVM_REG_ARM64_SYSREG_OP0_SHIFT 14
+#define KVM_REG_ARM64_SYSREG_OP1_MASK  0x0000000000003800
+#define KVM_REG_ARM64_SYSREG_OP1_SHIFT 11
+#define KVM_REG_ARM64_SYSREG_CRN_MASK  0x0000000000000780
+#define KVM_REG_ARM64_SYSREG_CRN_SHIFT 7
+#define KVM_REG_ARM64_SYSREG_CRM_MASK  0x0000000000000078
+#define KVM_REG_ARM64_SYSREG_CRM_SHIFT 3
+#define KVM_REG_ARM64_SYSREG_OP2_MASK  0x0000000000000007
+#define KVM_REG_ARM64_SYSREG_OP2_SHIFT 0
+
+#define ARM64_SYS_REG_SHIFT_MASK(x,n) \
+       (((x) << KVM_REG_ARM64_SYSREG_ ## n ## _SHIFT) & \
+       KVM_REG_ARM64_SYSREG_ ## n ## _MASK)
+
+#define __ARM64_SYS_REG(op0,op1,crn,crm,op2) \
+       (KVM_REG_ARM64 | KVM_REG_ARM64_SYSREG | \
+       ARM64_SYS_REG_SHIFT_MASK(op0, OP0) | \
+       ARM64_SYS_REG_SHIFT_MASK(op1, OP1) | \
+       ARM64_SYS_REG_SHIFT_MASK(crn, CRN) | \
+       ARM64_SYS_REG_SHIFT_MASK(crm, CRM) | \
+       ARM64_SYS_REG_SHIFT_MASK(op2, OP2))
+
+#define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
+
+#define KVM_REG_ARM_TIMER_CTL          ARM64_SYS_REG(3, 3, 14, 3, 1)
+#define KVM_REG_ARM_TIMER_CNT          ARM64_SYS_REG(3, 3, 14, 3, 2)
+#define KVM_REG_ARM_TIMER_CVAL         ARM64_SYS_REG(3, 3, 14, 0, 2)
+
+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR      0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS  2
+#define   KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
+#define   KVM_DEV_ARM_VGIC_CPUID_MASK  (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT        0
+#define   KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS   3
+
+/* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_TYPE_SHIFT         24
+#define KVM_ARM_IRQ_TYPE_MASK          0xff
+#define KVM_ARM_IRQ_VCPU_SHIFT         16
+#define KVM_ARM_IRQ_VCPU_MASK          0xff
+#define KVM_ARM_IRQ_NUM_SHIFT          0
+#define KVM_ARM_IRQ_NUM_MASK           0xffff
+
+/* irq_type field */
+#define KVM_ARM_IRQ_TYPE_CPU           0
+#define KVM_ARM_IRQ_TYPE_SPI           1
+#define KVM_ARM_IRQ_TYPE_PPI           2
+
+/* out-of-kernel GIC cpu interrupt injection irq_number field */
+#define KVM_ARM_IRQ_CPU_IRQ            0
+#define KVM_ARM_IRQ_CPU_FIQ            1
+
+/* Highest supported SPI, from VGIC_NR_IRQS */
+#define KVM_ARM_IRQ_GIC_MAX            127
+
+/* PSCI interface */
+#define KVM_PSCI_FN_BASE               0x95c1ba5e
+#define KVM_PSCI_FN(n)                 (KVM_PSCI_FN_BASE + (n))
+
+#define KVM_PSCI_FN_CPU_SUSPEND                KVM_PSCI_FN(0)
+#define KVM_PSCI_FN_CPU_OFF            KVM_PSCI_FN(1)
+#define KVM_PSCI_FN_CPU_ON             KVM_PSCI_FN(2)
+#define KVM_PSCI_FN_MIGRATE            KVM_PSCI_FN(3)
+
+#define KVM_PSCI_RET_SUCCESS           PSCI_RET_SUCCESS
+#define KVM_PSCI_RET_NI                        PSCI_RET_NOT_SUPPORTED
+#define KVM_PSCI_RET_INVAL             PSCI_RET_INVALID_PARAMS
+#define KVM_PSCI_RET_DENIED            PSCI_RET_DENIED
+
+#endif
+
+#endif /* __ARM_KVM_H__ */
index 6913643bbe54ebd5005bfeb74302329fa5e2d0f5..49c61746297d81398c544b48fc556d777f684235 100644 (file)
@@ -23,6 +23,7 @@
 
 #include <asm/hwcap.h>
 
+#define PTRACE_SET_SYSCALL     23
 
 /*
  * PSR bits
index ac389d32ccde1c626f5f0de5ed02cf6bb03c6dfc..23e19f94d44911b05ed175bf65084720932fe450 100644 (file)
@@ -15,9 +15,10 @@ CFLAGS_REMOVE_return_address.o = -pg
 arm64-obj-y            := cputable.o debug-monitors.o entry.o irq.o fpsimd.o   \
                           entry-fpsimd.o process.o ptrace.o setup.o signal.o   \
                           sys.o stacktrace.o time.o traps.o io.o vdso.o        \
-                          hyp-stub.o psci.o cpu_ops.o insn.o return_address.o
+                          hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \
+                          opcodes.o
 
-arm64-obj-$(CONFIG_COMPAT)             += sys32.o kuser32.o signal32.o         \
+arm64-obj-$(CONFIG_COMPAT)             += sys32.o kuser32.o signal32.o \
                                           sys_compat.o
 arm64-obj-$(CONFIG_FUNCTION_TRACER)    += ftrace.o entry-ftrace.o
 arm64-obj-$(CONFIG_MODULES)            += arm64ksyms.o module.o
@@ -33,6 +34,8 @@ arm64-obj-$(CONFIG_JUMP_LABEL)                += jump_label.o
 arm64-obj-$(CONFIG_KGDB)               += kgdb.o
 arm64-obj-$(CONFIG_EFI)                        += efi.o efi-stub.o efi-entry.o
 
+obj-$(CONFIG_SWP_EMULATE)      += swp_emulate.o
+
 obj-y                                  += $(arm64-obj-y) vdso/
 obj-m                                  += $(arm64-obj-m)
 head-y                                 := head.o
index c481a119b98ae08f4952178079f6263a04441c88..9a9fce090d58fd1fd8741bfabbc84672066ea448 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <linux/kvm_host.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/cputable.h>
@@ -119,6 +120,7 @@ int main(void)
   DEFINE(VCPU_ESR_EL2,         offsetof(struct kvm_vcpu, arch.fault.esr_el2));
   DEFINE(VCPU_FAR_EL2,         offsetof(struct kvm_vcpu, arch.fault.far_el2));
   DEFINE(VCPU_HPFAR_EL2,       offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
+  DEFINE(VCPU_DEBUG_FLAGS,     offsetof(struct kvm_vcpu, arch.debug_flags));
   DEFINE(VCPU_HCR_EL2,         offsetof(struct kvm_vcpu, arch.hcr_el2));
   DEFINE(VCPU_IRQ_LINES,       offsetof(struct kvm_vcpu, arch.irq_lines));
   DEFINE(VCPU_HOST_CONTEXT,    offsetof(struct kvm_vcpu, arch.host_cpu_context));
@@ -128,13 +130,24 @@ int main(void)
   DEFINE(KVM_TIMER_ENABLED,    offsetof(struct kvm, arch.timer.enabled));
   DEFINE(VCPU_KVM,             offsetof(struct kvm_vcpu, kvm));
   DEFINE(VCPU_VGIC_CPU,                offsetof(struct kvm_vcpu, arch.vgic_cpu));
-  DEFINE(VGIC_CPU_HCR,         offsetof(struct vgic_cpu, vgic_hcr));
-  DEFINE(VGIC_CPU_VMCR,                offsetof(struct vgic_cpu, vgic_vmcr));
-  DEFINE(VGIC_CPU_MISR,                offsetof(struct vgic_cpu, vgic_misr));
-  DEFINE(VGIC_CPU_EISR,                offsetof(struct vgic_cpu, vgic_eisr));
-  DEFINE(VGIC_CPU_ELRSR,       offsetof(struct vgic_cpu, vgic_elrsr));
-  DEFINE(VGIC_CPU_APR,         offsetof(struct vgic_cpu, vgic_apr));
-  DEFINE(VGIC_CPU_LR,          offsetof(struct vgic_cpu, vgic_lr));
+  DEFINE(VGIC_SAVE_FN,         offsetof(struct vgic_sr_vectors, save_vgic));
+  DEFINE(VGIC_RESTORE_FN,      offsetof(struct vgic_sr_vectors, restore_vgic));
+  DEFINE(VGIC_SR_VECTOR_SZ,    sizeof(struct vgic_sr_vectors));
+  DEFINE(VGIC_V2_CPU_HCR,      offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
+  DEFINE(VGIC_V2_CPU_VMCR,     offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
+  DEFINE(VGIC_V2_CPU_MISR,     offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
+  DEFINE(VGIC_V2_CPU_EISR,     offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
+  DEFINE(VGIC_V2_CPU_ELRSR,    offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
+  DEFINE(VGIC_V2_CPU_APR,      offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
+  DEFINE(VGIC_V2_CPU_LR,       offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
+  DEFINE(VGIC_V3_CPU_HCR,      offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
+  DEFINE(VGIC_V3_CPU_VMCR,     offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
+  DEFINE(VGIC_V3_CPU_MISR,     offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
+  DEFINE(VGIC_V3_CPU_EISR,     offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
+  DEFINE(VGIC_V3_CPU_ELRSR,    offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
+  DEFINE(VGIC_V3_CPU_AP0R,     offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
+  DEFINE(VGIC_V3_CPU_AP1R,     offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
+  DEFINE(VGIC_V3_CPU_LR,       offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
   DEFINE(VGIC_CPU_NR_LR,       offsetof(struct vgic_cpu, nr_lr));
   DEFINE(KVM_VTTBR,            offsetof(struct kvm, arch.vttbr));
   DEFINE(KVM_VGIC_VCTRL,       offsetof(struct kvm, arch.vgic.vctrl_base));
index 7f66fe150265af8c7991c38b6ee8bb5d66885476..b33051d501e6d5b4e3768cac94a453097e85185b 100644 (file)
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
 
-/* Low-level stepping controls. */
-#define DBG_MDSCR_SS           (1 << 0)
-#define DBG_SPSR_SS            (1 << 21)
-
-/* MDSCR_EL1 enabling bits */
-#define DBG_MDSCR_KDE          (1 << 13)
-#define DBG_MDSCR_MDE          (1 << 15)
-#define DBG_MDSCR_MASK         ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
-
 /* Determine debug architecture. */
 u8 debug_monitors_arch(void)
 {
index 6a27cd6dbfa6dd81be8a6042e5366262a8146801..d358ccacfc00275bd8ef8462e8839ac9f12161f8 100644 (file)
@@ -41,3 +41,27 @@ ENTRY(fpsimd_load_state)
        fpsimd_restore x0, 8
        ret
 ENDPROC(fpsimd_load_state)
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Save the bottom n FP registers.
+ *
+ * x0 - pointer to struct fpsimd_partial_state
+ */
+ENTRY(fpsimd_save_partial_state)
+       fpsimd_save_partial x0, 1, 8, 9
+       ret
+ENDPROC(fpsimd_load_partial_state)
+
+/*
+ * Load the bottom n FP registers.
+ *
+ * x0 - pointer to struct fpsimd_partial_state
+ */
+ENTRY(fpsimd_load_partial_state)
+       fpsimd_restore_partial x0, 8, 9
+       ret
+ENDPROC(fpsimd_load_partial_state)
+
+#endif
index 56ef569b2b620dc11d7d8134de1c903bd3e36817..e6681c26d4899a58637f6f9cc4e2ef020e0e4100 100644 (file)
@@ -25,9 +25,9 @@
 #include <asm/asm-offsets.h>
 #include <asm/errno.h>
 #include <asm/esr.h>
+#include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
-#include <asm/unistd32.h>
 
 /*
  * Bad Abort numbers
@@ -590,7 +590,7 @@ fast_work_pending:
        str     x0, [sp, #S_X0]                 // returned x0
 work_pending:
        tbnz    x1, #TIF_NEED_RESCHED, work_resched
-       /* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
+       /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
        ldr     x2, [sp, #S_PSTATE]
        mov     x0, sp                          // 'regs'
        tst     x2, #PSR_MODE_MASK              // user mode regs?
@@ -666,6 +666,10 @@ __sys_trace:
        mov     x0, sp
        bl      syscall_trace_enter
        adr     lr, __sys_trace_return          // return address
+       cmp     w0, #RET_SKIP_SYSCALL_TRACE     // skip syscall and tracing?
+       b.eq    ret_to_user
+       cmp     w0, #RET_SKIP_SYSCALL           // skip syscall?
+       b.eq    __sys_trace_return_skipped
        uxtw    scno, w0                        // syscall number (possibly new)
        mov     x1, sp                          // pointer to regs
        cmp     scno, sc_nr                     // check upper syscall limit
@@ -679,6 +683,7 @@ __sys_trace:
 
 __sys_trace_return:
        str     x0, [sp]                        // save returned x0
+__sys_trace_return_skipped:                    // x0 already in regs[0]
        mov     x0, sp
        bl      syscall_trace_exit
        b       ret_to_user
index 522df9c7f3a4288cf5c8e5bb07f564304bd6ab15..5ba0217df39b7463bb76e7b96cb499c412470ffe 100644 (file)
 #define FPEXC_IXF      (1 << 4)
 #define FPEXC_IDF      (1 << 7)
 
+/*
+ * In order to reduce the number of times the FPSIMD state is needlessly saved
+ * and restored, we need to keep track of two things:
+ * (a) for each task, we need to remember which CPU was the last one to have
+ *     the task's FPSIMD state loaded into its FPSIMD registers;
+ * (b) for each CPU, we need to remember which task's userland FPSIMD state has
+ *     been loaded into its FPSIMD registers most recently, or whether it has
+ *     been used to perform kernel mode NEON in the meantime.
+ *
+ * For (a), we add a 'cpu' field to struct fpsimd_state, which gets updated to
+ * the id of the current CPU everytime the state is loaded onto a CPU. For (b),
+ * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
+ * address of the userland FPSIMD state of the task that was loaded onto the CPU
+ * the most recently, or NULL if kernel mode NEON has been performed after that.
+ *
+ * With this in place, we no longer have to restore the next FPSIMD state right
+ * when switching between tasks. Instead, we can defer this check to userland
+ * resume, at which time we verify whether the CPU's fpsimd_last_state and the
+ * task's fpsimd_state.cpu are still mutually in sync. If this is the case, we
+ * can omit the FPSIMD restore.
+ *
+ * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
+ * indicate whether or not the userland FPSIMD state of the current task is
+ * present in the registers. The flag is set unless the FPSIMD registers of this
+ * CPU currently contain the most recent userland FPSIMD state of the current
+ * task.
+ *
+ * For a certain task, the sequence may look something like this:
+ * - the task gets scheduled in; if both the task's fpsimd_state.cpu field
+ *   contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
+ *   variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
+ *   cleared, otherwise it is set;
+ *
+ * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
+ *   userland FPSIMD state is copied from memory to the registers, the task's
+ *   fpsimd_state.cpu field is set to the id of the current CPU, the current
+ *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
+ *   TIF_FOREIGN_FPSTATE flag is cleared;
+ *
+ * - the task executes an ordinary syscall; upon return to userland, the
+ *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
+ *   restored;
+ *
+ * - the task executes a syscall which executes some NEON instructions; this is
+ *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
+ *   register contents to memory, clears the fpsimd_last_state per-cpu variable
+ *   and sets the TIF_FOREIGN_FPSTATE flag;
+ *
+ * - the task gets preempted after kernel_neon_end() is called; as we have not
+ *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
+ *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
+ */
+static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);
+
 /*
  * Trapped FP/ASIMD access.
  */
@@ -71,44 +125,140 @@ void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
 
 void fpsimd_thread_switch(struct task_struct *next)
 {
-       /* check if not kernel threads */
-       if (current->mm)
+       /*
+        * Save the current FPSIMD state to memory, but only if whatever is in
+        * the registers is in fact the most recent userland FPSIMD state of
+        * 'current'.
+        */
+       if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
                fpsimd_save_state(&current->thread.fpsimd_state);
-       if (next->mm)
-               fpsimd_load_state(&next->thread.fpsimd_state);
+
+       if (next->mm) {
+               /*
+                * If we are switching to a task whose most recent userland
+                * FPSIMD state is already in the registers of *this* cpu,
+                * we can skip loading the state from memory. Otherwise, set
+                * the TIF_FOREIGN_FPSTATE flag so the state will be loaded
+                * upon the next return to userland.
+                */
+               struct fpsimd_state *st = &next->thread.fpsimd_state;
+
+               if (__this_cpu_read(fpsimd_last_state) == st
+                   && st->cpu == smp_processor_id())
+                       clear_ti_thread_flag(task_thread_info(next),
+                                            TIF_FOREIGN_FPSTATE);
+               else
+                       set_ti_thread_flag(task_thread_info(next),
+                                          TIF_FOREIGN_FPSTATE);
+       }
 }
 
 void fpsimd_flush_thread(void)
 {
        preempt_disable();
        memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
-       fpsimd_load_state(&current->thread.fpsimd_state);
+       set_thread_flag(TIF_FOREIGN_FPSTATE);
        preempt_enable();
 }
 
-#ifdef CONFIG_KERNEL_MODE_NEON
+/*
+ * Save the userland FPSIMD state of 'current' to memory, but only if the state
+ * currently held in the registers does in fact belong to 'current'
+ */
+void fpsimd_preserve_current_state(void)
+{
+       preempt_disable();
+       if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
+               fpsimd_save_state(&current->thread.fpsimd_state);
+       preempt_enable();
+}
 
 /*
- * Kernel-side NEON support functions
+ * Load the userland FPSIMD state of 'current' from memory, but only if the
+ * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
+ * state of 'current'
  */
-void kernel_neon_begin(void)
+void fpsimd_restore_current_state(void)
 {
-       /* Avoid using the NEON in interrupt context */
-       BUG_ON(in_interrupt());
        preempt_disable();
+       if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
+               struct fpsimd_state *st = &current->thread.fpsimd_state;
 
-       if (current->mm)
-               fpsimd_save_state(&current->thread.fpsimd_state);
+               fpsimd_load_state(st);
+               this_cpu_write(fpsimd_last_state, st);
+               st->cpu = smp_processor_id();
+       }
+       preempt_enable();
 }
-EXPORT_SYMBOL(kernel_neon_begin);
 
-void kernel_neon_end(void)
+/*
+ * Load an updated userland FPSIMD state for 'current' from memory and set the
+ * flag that indicates that the FPSIMD register contents are the most recent
+ * FPSIMD state of 'current'
+ */
+void fpsimd_update_current_state(struct fpsimd_state *state)
 {
-       if (current->mm)
-               fpsimd_load_state(&current->thread.fpsimd_state);
+       preempt_disable();
+       fpsimd_load_state(state);
+       if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
+               struct fpsimd_state *st = &current->thread.fpsimd_state;
 
+               this_cpu_write(fpsimd_last_state, st);
+               st->cpu = smp_processor_id();
+       }
        preempt_enable();
 }
+
+/*
+ * Invalidate live CPU copies of task t's FPSIMD state
+ */
+void fpsimd_flush_task_state(struct task_struct *t)
+{
+       t->thread.fpsimd_state.cpu = NR_CPUS;
+}
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+static DEFINE_PER_CPU(struct fpsimd_partial_state, hardirq_fpsimdstate);
+static DEFINE_PER_CPU(struct fpsimd_partial_state, softirq_fpsimdstate);
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin_partial(u32 num_regs)
+{
+       if (in_interrupt()) {
+               struct fpsimd_partial_state *s = this_cpu_ptr(
+                       in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
+
+               BUG_ON(num_regs > 32);
+               fpsimd_save_partial_state(s, roundup(num_regs, 2));
+       } else {
+               /*
+                * Save the userland FPSIMD state if we have one and if we
+                * haven't done so already. Clear fpsimd_last_state to indicate
+                * that there is no longer userland FPSIMD state in the
+                * registers.
+                */
+               preempt_disable();
+               if (current->mm &&
+                   !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
+                       fpsimd_save_state(&current->thread.fpsimd_state);
+               this_cpu_write(fpsimd_last_state, NULL);
+       }
+}
+EXPORT_SYMBOL(kernel_neon_begin_partial);
+
+void kernel_neon_end(void)
+{
+       if (in_interrupt()) {
+               struct fpsimd_partial_state *s = this_cpu_ptr(
+                       in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
+               fpsimd_load_partial_state(s);
+       } else {
+               preempt_enable();
+       }
+}
 EXPORT_SYMBOL(kernel_neon_end);
 
 #endif /* CONFIG_KERNEL_MODE_NEON */
@@ -119,12 +269,13 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
 {
        switch (cmd) {
        case CPU_PM_ENTER:
-               if (current->mm)
+               if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
                        fpsimd_save_state(&current->thread.fpsimd_state);
+               this_cpu_write(fpsimd_last_state, NULL);
                break;
        case CPU_PM_EXIT:
                if (current->mm)
-                       fpsimd_load_state(&current->thread.fpsimd_state);
+                       set_thread_flag(TIF_FOREIGN_FPSTATE);
                break;
        case CPU_PM_ENTER_FAILED:
        default:
index f1d3f693cac665d221b5eaff32fe67e56e3a29cb..bbc9fe1658fa7a9f064a6b63e3e9698242cb6dd2 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
@@ -284,6 +285,23 @@ CPU_LE(    bic     x0, x0, #(3 << 24)      )       // Clear the EE and E0E bits for EL1
        msr     cnthctl_el2, x0
        msr     cntvoff_el2, xzr                // Clear virtual offset
 
+#ifdef CONFIG_ARM_GIC_V3
+       /* GICv3 system register access */
+       mrs     x0, id_aa64pfr0_el1
+       ubfx    x0, x0, #24, #4
+       cmp     x0, #1
+       b.ne    3f
+
+       mrs_s   x0, ICC_SRE_EL2
+       orr     x0, x0, #ICC_SRE_EL2_SRE        // Set ICC_SRE_EL2.SRE==1
+       orr     x0, x0, #ICC_SRE_EL2_ENABLE     // Set ICC_SRE_EL2.Enable==1
+       msr_s   ICC_SRE_EL2, x0
+       isb                                     // Make sure SRE is now set
+       msr_s   ICH_HCR_EL2, xzr                // Reset ICC_HCR_EL2 to defaults
+
+3:
+#endif
+
        /* Populate ID registers. */
        mrs     x0, midr_el1
        mrs     x1, mpidr_el1
index 0959611d9ff141c4dde314b5bd5fd75fea1672aa..a272f335c289dcb5f52144c815edf6938757a218 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
index 63c48ffdf230125dedea62d631b3067f4d7a28e1..9fb6d5a3cea791b177c08ca5282ab0e194fc9219 100644 (file)
@@ -28,6 +28,7 @@
  * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
  */
 
+#include <asm/unistd.h>
 #include <asm/unistd32.h>
 
        .align  5
@@ -38,12 +39,13 @@ __kuser_cmpxchg64:                  // 0xffff0f60
        .inst   0xe92d00f0              //      push            {r4, r5, r6, r7}
        .inst   0xe1c040d0              //      ldrd            r4, r5, [r0]
        .inst   0xe1c160d0              //      ldrd            r6, r7, [r1]
-       .inst   0xe1b20e9f              // 1:   ldaexd          r0, r1, [r2]
+       .inst   0xe1b20f9f              // 1:   ldrexd          r0, r1, [r2]
        .inst   0xe0303004              //      eors            r3, r0, r4
        .inst   0x00313005              //      eoreqs          r3, r1, r5
        .inst   0x01a23e96              //      stlexdeq        r3, r6, [r2]
        .inst   0x03330001              //      teqeq           r3, #1
        .inst   0x0afffff9              //      beq             1b
+       .inst   0xf57ff05b              //      dmb             ish
        .inst   0xe2730000              //      rsbs            r0, r3, #0
        .inst   0xe8bd00f0              //      pop             {r4, r5, r6, r7}
        .inst   0xe12fff1e              //      bx              lr
@@ -55,11 +57,12 @@ __kuser_memory_barrier:                     // 0xffff0fa0
 
        .align  5
 __kuser_cmpxchg:                       // 0xffff0fc0
-       .inst   0xe1923e9f              // 1:   ldaex           r3, [r2]
+       .inst   0xe1923f9f              // 1:   ldrex           r3, [r2]
        .inst   0xe0533000              //      subs            r3, r3, r0
        .inst   0x01823e91              //      stlexeq         r3, r1, [r2]
        .inst   0x03330001              //      teqeq           r3, #1
        .inst   0x0afffffa              //      beq             1b
+       .inst   0xf57ff05b              //      dmb             ish
        .inst   0xe2730000              //      rsbs            r0, r3, #0
        .inst   0xe12fff1e              //      bx              lr
 
diff --git a/arch/arm64/kernel/opcodes.c b/arch/arm64/kernel/opcodes.c
new file mode 100644 (file)
index 0000000..ceb5a04
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ *  Copied from linux/arch/arm/kernel/opcodes.c
+ *
+ *  A32 condition code lookup feature moved from nwfpe/fpopcode.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <asm/opcodes.h>
+
+#define ARM_OPCODE_CONDITION_UNCOND 0xf
+
+/*
+ * condition code lookup table
+ * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
+ *
+ * bit position in short is condition code: NZCV
+ */
+static const unsigned short cc_map[16] = {
+       0xF0F0,                 /* EQ == Z set            */
+       0x0F0F,                 /* NE                     */
+       0xCCCC,                 /* CS == C set            */
+       0x3333,                 /* CC                     */
+       0xFF00,                 /* MI == N set            */
+       0x00FF,                 /* PL                     */
+       0xAAAA,                 /* VS == V set            */
+       0x5555,                 /* VC                     */
+       0x0C0C,                 /* HI == C set && Z clear */
+       0xF3F3,                 /* LS == C clear || Z set */
+       0xAA55,                 /* GE == (N==V)           */
+       0x55AA,                 /* LT == (N!=V)           */
+       0x0A05,                 /* GT == (!Z && (N==V))   */
+       0xF5FA,                 /* LE == (Z || (N!=V))    */
+       0xFFFF,                 /* AL always              */
+       0                       /* NV                     */
+};
+
+/*
+ * Returns:
+ * ARM_OPCODE_CONDTEST_FAIL   - if condition fails
+ * ARM_OPCODE_CONDTEST_PASS   - if condition passes (including AL)
+ * ARM_OPCODE_CONDTEST_UNCOND - if NV condition, or separate unconditional
+ *                              opcode space from v5 onwards
+ *
+ * Code that tests whether a conditional instruction would pass its condition
+ * check should check that return value == ARM_OPCODE_CONDTEST_PASS.
+ *
+ * Code that tests if a condition means that the instruction would be executed
+ * (regardless of conditional or unconditional) should instead check that the
+ * return value != ARM_OPCODE_CONDTEST_FAIL.
+ */
+asmlinkage unsigned int arm_check_condition(u32 opcode, u64 psr)
+{
+       u32 cc_bits  = opcode >> 28;
+       u32 psr_cond = (u32)(psr & 0xffffffff) >> 28;
+       unsigned int ret;
+
+       if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+               if ((cc_map[cc_bits] >> (psr_cond)) & 1)
+                       ret = ARM_OPCODE_CONDTEST_PASS;
+               else
+                       ret = ARM_OPCODE_CONDTEST_FAIL;
+       } else {
+               ret = ARM_OPCODE_CONDTEST_UNCOND;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(arm_check_condition);
index 3193bf35dbc893beb073b260e5d2dc14286cf540..c09f85f80a9e8dc71a97889c2c9451ae440ced32 100644 (file)
@@ -161,6 +161,70 @@ void machine_restart(char *cmd)
        while (1);
 }
 
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+       int     i, j;
+       int     nlines;
+       u32     *p;
+
+       /*
+        * don't attempt to dump non-kernel addresses or
+        * values that are probably just small negative numbers
+        */
+       if (addr < PAGE_OFFSET || addr > -256UL)
+               return;
+
+       printk("\n%s: %#lx:\n", name, addr);
+
+       /*
+        * round address down to a 32 bit boundary
+        * and always dump a multiple of 32 bytes
+        */
+       p = (u32 *)(addr & ~(sizeof(u32) - 1));
+       nbytes += (addr & (sizeof(u32) - 1));
+       nlines = (nbytes + 31) / 32;
+
+
+       for (i = 0; i < nlines; i++) {
+               /*
+                * just display low 16 bits of address to keep
+                * each line of the dump < 80 characters
+                */
+               printk("%04lx ", (unsigned long)p & 0xffff);
+               for (j = 0; j < 8; j++) {
+                       u32     data;
+                       if (probe_kernel_address(p, data)) {
+                               printk(" ********");
+                       } else {
+                               printk(" %08x", data);
+                       }
+                       ++p;
+               }
+               printk("\n");
+       }
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+       mm_segment_t fs;
+       unsigned int i;
+
+       fs = get_fs();
+       set_fs(KERNEL_DS);
+       show_data(regs->pc - nbytes, nbytes * 2, "PC");
+       show_data(regs->regs[30] - nbytes, nbytes * 2, "LR");
+       show_data(regs->sp - nbytes, nbytes * 2, "SP");
+       for (i = 0; i < 30; i++) {
+               char name[4];
+               snprintf(name, sizeof(name), "X%u", i);
+               show_data(regs->regs[i] - nbytes, nbytes * 2, name);
+       }
+       set_fs(fs);
+}
+
 void __show_regs(struct pt_regs *regs)
 {
        int i, top_reg;
@@ -187,6 +251,8 @@ void __show_regs(struct pt_regs *regs)
                if (i % 2 == 0)
                        printk("\n");
        }
+       if (!user_mode(regs))
+               show_extra_register_data(regs, 128);
        printk("\n");
 }
 
@@ -203,9 +269,27 @@ void exit_thread(void)
 {
 }
 
+static void tls_thread_flush(void)
+{
+       asm ("msr tpidr_el0, xzr");
+
+       if (is_compat_task()) {
+               current->thread.tp_value = 0;
+
+               /*
+                * We need to ensure ordering between the shadow state and the
+                * hardware state, so that we don't corrupt the hardware state
+                * with a stale shadow state during context switch.
+                */
+               barrier();
+               asm ("msr tpidrro_el0, xzr");
+       }
+}
+
 void flush_thread(void)
 {
        fpsimd_flush_thread();
+       tls_thread_flush();
        flush_ptrace_hw_breakpoint(current);
 }
 
@@ -215,7 +299,7 @@ void release_thread(struct task_struct *dead_task)
 
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
-       fpsimd_save_state(&current->thread.fpsimd_state);
+       fpsimd_preserve_current_state();
        *dst = *src;
        return 0;
 }
index 096a7ad5f004c867a7e50d4cf5fbe333b4bb68c5..e1f70225a919e392076f0371679e1ce864443931 100644 (file)
@@ -19,6 +19,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/audit.h>
 #include <linux/compat.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -26,6 +27,7 @@
 #include <linux/smp.h>
 #include <linux/ptrace.h>
 #include <linux/user.h>
+#include <linux/seccomp.h>
 #include <linux/security.h>
 #include <linux/init.h>
 #include <linux/signal.h>
@@ -39,6 +41,7 @@
 #include <asm/compat.h>
 #include <asm/debug-monitors.h>
 #include <asm/pgtable.h>
+#include <asm/syscall.h>
 #include <asm/traps.h>
 #include <asm/system_misc.h>
 
@@ -85,7 +88,8 @@ static void ptrace_hbptriggered(struct perf_event *bp,
                        break;
                }
        }
-       for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
+
+       for (i = 0; i < ARM_MAX_WRP; ++i) {
                if (current->thread.debug.hbp_watch[i] == bp) {
                        info.si_errno = -((i << 1) + 1);
                        break;
@@ -521,6 +525,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                return ret;
 
        target->thread.fpsimd_state.user_fpsimd = newstate;
+       fpsimd_flush_task_state(target);
        return ret;
 }
 
@@ -768,6 +773,7 @@ static int compat_vfp_set(struct task_struct *target,
                uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
        }
 
+       fpsimd_flush_task_state(target);
        return ret;
 }
 
@@ -1063,7 +1069,19 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
 {
-       return ptrace_request(child, request, addr, data);
+       int ret;
+
+       switch (request) {
+               case PTRACE_SET_SYSCALL:
+                       task_pt_regs(child)->syscallno = data;
+                       ret = 0;
+                       break;
+               default:
+                       ret = ptrace_request(child, request, addr, data);
+                       break;
+       }
+
+       return ret;
 }
 
 enum ptrace_syscall_dir {
@@ -1101,6 +1119,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_enter(regs, regs->syscallno);
 
+       audit_syscall_entry(syscall_get_arch(), regs->syscallno,
+               regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);
+
        return regs->syscallno;
 }
 
@@ -1109,6 +1130,8 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_exit(regs, regs_return_value(regs));
 
+       audit_syscall_exit(regs);
+
        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
 }
index e87b5fd07b8c3e4e5c2cba3c2bdfab3a30df29a6..fdf3c5f4ce9103f7e7ebe78be2e68ca0e7f9a9e6 100644 (file)
@@ -507,9 +507,20 @@ static int c_show(struct seq_file *m, void *v)
        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);
+#ifdef CONFIG_ARMV7_COMPAT_CPUINFO
+       if (is_compat_task()) {
+               /* Print out the non-optional ARMv8 HW capabilities */
+               seq_printf(m, "wp half thumb fastmult vfp edsp neon vfpv3 tlsi ");
+               seq_printf(m, "vfpv4 idiva idivt ");
+       }
+#endif
 
        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
-       seq_printf(m, "CPU architecture: AArch64\n");
+       seq_printf(m, "CPU architecture: %s\n",
+#if IS_ENABLED(CONFIG_ARMV7_COMPAT_CPUINFO)
+                       is_compat_task() ? "8" :
+#endif
+                       "AArch64");
        seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
        seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
        seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
index e3cf09626245aea9440aafbbbe5d84ad2d0ea5a6..bbc1aad21ce6be3d190967b6690bc9323c3a7dd5 100644 (file)
@@ -51,7 +51,7 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
        int err;
 
        /* dump the hardware registers to the fpsimd_state structure */
-       fpsimd_save_state(fpsimd);
+       fpsimd_preserve_current_state();
 
        /* copy the FP and status/control registers */
        err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
@@ -86,11 +86,8 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
        __get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
 
        /* load the hardware registers from the fpsimd_state structure */
-       if (!err) {
-               preempt_disable();
-               fpsimd_load_state(&fpsimd);
-               preempt_enable();
-       }
+       if (!err)
+               fpsimd_update_current_state(&fpsimd);
 
        return err ? -EFAULT : 0;
 }
@@ -423,4 +420,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
        }
+
+       if (thread_flags & _TIF_FOREIGN_FPSTATE)
+               fpsimd_restore_current_state();
+
 }
index e51bbe79f5b5b9c850207a577e42018caf5fa846..e5cf0ab84bed04900212958addeb2ff3337ad5de 100644 (file)
@@ -26,7 +26,7 @@
 #include <asm/fpsimd.h>
 #include <asm/signal32.h>
 #include <asm/uaccess.h>
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
 
 struct compat_sigcontext {
        /* We always set these two fields to 0 */
@@ -183,6 +183,14 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
                break;
+#ifdef __ARCH_SIGSYS
+       case __SI_SYS:
+               err |= __put_user((compat_uptr_t)(unsigned long)
+                               from->si_call_addr, &to->si_call_addr);
+               err |= __put_user(from->si_syscall, &to->si_syscall);
+               err |= __put_user(from->si_arch, &to->si_arch);
+               break;
+#endif
        default: /* this is just in case for now ... */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
@@ -219,7 +227,7 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
         * Note that this also saves V16-31, which aren't visible
         * in AArch32.
         */
-       fpsimd_save_state(fpsimd);
+       fpsimd_preserve_current_state();
 
        /* Place structure header on the stack */
        __put_user_error(magic, &frame->magic, err);
@@ -282,11 +290,8 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
         * We don't need to touch the exception register, so
         * reload the hardware state.
         */
-       if (!err) {
-               preempt_disable();
-               fpsimd_load_state(&fpsimd);
-               preempt_enable();
-       }
+       if (!err)
+               fpsimd_update_current_state(&fpsimd);
 
        return err ? -EFAULT : 0;
 }
diff --git a/arch/arm64/kernel/swp_emulate.c b/arch/arm64/kernel/swp_emulate.c
new file mode 100644 (file)
index 0000000..508fd2e
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ *  Derived from from linux/arch/arm/kernel/swp_emulate.c
+ *
+ *  Copyright (C) 2009 ARM Limited
+ *  Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Implements emulation of the SWP/SWPB instructions using load-exclusive and
+ *  store-exclusive for processors that have them disabled (or future ones that
+ *  might not implement them).
+ *
+ *  Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
+ *  Where: Rt  = destination
+ *        Rt2 = source
+ *        Rn  = address
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/perf_event.h>
+
+#include <asm/opcodes.h>
+#include <asm/traps.h>
+#include <asm/uaccess.h>
+#include <asm/system_misc.h>
+#include <linux/debugfs.h>
+
+/*
+ * Error-checking SWP macros implemented using ldrex{b}/strex{b}
+ */
+
+static int swpb(u8 in, u8 *out, u8 *addr)
+{
+       u8 _out;
+       int res;
+       int err;
+
+       do {
+               __asm__ __volatile__(
+               "0:     ldxrb   %w1, %4\n"
+               "1:     stxrb   %w0, %w3, %4\n"
+               "       mov     %w2, #0\n"
+               "2:\n"
+               "       .section         .fixup,\"ax\"\n"
+               "       .align          2\n"
+               "3:     mov     %w2, %5\n"
+               "       b       2b\n"
+               "       .previous\n"
+               "       .section         __ex_table,\"a\"\n"
+               "       .align          3\n"
+               "       .quad           0b, 3b\n"
+               "       .quad           1b, 3b\n"
+               "       .previous"
+               : "=&r" (res), "=r" (_out), "=r" (err)
+               : "r" (in), "Q" (*addr), "i" (-EFAULT)
+               : "cc", "memory");
+       } while (err == 0 && res != 0);
+
+       if (err == 0)
+               *out = _out;
+       return err;
+}
+
+static int swp(u32 in, u32 *out, u32 *addr)
+{
+       u32 _out;
+       int res;
+       int err = 0;
+
+       do {
+               __asm__ __volatile__(
+               "0:     ldxr    %w1, %4\n"
+               "1:     stxr    %w0, %w3, %4\n"
+               "       mov     %w2, #0\n"
+               "2:\n"
+               "       .section         .fixup,\"ax\"\n"
+               "       .align          2\n"
+               "3:     mov     %w2, %5\n"
+               "       b       2b\n"
+               "       .previous\n"
+               "       .section         __ex_table,\"a\"\n"
+               "       .align          3\n"
+               "       .quad           0b, 3b\n"
+               "       .quad           1b, 3b\n"
+               "       .previous"
+               : "=&r" (res), "=r" (_out), "=r" (err)
+               : "r" (in), "Q" (*addr), "i" (-EFAULT)
+               : "cc", "memory");
+       } while (err == 0 && res != 0);
+
+       if (err == 0)
+               *out = _out;
+       return err;
+}
+/*
+ * Macros/defines for extracting register numbers from instruction.
+ */
+#define EXTRACT_REG_NUM(instruction, offset) \
+       (((instruction) & (0xf << (offset))) >> (offset))
+#define RN_OFFSET  16
+#define RT_OFFSET  12
+#define RT2_OFFSET  0
+/*
+ * Bit 22 of the instruction encoding distinguishes between
+ * the SWP and SWPB variants (bit set means SWPB).
+ */
+#define TYPE_SWPB (1 << 22)
+
+static pid_t previous_pid;
+
+u64 swpb_count = 0;
+u64 swp_count = 0;
+
+/*
+ * swp_handler logs the id of calling process, dissects the instruction, sanity
+ * checks the memory location, calls emulate_swpX for the actual operation and
+ * deals with fixup/error handling before returning
+ */
+static int swp_handler(struct pt_regs *regs, unsigned int instr)
+{
+       u32 destreg, data, type;
+       uintptr_t address;
+       unsigned int res = 0;
+       int err;
+       u32 temp32;
+       u8 temp8;
+
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+
+       res = arm_check_condition(instr, regs->pstate);
+       switch (res) {
+       case ARM_OPCODE_CONDTEST_PASS:
+               break;
+       case ARM_OPCODE_CONDTEST_FAIL:
+               /* Condition failed - return to next instruction */
+               regs->pc += 4;
+               return 0;
+       case ARM_OPCODE_CONDTEST_UNCOND:
+               /* If unconditional encoding - not a SWP, undef */
+               return -EFAULT;
+       default:
+               return -EINVAL;
+       }
+
+       if (current->pid != previous_pid) {
+               pr_warn("\"%s\" (%ld) uses obsolete SWP{B} instruction\n",
+                        current->comm, (unsigned long)current->pid);
+               previous_pid = current->pid;
+       }
+
+       address = regs->regs[EXTRACT_REG_NUM(instr, RN_OFFSET)] & 0xffffffff;
+       data = regs->regs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
+       destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);
+
+       type = instr & TYPE_SWPB;
+
+       /* Check access in reasonable access range for both SWP and SWPB */
+       if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+               pr_debug("SWP{B} emulation: access to %p not allowed!\n",
+                        (void *)address);
+               res = -EFAULT;
+       }
+       if (type == TYPE_SWPB) {
+               err = swpb((u8) data, &temp8, (u8 *) address);
+               if (err)
+                       return err;
+               regs->regs[destreg] = temp8;
+               regs->pc += 4;
+               swpb_count++;
+       } else if (address & 0x3) {
+               /* SWP to unaligned address not permitted */
+               pr_debug("SWP instruction on unaligned pointer!\n");
+               return -EFAULT;
+       } else {
+               err = swp((u32) data, &temp32, (u32 *) address);
+               if (err)
+                       return err;
+               regs->regs[destreg] = temp32;
+               regs->pc += 4;
+               swp_count++;
+       }
+
+       return 0;
+}
+
+/*
+ * Only emulate SWP/SWPB executed in ARM state/User mode.
+ * The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE.
+ */
+static struct undef_hook swp_hook = {
+       .instr_mask     = 0x0fb00ff0,
+       .instr_val      = 0x01000090,
+       .pstate_mask    = COMPAT_PSR_MODE_MASK | COMPAT_PSR_T_BIT,
+       .pstate_val     = COMPAT_PSR_MODE_USR,
+       .fn             = swp_handler
+};
+
+/*
+ * Register handler and create status file in /proc/cpu
+ * Invoked as late_initcall, since not needed before init spawned.
+ */
+static int __init swp_emulation_init(void)
+{
+       struct dentry *dir;
+       dir = debugfs_create_dir("swp_emulate", NULL);
+       debugfs_create_u64("swp_count", S_IRUGO | S_IWUSR, dir, &swp_count);
+       debugfs_create_u64("swpb_count", S_IRUGO | S_IWUSR, dir, &swpb_count);
+
+       pr_notice("Registering SWP/SWPB emulation handler\n");
+       register_undef_hook(&swp_hook);
+
+
+       return 0;
+}
+
+late_initcall(swp_emulation_init);
index 26e9c4eeaba82a20f539991c754d5e40cb161e3a..dc47e53e9e28c15da99e62976a9ca29f71da8bc4 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
 
 static inline void
 do_compat_cache_op(unsigned long start, unsigned long end, int flags)
@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
 
        case __ARM_NR_compat_set_tls:
                current->thread.tp_value = regs->regs[0];
+
+               /*
+                * Protect against register corruption from context switch.
+                * See comment in tls_thread_flush.
+                */
+               barrier();
                asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
                return 0;
 
index 7ffadddb645d32cda5fd54a0c2a5a0c5456b4018..0da47699510b685f5d56d9d4cde2a0dfb9ca9242 100644 (file)
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 1995-2009 Russell King
  * Copyright (C) 2012 ARM Ltd.
+ * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -257,15 +258,58 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
                die(str, regs, err);
 }
 
+static LIST_HEAD(undef_hook);
+
+void register_undef_hook(struct undef_hook *hook)
+{
+       list_add(&hook->node, &undef_hook);
+}
+
+static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+{
+       struct undef_hook *hook;
+       int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
+
+       list_for_each_entry(hook, &undef_hook, node)
+               if ((instr & hook->instr_mask) == hook->instr_val &&
+                   (regs->pstate & hook->pstate_mask) == hook->pstate_val)
+                       fn = hook->fn;
+
+       return fn ? fn(regs, instr) : 1;
+}
+
 asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 {
+       u32 instr;
        siginfo_t info;
        void __user *pc = (void __user *)instruction_pointer(regs);
 
        /* check for AArch32 breakpoint instructions */
        if (!aarch32_break_handler(regs))
                return;
+       if (user_mode(regs)) {
+               if (compat_thumb_mode(regs)) {
+                       if (get_user(instr, (u16 __user *)pc))
+                               goto die_sig;
+                       if (is_wide_instruction(instr)) {
+                               u32 instr2;
+                               if (get_user(instr2, (u16 __user *)pc+1))
+                                       goto die_sig;
+                               instr <<= 16;
+                               instr |= instr2;
+                       }
+               } else if (get_user(instr, (u32 __user *)pc)) {
+                       goto die_sig;
+               }
+       } else {
+               /* kernel mode */
+               instr = *((u32 *)pc);
+       }
+
+       if (call_undef_hook(regs, instr) == 0)
+               return;
 
+die_sig:
        if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
            printk_ratelimit()) {
                pr_info("%s[%d]: undefined instruction: pc=%p\n",
index ce2d97255ba950837c9946f032454070ee3344e3..55d0e035205f0156e6b272ac6fe9fd83c6ea4fa8 100644 (file)
@@ -17,6 +17,19 @@ ENTRY(stext)
 
 jiffies = jiffies_64;
 
+#define HYPERVISOR_TEXT                                        \
+       /*                                              \
+        * Force the alignment to be compatible with    \
+        * the vectors requirements                     \
+        */                                             \
+       . = ALIGN(2048);                                \
+       VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;     \
+       *(.hyp.idmap.text)                              \
+       VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;       \
+       VMLINUX_SYMBOL(__hyp_text_start) = .;           \
+       *(.hyp.text)                                    \
+       VMLINUX_SYMBOL(__hyp_text_end) = .;
+
 SECTIONS
 {
        /*
@@ -48,6 +61,7 @@ SECTIONS
                        TEXT_TEXT
                        SCHED_TEXT
                        LOCK_TEXT
+                       HYPERVISOR_TEXT
                        *(.fixup)
                        *(.gnu.warning)
                . = ALIGN(16);
@@ -102,3 +116,9 @@ SECTIONS
        STABS_DEBUG
        .comment 0 : { *(.comment) }
 }
+
+/*
+ * The HYP init code can't be more than a page long.
+ */
+ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end),
+       "HYP init code too big")
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
new file mode 100644 (file)
index 0000000..8ba85e9
--- /dev/null
@@ -0,0 +1,63 @@
+#
+# KVM configuration
+#
+
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+       bool "Virtualization"
+       ---help---
+         Say Y here to get to see options for using your Linux host to run
+         other operating systems inside virtual machines (guests).
+         This option alone does not add any kernel code.
+
+         If you say N, all options in this submenu will be skipped and
+         disabled.
+
+if VIRTUALIZATION
+
+config KVM
+       bool "Kernel-based Virtual Machine (KVM) support"
+       select MMU_NOTIFIER
+       select PREEMPT_NOTIFIERS
+       select ANON_INODES
+       select HAVE_KVM_CPU_RELAX_INTERCEPT
+       select KVM_MMIO
+       select KVM_ARM_HOST
+       select KVM_ARM_VGIC
+       select KVM_ARM_TIMER
+       ---help---
+         Support hosting virtualized guest machines.
+
+         If unsure, say N.
+
+config KVM_ARM_HOST
+       bool
+       ---help---
+         Provides host support for ARM processors.
+
+config KVM_ARM_MAX_VCPUS
+       int "Number maximum supported virtual CPUs per VM"
+       depends on KVM_ARM_HOST
+       default 4
+       help
+         Static number of max supported virtual CPUs per VM.
+
+         If you choose a high number, the vcpu structures will be quite
+         large, so only choose a reasonable number that you expect to
+         actually use.
+
+config KVM_ARM_VGIC
+       bool
+       depends on KVM_ARM_HOST && OF
+       select HAVE_KVM_IRQCHIP
+       ---help---
+         Adds support for a hardware assisted, in-kernel GIC emulation.
+
+config KVM_ARM_TIMER
+       bool
+       depends on KVM_ARM_VGIC
+       ---help---
+         Adds support for the Architected Timers in virtual machines.
+
+endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
new file mode 100644 (file)
index 0000000..32a0961
--- /dev/null
@@ -0,0 +1,27 @@
+#
+# Makefile for Kernel-based Virtual Machine module
+#
+
+ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm
+CFLAGS_arm.o := -I.
+CFLAGS_mmu.o := -I.
+
+KVM=../../../virt/kvm
+ARM=../../../arch/arm/kvm
+
+obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
+
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
+
+kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
+kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
+kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
+
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
+kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/emulate.c b/arch/arm64/kvm/emulate.c
new file mode 100644 (file)
index 0000000..124418d
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+ * (not much of an) Emulation layer for 32bit guests.
+ *
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * based on arch/arm/kvm/emulate.c
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+
+/*
+ * stolen from arch/arm/kernel/opcodes.c
+ *
+ * condition code lookup table
+ * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
+ *
+ * bit position in short is condition code: NZCV
+ */
+static const unsigned short cc_map[16] = {
+       0xF0F0,                 /* EQ == Z set            */
+       0x0F0F,                 /* NE                     */
+       0xCCCC,                 /* CS == C set            */
+       0x3333,                 /* CC                     */
+       0xFF00,                 /* MI == N set            */
+       0x00FF,                 /* PL                     */
+       0xAAAA,                 /* VS == V set            */
+       0x5555,                 /* VC                     */
+       0x0C0C,                 /* HI == C set && Z clear */
+       0xF3F3,                 /* LS == C clear || Z set */
+       0xAA55,                 /* GE == (N==V)           */
+       0x55AA,                 /* LT == (N!=V)           */
+       0x0A05,                 /* GT == (!Z && (N==V))   */
+       0xF5FA,                 /* LE == (Z || (N!=V))    */
+       0xFFFF,                 /* AL always              */
+       0                       /* NV                     */
+};
+
+static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+{
+       u32 esr = kvm_vcpu_get_hsr(vcpu);
+
+       if (esr & ESR_EL2_CV)
+               return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT;
+
+       return -1;
+}
+
+/*
+ * Check if a trapped instruction should have been executed or not.
+ */
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
+{
+       unsigned long cpsr;
+       u32 cpsr_cond;
+       int cond;
+
+       /* Top two bits non-zero?  Unconditional. */
+       if (kvm_vcpu_get_hsr(vcpu) >> 30)
+               return true;
+
+       /* Is condition field valid? */
+       cond = kvm_vcpu_get_condition(vcpu);
+       if (cond == 0xE)
+               return true;
+
+       cpsr = *vcpu_cpsr(vcpu);
+
+       if (cond < 0) {
+               /* This can happen in Thumb mode: examine IT state. */
+               unsigned long it;
+
+               it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+               /* it == 0 => unconditional. */
+               if (it == 0)
+                       return true;
+
+               /* The cond for this insn works out as the top 4 bits. */
+               cond = (it >> 4);
+       }
+
+       cpsr_cond = cpsr >> 28;
+
+       if (!((cc_map[cond] >> cpsr_cond) & 1))
+               return false;
+
+       return true;
+}
+
+/**
+ * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
+ * @vcpu:      The VCPU pointer
+ *
+ * When exceptions occur while instructions are executed in Thumb IF-THEN
+ * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
+ * to do this little bit of work manually. The fields map like this:
+ *
+ * IT[7:0] -> CPSR[26:25],CPSR[15:10]
+ */
+static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
+{
+       unsigned long itbits, cond;
+       unsigned long cpsr = *vcpu_cpsr(vcpu);
+       bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);
+
+       BUG_ON(is_arm && (cpsr & COMPAT_PSR_IT_MASK));
+
+       if (!(cpsr & COMPAT_PSR_IT_MASK))
+               return;
+
+       cond = (cpsr & 0xe000) >> 13;
+       itbits = (cpsr & 0x1c00) >> (10 - 2);
+       itbits |= (cpsr & (0x3 << 25)) >> 25;
+
+       /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
+       if ((itbits & 0x7) == 0)
+               itbits = cond = 0;
+       else
+               itbits = (itbits << 1) & 0x1f;
+
+       cpsr &= ~COMPAT_PSR_IT_MASK;
+       cpsr |= cond << 13;
+       cpsr |= (itbits & 0x1c) << (10 - 2);
+       cpsr |= (itbits & 0x3) << 25;
+       *vcpu_cpsr(vcpu) = cpsr;
+}
+
+/**
+ * kvm_skip_instr - skip a trapped instruction and proceed to the next
+ * @vcpu: The vcpu pointer
+ */
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+       bool is_thumb;
+
+       is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
+       if (is_thumb && !is_wide_instr)
+               *vcpu_pc(vcpu) += 2;
+       else
+               *vcpu_pc(vcpu) += 4;
+       kvm_adjust_itstate(vcpu);
+}
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
new file mode 100644 (file)
index 0000000..7679469
--- /dev/null
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/kvm/guest.c:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <asm/cputype.h>
+#include <asm/uaccess.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+       { NULL }
+};
+
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+       return 0;
+}
+
+static u64 core_reg_offset_from_id(u64 id)
+{
+       return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
+}
+
+static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       /*
+        * Because the kvm_regs structure is a mix of 32, 64 and
+        * 128bit fields, we index it as if it was a 32bit
+        * array. Hence below, nr_regs is the number of entries, and
+        * off the index in the "array".
+        */
+       __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
+       struct kvm_regs *regs = vcpu_gp_regs(vcpu);
+       int nr_regs = sizeof(*regs) / sizeof(__u32);
+       u32 off;
+
+       /* Our ID is an index into the kvm_regs struct. */
+       off = core_reg_offset_from_id(reg->id);
+       if (off >= nr_regs ||
+           (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
+               return -ENOENT;
+
+       if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
+       struct kvm_regs *regs = vcpu_gp_regs(vcpu);
+       int nr_regs = sizeof(*regs) / sizeof(__u32);
+       __uint128_t tmp;
+       void *valp = &tmp;
+       u64 off;
+       int err = 0;
+
+       /* Our ID is an index into the kvm_regs struct. */
+       off = core_reg_offset_from_id(reg->id);
+       if (off >= nr_regs ||
+           (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
+               return -ENOENT;
+
+       if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
+               return -EINVAL;
+
+       if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
+               err = -EFAULT;
+               goto out;
+       }
+
+       if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
+               u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
+               switch (mode) {
+               case COMPAT_PSR_MODE_USR:
+               case COMPAT_PSR_MODE_FIQ:
+               case COMPAT_PSR_MODE_IRQ:
+               case COMPAT_PSR_MODE_SVC:
+               case COMPAT_PSR_MODE_ABT:
+               case COMPAT_PSR_MODE_UND:
+               case PSR_MODE_EL0t:
+               case PSR_MODE_EL1t:
+               case PSR_MODE_EL1h:
+                       break;
+               default:
+                       err = -EINVAL;
+                       goto out;
+               }
+       }
+
+       memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+out:
+       return err;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       return -EINVAL;
+}
+
+static unsigned long num_core_regs(void)
+{
+       return sizeof(struct kvm_regs) / sizeof(__u32);
+}
+
+/**
+ * ARM64 versions of the TIMER registers, always available on arm64
+ */
+
+#define NUM_TIMER_REGS 3
+
+static bool is_timer_reg(u64 index)
+{
+       switch (index) {
+       case KVM_REG_ARM_TIMER_CTL:
+       case KVM_REG_ARM_TIMER_CNT:
+       case KVM_REG_ARM_TIMER_CVAL:
+               return true;
+       }
+       return false;
+}
+
+static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+       if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
+               return -EFAULT;
+       uindices++;
+       if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
+               return -EFAULT;
+       uindices++;
+       if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       void __user *uaddr = (void __user *)(long)reg->addr;
+       u64 val;
+       int ret;
+
+       ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
+       if (ret != 0)
+               return -EFAULT;
+
+       return kvm_arm_timer_set_reg(vcpu, reg->id, val);
+}
+
+static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       void __user *uaddr = (void __user *)(long)reg->addr;
+       u64 val;
+
+       val = kvm_arm_timer_get_reg(vcpu, reg->id);
+       return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+}
+
+/**
+ * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
+ *
+ * This is for all registers.
+ */
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
+{
+       return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
+                + NUM_TIMER_REGS;
+}
+
+/**
+ * kvm_arm_copy_reg_indices - get indices of all registers.
+ *
+ * We do core registers right here, then we apppend system regs.
+ */
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+       unsigned int i;
+       const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
+       int ret;
+
+       for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
+               if (put_user(core_reg | i, uindices))
+                       return -EFAULT;
+               uindices++;
+       }
+
+       ret = copy_timer_indices(vcpu, uindices);
+       if (ret)
+               return ret;
+       uindices += NUM_TIMER_REGS;
+
+       return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
+}
+
+int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       /* We currently use nothing arch-specific in upper 32 bits */
+       if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
+               return -EINVAL;
+
+       /* Register group 16 means we want a core register. */
+       if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
+               return get_core_reg(vcpu, reg);
+
+       if (is_timer_reg(reg->id))
+               return get_timer_reg(vcpu, reg);
+
+       return kvm_arm_sys_reg_get_reg(vcpu, reg);
+}
+
+int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       /* We currently use nothing arch-specific in upper 32 bits */
+       if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
+               return -EINVAL;
+
+       /* Register group 16 means we set a core register. */
+       if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
+               return set_core_reg(vcpu, reg);
+
+       if (is_timer_reg(reg->id))
+               return set_timer_reg(vcpu, reg);
+
+       return kvm_arm_sys_reg_set_reg(vcpu, reg);
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+                                 struct kvm_sregs *sregs)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+                                 struct kvm_sregs *sregs)
+{
+       return -EINVAL;
+}
+
+int __attribute_const__ kvm_target_cpu(void)
+{
+       unsigned long implementor = read_cpuid_implementor();
+       unsigned long part_number = read_cpuid_part_number();
+
+       switch (implementor) {
+       case ARM_CPU_IMP_ARM:
+               switch (part_number) {
+               case ARM_CPU_PART_AEM_V8:
+                       return KVM_ARM_TARGET_AEM_V8;
+               case ARM_CPU_PART_FOUNDATION:
+                       return KVM_ARM_TARGET_FOUNDATION_V8;
+               case ARM_CPU_PART_CORTEX_A53:
+                       return KVM_ARM_TARGET_CORTEX_A53;
+               case ARM_CPU_PART_CORTEX_A57:
+                       return KVM_ARM_TARGET_CORTEX_A57;
+               };
+               break;
+       case ARM_CPU_IMP_APM:
+               switch (part_number) {
+               case APM_CPU_PART_POTENZA:
+                       return KVM_ARM_TARGET_XGENE_POTENZA;
+               };
+               break;
+       };
+
+       return -EINVAL;
+}
+
+int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+                       const struct kvm_vcpu_init *init)
+{
+       unsigned int i;
+       int phys_target = kvm_target_cpu();
+
+       if (init->target != phys_target)
+               return -EINVAL;
+
+       vcpu->arch.target = phys_target;
+       bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+
+       /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
+       for (i = 0; i < sizeof(init->features) * 8; i++) {
+               if (init->features[i / 32] & (1 << (i % 32))) {
+                       if (i >= KVM_VCPU_MAX_FEATURES)
+                               return -ENOENT;
+                       set_bit(i, vcpu->arch.features);
+               }
+       }
+
+       /* Now we know what it is, we can reset it. */
+       return kvm_reset_vcpu(vcpu);
+}
+
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
+{
+       int target = kvm_target_cpu();
+
+       if (target < 0)
+               return -ENODEV;
+
+       memset(init, 0, sizeof(*init));
+
+       /*
+        * For now, we don't return any features.
+        * In future, we might use features to return target
+        * specific features available for the preferred
+        * target type.
+        */
+       init->target = (__u32)target;
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+                                 struct kvm_translation *tr)
+{
+       return -EINVAL;
+}
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
new file mode 100644 (file)
index 0000000..e28be51
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/kvm/handle_exit.c:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_psci.h>
+
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+
+/*
+ * Guest executed an HVC instruction: hand it to the PSCI emulation.
+ * A negative return from kvm_psci_call() means the call was not handled,
+ * in which case an undefined instruction exception is injected into the
+ * guest and we return 1 to resume it.
+ */
+static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       int ret;
+
+       ret = kvm_psci_call(vcpu);
+       if (ret < 0) {
+               kvm_inject_undefined(vcpu);
+               return 1;
+       }
+
+       return ret;
+}
+
+/*
+ * Guest executed an SMC instruction: not supported from a guest, so
+ * inject an undefined instruction exception and resume the guest.
+ */
+static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
+/**
+ * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
+ *                 instruction executed by a guest
+ *
+ * @vcpu:      the vcpu pointer
+ * @run:       the kvm_run structure (unused here)
+ *
+ * WFE: Yield the CPU and come back to this vcpu when the scheduler
+ * decides to.
+ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
+ * world-switches and schedule other host processes until there is an
+ * incoming IRQ or FIQ to the VM.
+ *
+ * Returns 1 so the guest is resumed afterwards.
+ */
+static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       /* The ESR_EL2 ISS WFE bit distinguishes WFE from WFI traps */
+       if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
+               kvm_vcpu_on_spin(vcpu);
+       else
+               kvm_vcpu_block(vcpu);
+
+       return 1;
+}
+
+/*
+ * Dispatch table indexed by the ESR_EL2 exception class (EC) of the
+ * trapped guest exception. Entries left NULL are treated as fatal by
+ * kvm_get_exit_handler().
+ */
+static exit_handle_fn arm_exit_handlers[] = {
+       [ESR_EL2_EC_WFI]        = kvm_handle_wfx,
+       [ESR_EL2_EC_CP15_32]    = kvm_handle_cp15_32,
+       [ESR_EL2_EC_CP15_64]    = kvm_handle_cp15_64,
+       [ESR_EL2_EC_CP14_MR]    = kvm_handle_cp14_32,
+       [ESR_EL2_EC_CP14_LS]    = kvm_handle_cp14_load_store,
+       [ESR_EL2_EC_CP14_64]    = kvm_handle_cp14_64,
+       [ESR_EL2_EC_HVC32]      = handle_hvc,
+       [ESR_EL2_EC_SMC32]      = handle_smc,
+       [ESR_EL2_EC_HVC64]      = handle_hvc,
+       [ESR_EL2_EC_SMC64]      = handle_smc,
+       [ESR_EL2_EC_SYS64]      = kvm_handle_sys_reg,
+       [ESR_EL2_EC_IABT]       = kvm_handle_guest_abort,
+       [ESR_EL2_EC_DABT]       = kvm_handle_guest_abort,
+};
+
+/*
+ * Look up the exit handler for the exception class recorded in the
+ * vcpu's HSR/ESR. An out-of-range or unregistered class is a fatal
+ * condition: log the raw HSR and BUG().
+ */
+static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
+{
+       u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+       if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
+           !arm_exit_handlers[hsr_ec]) {
+               kvm_err("Unknown exception class: hsr: %#08x\n",
+                       (unsigned int)kvm_vcpu_get_hsr(vcpu));
+               BUG();
+       }
+
+       return arm_exit_handlers[hsr_ec];
+}
+
+/*
+ * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
+ * proper exit to userspace.
+ *
+ * @vcpu:            the vcpu that took the exit
+ * @run:             the kvm_run structure handed to the exit handler
+ * @exception_index: the ARM_EXCEPTION_* reason recorded by the world switch
+ */
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                      int exception_index)
+{
+       exit_handle_fn exit_handler;
+
+       switch (exception_index) {
+       case ARM_EXCEPTION_IRQ:
+               /* Nothing to do for a host interrupt: re-enter the guest */
+               return 1;
+       case ARM_EXCEPTION_TRAP:
+               /*
+                * See ARM ARM B1.14.1: "Hyp traps on instructions
+                * that fail their condition code check"
+                */
+               if (!kvm_condition_valid(vcpu)) {
+                       /* Failed its condition check: skip and re-enter */
+                       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+                       return 1;
+               }
+
+               exit_handler = kvm_get_exit_handler(vcpu);
+
+               return exit_handler(vcpu, run);
+       default:
+               kvm_pr_unimpl("Unsupported exception type: %d",
+                             exception_index);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               return 0;
+       }
+}
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
new file mode 100644 (file)
index 0000000..d968796
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+       .text
+       .pushsection    .hyp.idmap.text, "ax"
+
+       .align  11
+
+/*
+ * EL2 initialisation entry point, entered through the synchronous
+ * 64-bit EL1 vector below with:
+ *   x0 = HYP boot pgd, x1 = HYP pgd, x2 = HYP stack, x3 = HYP vectors
+ * All other vectors hang (__invalid).
+ */
+ENTRY(__kvm_hyp_init)
+       ventry  __invalid               // Synchronous EL2t
+       ventry  __invalid               // IRQ EL2t
+       ventry  __invalid               // FIQ EL2t
+       ventry  __invalid               // Error EL2t
+
+       ventry  __invalid               // Synchronous EL2h
+       ventry  __invalid               // IRQ EL2h
+       ventry  __invalid               // FIQ EL2h
+       ventry  __invalid               // Error EL2h
+
+       ventry  __do_hyp_init           // Synchronous 64-bit EL1
+       ventry  __invalid               // IRQ 64-bit EL1
+       ventry  __invalid               // FIQ 64-bit EL1
+       ventry  __invalid               // Error 64-bit EL1
+
+       ventry  __invalid               // Synchronous 32-bit EL1
+       ventry  __invalid               // IRQ 32-bit EL1
+       ventry  __invalid               // FIQ 32-bit EL1
+       ventry  __invalid               // Error 32-bit EL1
+
+__invalid:
+       b       .
+
+       /*
+        * x0: HYP boot pgd
+        * x1: HYP pgd
+        * x2: HYP stack
+        * x3: HYP vectors
+        */
+__do_hyp_init:
+
+       // Install the boot page tables first, so the trampoline is mapped
+       msr     ttbr0_el2, x0
+
+       // Build TCR_EL2 from the kernel's TCR_EL1 plus the EL2 flags
+       mrs     x4, tcr_el1
+       ldr     x5, =TCR_EL2_MASK
+       and     x4, x4, x5
+       ldr     x5, =TCR_EL2_FLAGS
+       orr     x4, x4, x5
+       msr     tcr_el2, x4
+
+       ldr     x4, =VTCR_EL2_FLAGS
+       /*
+        * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
+        * VTCR_EL2.
+        */
+       mrs     x5, ID_AA64MMFR0_EL1
+       bfi     x4, x5, #16, #3
+       msr     vtcr_el2, x4
+
+       mrs     x4, mair_el1
+       msr     mair_el2, x4
+       isb
+
+       mrs     x4, sctlr_el2
+       and     x4, x4, #SCTLR_EL2_EE   // preserve endianness of EL2
+       ldr     x5, =SCTLR_EL2_FLAGS
+       orr     x4, x4, x5
+       msr     sctlr_el2, x4
+       isb
+
+       /* MMU is now enabled. Get ready for the trampoline dance */
+       ldr     x4, =TRAMPOLINE_VA
+       adr     x5, target
+       bfi     x4, x5, #0, #PAGE_SHIFT
+       br      x4
+
+target: /* We're now in the trampoline code, switch page tables */
+       msr     ttbr0_el2, x1
+       isb
+
+       /* Invalidate the old TLBs */
+       tlbi    alle2
+       dsb     sy
+
+       /* Set the stack and new vectors */
+       kern_hyp_va     x2
+       mov     sp, x2
+       kern_hyp_va     x3
+       msr     vbar_el2, x3
+
+       /* Hello, World! */
+       eret
+ENDPROC(__kvm_hyp_init)
+
+       .ltorg
+
+       .popsection
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
new file mode 100644 (file)
index 0000000..b72aa9f
--- /dev/null
@@ -0,0 +1,1274 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/asm-offsets.h>
+#include <asm/debug-monitors.h>
+#include <asm/fpsimdmacros.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+#define CPU_GP_REG_OFFSET(x)   (CPU_GP_REGS + x)
+#define CPU_XREG_OFFSET(x)     CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+#define CPU_SPSR_OFFSET(x)     CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
+#define CPU_SYSREG_OFFSET(x)   (CPU_SYSREGS + 8*x)
+
+       .text
+       .pushsection    .hyp.text, "ax"
+       .align  PAGE_SHIFT
+
+.macro save_common_regs
+       // Save the state common to host and guest world switch:
+       // x19-x28, x29/lr, SP_EL0, the EL2 return state (EL1 PC/pstate
+       // held in elr_el2/spsr_el2), and the EL1 stack/exception state.
+       // x2: base address for cpu context
+       // x3: tmp register
+
+       add     x3, x2, #CPU_XREG_OFFSET(19)
+       stp     x19, x20, [x3]
+       stp     x21, x22, [x3, #16]
+       stp     x23, x24, [x3, #32]
+       stp     x25, x26, [x3, #48]
+       stp     x27, x28, [x3, #64]
+       stp     x29, lr, [x3, #80]
+
+       mrs     x19, sp_el0
+       mrs     x20, elr_el2            // EL1 PC
+       mrs     x21, spsr_el2           // EL1 pstate
+
+       stp     x19, x20, [x3, #96]
+       str     x21, [x3, #112]
+
+       mrs     x22, sp_el1
+       mrs     x23, elr_el1
+       mrs     x24, spsr_el1
+
+       str     x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
+       str     x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
+       str     x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
+.endm
+
+.macro restore_common_regs
+       // Mirror image of save_common_regs: reload EL1 state, SP_EL0,
+       // the EL2 return state, then x19-x28 and x29/lr.
+       // x2: base address for cpu context
+       // x3: tmp register
+
+       ldr     x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
+       ldr     x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
+       ldr     x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
+
+       msr     sp_el1, x22
+       msr     elr_el1, x23
+       msr     spsr_el1, x24
+
+       add     x3, x2, #CPU_XREG_OFFSET(31)    // SP_EL0
+       ldp     x19, x20, [x3]
+       ldr     x21, [x3, #16]
+
+       msr     sp_el0, x19
+       msr     elr_el2, x20                            // EL1 PC
+       msr     spsr_el2, x21                           // EL1 pstate
+
+       add     x3, x2, #CPU_XREG_OFFSET(19)
+       ldp     x19, x20, [x3]
+       ldp     x21, x22, [x3, #16]
+       ldp     x23, x24, [x3, #32]
+       ldp     x25, x26, [x3, #48]
+       ldp     x27, x28, [x3, #64]
+       ldp     x29, lr, [x3, #80]
+.endm
+
+.macro save_host_regs
+       // For the host, only the common register set needs saving.
+       save_common_regs
+.endm
+
+.macro restore_host_regs
+       // For the host, only the common register set needs restoring.
+       restore_common_regs
+.endm
+
+.macro save_fpsimd
+       // Save the FP/SIMD register file into the context's CPU_FP_REGS area.
+       // x2: cpu context address
+       // x3, x4: tmp regs
+       add     x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
+       fpsimd_save x3, 4
+.endm
+
+.macro restore_fpsimd
+       // Restore the FP/SIMD register file from the context's CPU_FP_REGS area.
+       // x2: cpu context address
+       // x3, x4: tmp regs
+       add     x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
+       fpsimd_restore x3, 4
+.endm
+
+.macro save_guest_regs
+       // Save all guest general purpose registers: x4-x18 directly, then
+       // the guest's x0-x3 (previously pushed on the stack), then the
+       // common register set.
+       // x0 is the vcpu address
+       // x1 is the return code, do not corrupt!
+       // x2 is the cpu context
+       // x3 is a tmp register
+       // Guest's x0-x3 are on the stack
+
+       // Compute base to save registers
+       add     x3, x2, #CPU_XREG_OFFSET(4)
+       stp     x4, x5, [x3]
+       stp     x6, x7, [x3, #16]
+       stp     x8, x9, [x3, #32]
+       stp     x10, x11, [x3, #48]
+       stp     x12, x13, [x3, #64]
+       stp     x14, x15, [x3, #80]
+       stp     x16, x17, [x3, #96]
+       str     x18, [x3, #112]
+
+       pop     x6, x7                  // x2, x3
+       pop     x4, x5                  // x0, x1
+
+       add     x3, x2, #CPU_XREG_OFFSET(0)
+       stp     x4, x5, [x3]
+       stp     x6, x7, [x3, #16]
+
+       save_common_regs
+.endm
+
+.macro restore_guest_regs
+       // Restore all guest general purpose registers; the guest's x0-x3
+       // are staged on the stack and popped last, since x0-x3 are still
+       // needed as working registers until the very end.
+       // x0 is the vcpu address.
+       // x2 is the cpu context
+       // x3 is a tmp register
+
+       // Prepare x0-x3 for later restore
+       add     x3, x2, #CPU_XREG_OFFSET(0)
+       ldp     x4, x5, [x3]
+       ldp     x6, x7, [x3, #16]
+       push    x4, x5          // Push x0-x3 on the stack
+       push    x6, x7
+
+       // x4-x18
+       ldp     x4, x5, [x3, #32]
+       ldp     x6, x7, [x3, #48]
+       ldp     x8, x9, [x3, #64]
+       ldp     x10, x11, [x3, #80]
+       ldp     x12, x13, [x3, #96]
+       ldp     x14, x15, [x3, #112]
+       ldp     x16, x17, [x3, #128]
+       ldr     x18, [x3, #144]
+
+       // x19-x29, lr, sp*, elr*, spsr*
+       restore_common_regs
+
+       // Last bits of the 64bit state
+       pop     x2, x3
+       pop     x0, x1
+
+       // Do not touch any register after this!
+.endm
+
+/*
+ * Macros to perform system register save/restore.
+ *
+ * Ordering here is absolutely critical, and must be kept consistent
+ * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
+ * and in kvm_asm.h.
+ *
+ * In other words, don't touch any of these unless you know what
+ * you are doing.
+ */
+.macro save_sysregs
+       // Save the EL1 system registers into the sysreg array, starting at
+       // MPIDR_EL1. The mrs order below fixes the memory layout and must
+       // match CPU_SYSREG_OFFSET / kvm_asm.h (see the warning above).
+       // x2: base address for cpu context
+       // x3: tmp register
+
+       add     x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
+
+       mrs     x4,     vmpidr_el2
+       mrs     x5,     csselr_el1
+       mrs     x6,     sctlr_el1
+       mrs     x7,     actlr_el1
+       mrs     x8,     cpacr_el1
+       mrs     x9,     ttbr0_el1
+       mrs     x10,    ttbr1_el1
+       mrs     x11,    tcr_el1
+       mrs     x12,    esr_el1
+       mrs     x13,    afsr0_el1
+       mrs     x14,    afsr1_el1
+       mrs     x15,    far_el1
+       mrs     x16,    mair_el1
+       mrs     x17,    vbar_el1
+       mrs     x18,    contextidr_el1
+       mrs     x19,    tpidr_el0
+       mrs     x20,    tpidrro_el0
+       mrs     x21,    tpidr_el1
+       mrs     x22,    amair_el1
+       mrs     x23,    cntkctl_el1
+       mrs     x24,    par_el1
+       mrs     x25,    mdscr_el1
+
+       stp     x4, x5, [x3]
+       stp     x6, x7, [x3, #16]
+       stp     x8, x9, [x3, #32]
+       stp     x10, x11, [x3, #48]
+       stp     x12, x13, [x3, #64]
+       stp     x14, x15, [x3, #80]
+       stp     x16, x17, [x3, #96]
+       stp     x18, x19, [x3, #112]
+       stp     x20, x21, [x3, #128]
+       stp     x22, x23, [x3, #144]
+       stp     x24, x25, [x3, #160]
+.endm
+
+.macro save_debug
+       // Save the hardware breakpoint/watchpoint registers and MDCCINT.
+       // The implemented BRP/WRP count is read from ID_AA64DFR0_EL1; each
+       // "br x26" computed branch then jumps past the mrs/str slots for
+       // unimplemented registers (one 4-byte instruction per slot, hence
+       // the lsl #2), so only the implemented ones are accessed.
+       // x2: base address for cpu context
+       // x3: tmp register
+
+       mrs     x26, id_aa64dfr0_el1
+       ubfx    x24, x26, #12, #4       // Extract BRPs
+       ubfx    x25, x26, #20, #4       // Extract WRPs
+       mov     w26, #15
+       sub     w24, w26, w24           // How many BPs to skip
+       sub     w25, w26, w25           // How many WPs to skip
+
+       add     x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
+
+       adr     x26, 1f
+       add     x26, x26, x24, lsl #2
+       br      x26
+1:
+       mrs     x20, dbgbcr15_el1
+       mrs     x19, dbgbcr14_el1
+       mrs     x18, dbgbcr13_el1
+       mrs     x17, dbgbcr12_el1
+       mrs     x16, dbgbcr11_el1
+       mrs     x15, dbgbcr10_el1
+       mrs     x14, dbgbcr9_el1
+       mrs     x13, dbgbcr8_el1
+       mrs     x12, dbgbcr7_el1
+       mrs     x11, dbgbcr6_el1
+       mrs     x10, dbgbcr5_el1
+       mrs     x9, dbgbcr4_el1
+       mrs     x8, dbgbcr3_el1
+       mrs     x7, dbgbcr2_el1
+       mrs     x6, dbgbcr1_el1
+       mrs     x5, dbgbcr0_el1
+
+       adr     x26, 1f
+       add     x26, x26, x24, lsl #2
+       br      x26
+
+1:
+       str     x20, [x3, #(15 * 8)]
+       str     x19, [x3, #(14 * 8)]
+       str     x18, [x3, #(13 * 8)]
+       str     x17, [x3, #(12 * 8)]
+       str     x16, [x3, #(11 * 8)]
+       str     x15, [x3, #(10 * 8)]
+       str     x14, [x3, #(9 * 8)]
+       str     x13, [x3, #(8 * 8)]
+       str     x12, [x3, #(7 * 8)]
+       str     x11, [x3, #(6 * 8)]
+       str     x10, [x3, #(5 * 8)]
+       str     x9, [x3, #(4 * 8)]
+       str     x8, [x3, #(3 * 8)]
+       str     x7, [x3, #(2 * 8)]
+       str     x6, [x3, #(1 * 8)]
+       str     x5, [x3, #(0 * 8)]
+
+       add     x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
+
+       adr     x26, 1f
+       add     x26, x26, x24, lsl #2
+       br      x26
+1:
+       mrs     x20, dbgbvr15_el1
+       mrs     x19, dbgbvr14_el1
+       mrs     x18, dbgbvr13_el1
+       mrs     x17, dbgbvr12_el1
+       mrs     x16, dbgbvr11_el1
+       mrs     x15, dbgbvr10_el1
+       mrs     x14, dbgbvr9_el1
+       mrs     x13, dbgbvr8_el1
+       mrs     x12, dbgbvr7_el1
+       mrs     x11, dbgbvr6_el1
+       mrs     x10, dbgbvr5_el1
+       mrs     x9, dbgbvr4_el1
+       mrs     x8, dbgbvr3_el1
+       mrs     x7, dbgbvr2_el1
+       mrs     x6, dbgbvr1_el1
+       mrs     x5, dbgbvr0_el1
+
+       adr     x26, 1f
+       add     x26, x26, x24, lsl #2
+       br      x26
+
+1:
+       str     x20, [x3, #(15 * 8)]
+       str     x19, [x3, #(14 * 8)]
+       str     x18, [x3, #(13 * 8)]
+       str     x17, [x3, #(12 * 8)]
+       str     x16, [x3, #(11 * 8)]
+       str     x15, [x3, #(10 * 8)]
+       str     x14, [x3, #(9 * 8)]
+       str     x13, [x3, #(8 * 8)]
+       str     x12, [x3, #(7 * 8)]
+       str     x11, [x3, #(6 * 8)]
+       str     x10, [x3, #(5 * 8)]
+       str     x9, [x3, #(4 * 8)]
+       str     x8, [x3, #(3 * 8)]
+       str     x7, [x3, #(2 * 8)]
+       str     x6, [x3, #(1 * 8)]
+       str     x5, [x3, #(0 * 8)]
+
+       add     x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
+
+       adr     x26, 1f
+       add     x26, x26, x25, lsl #2
+       br      x26
+1:
+       mrs     x20, dbgwcr15_el1
+       mrs     x19, dbgwcr14_el1
+       mrs     x18, dbgwcr13_el1
+       mrs     x17, dbgwcr12_el1
+       mrs     x16, dbgwcr11_el1
+       mrs     x15, dbgwcr10_el1
+       mrs     x14, dbgwcr9_el1
+       mrs     x13, dbgwcr8_el1
+       mrs     x12, dbgwcr7_el1
+       mrs     x11, dbgwcr6_el1
+       mrs     x10, dbgwcr5_el1
+       mrs     x9, dbgwcr4_el1
+       mrs     x8, dbgwcr3_el1
+       mrs     x7, dbgwcr2_el1
+       mrs     x6, dbgwcr1_el1
+       mrs     x5, dbgwcr0_el1
+
+       adr     x26, 1f
+       add     x26, x26, x25, lsl #2
+       br      x26
+
+1:
+       str     x20, [x3, #(15 * 8)]
+       str     x19, [x3, #(14 * 8)]
+       str     x18, [x3, #(13 * 8)]
+       str     x17, [x3, #(12 * 8)]
+       str     x16, [x3, #(11 * 8)]
+       str     x15, [x3, #(10 * 8)]
+       str     x14, [x3, #(9 * 8)]
+       str     x13, [x3, #(8 * 8)]
+       str     x12, [x3, #(7 * 8)]
+       str     x11, [x3, #(6 * 8)]
+       str     x10, [x3, #(5 * 8)]
+       str     x9, [x3, #(4 * 8)]
+       str     x8, [x3, #(3 * 8)]
+       str     x7, [x3, #(2 * 8)]
+       str     x6, [x3, #(1 * 8)]
+       str     x5, [x3, #(0 * 8)]
+
+       add     x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
+
+       adr     x26, 1f
+       add     x26, x26, x25, lsl #2
+       br      x26
+1:
+       mrs     x20, dbgwvr15_el1
+       mrs     x19, dbgwvr14_el1
+       mrs     x18, dbgwvr13_el1
+       mrs     x17, dbgwvr12_el1
+       mrs     x16, dbgwvr11_el1
+       mrs     x15, dbgwvr10_el1
+       mrs     x14, dbgwvr9_el1
+       mrs     x13, dbgwvr8_el1
+       mrs     x12, dbgwvr7_el1
+       mrs     x11, dbgwvr6_el1
+       mrs     x10, dbgwvr5_el1
+       mrs     x9, dbgwvr4_el1
+       mrs     x8, dbgwvr3_el1
+       mrs     x7, dbgwvr2_el1
+       mrs     x6, dbgwvr1_el1
+       mrs     x5, dbgwvr0_el1
+
+       adr     x26, 1f
+       add     x26, x26, x25, lsl #2
+       br      x26
+
+1:
+       str     x20, [x3, #(15 * 8)]
+       str     x19, [x3, #(14 * 8)]
+       str     x18, [x3, #(13 * 8)]
+       str     x17, [x3, #(12 * 8)]
+       str     x16, [x3, #(11 * 8)]
+       str     x15, [x3, #(10 * 8)]
+       str     x14, [x3, #(9 * 8)]
+       str     x13, [x3, #(8 * 8)]
+       str     x12, [x3, #(7 * 8)]
+       str     x11, [x3, #(6 * 8)]
+       str     x10, [x3, #(5 * 8)]
+       str     x9, [x3, #(4 * 8)]
+       str     x8, [x3, #(3 * 8)]
+       str     x7, [x3, #(2 * 8)]
+       str     x6, [x3, #(1 * 8)]
+       str     x5, [x3, #(0 * 8)]
+
+       mrs     x21, mdccint_el1
+       str     x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+.endm
+
+.macro restore_sysregs
+       // Mirror of save_sysregs: reload the EL1 system registers from the
+       // sysreg array and write them back, in the same fixed order.
+       // x2: base address for cpu context
+       // x3: tmp register
+
+       add     x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
+
+       ldp     x4, x5, [x3]
+       ldp     x6, x7, [x3, #16]
+       ldp     x8, x9, [x3, #32]
+       ldp     x10, x11, [x3, #48]
+       ldp     x12, x13, [x3, #64]
+       ldp     x14, x15, [x3, #80]
+       ldp     x16, x17, [x3, #96]
+       ldp     x18, x19, [x3, #112]
+       ldp     x20, x21, [x3, #128]
+       ldp     x22, x23, [x3, #144]
+       ldp     x24, x25, [x3, #160]
+
+       msr     vmpidr_el2,     x4
+       msr     csselr_el1,     x5
+       msr     sctlr_el1,      x6
+       msr     actlr_el1,      x7
+       msr     cpacr_el1,      x8
+       msr     ttbr0_el1,      x9
+       msr     ttbr1_el1,      x10
+       msr     tcr_el1,        x11
+       msr     esr_el1,        x12
+       msr     afsr0_el1,      x13
+       msr     afsr1_el1,      x14
+       msr     far_el1,        x15
+       msr     mair_el1,       x16
+       msr     vbar_el1,       x17
+       msr     contextidr_el1, x18
+       msr     tpidr_el0,      x19
+       msr     tpidrro_el0,    x20
+       msr     tpidr_el1,      x21
+       msr     amair_el1,      x22
+       msr     cntkctl_el1,    x23
+       msr     par_el1,        x24
+       msr     mdscr_el1,      x25
+.endm
+
+.macro restore_debug
+       // Mirror of save_debug: reload the breakpoint/watchpoint registers
+       // and MDCCINT, using the same computed-branch trick to skip the
+       // ldr/msr slots of unimplemented registers.
+       // x2: base address for cpu context
+       // x3: tmp register
+
+       mrs     x26, id_aa64dfr0_el1
+       ubfx    x24, x26, #12, #4       // Extract BRPs
+       ubfx    x25, x26, #20, #4       // Extract WRPs
+       mov     w26, #15
+       sub     w24, w26, w24           // How many BPs to skip
+       sub     w25, w26, w25           // How many WPs to skip
+
+       add     x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
+
+       adr     x26, 1f
+       add     x26, x26, x24, lsl #2
+       br      x26
+1:
+       ldr     x20, [x3, #(15 * 8)]
+       ldr     x19, [x3, #(14 * 8)]
+       ldr     x18, [x3, #(13 * 8)]
+       ldr     x17, [x3, #(12 * 8)]
+       ldr     x16, [x3, #(11 * 8)]
+       ldr     x15, [x3, #(10 * 8)]
+       ldr     x14, [x3, #(9 * 8)]
+       ldr     x13, [x3, #(8 * 8)]
+       ldr     x12, [x3, #(7 * 8)]
+       ldr     x11, [x3, #(6 * 8)]
+       ldr     x10, [x3, #(5 * 8)]
+       ldr     x9, [x3, #(4 * 8)]
+       ldr     x8, [x3, #(3 * 8)]
+       ldr     x7, [x3, #(2 * 8)]
+       ldr     x6, [x3, #(1 * 8)]
+       ldr     x5, [x3, #(0 * 8)]
+
+       adr     x26, 1f
+       add     x26, x26, x24, lsl #2
+       br      x26
+1:
+       msr     dbgbcr15_el1, x20
+       msr     dbgbcr14_el1, x19
+       msr     dbgbcr13_el1, x18
+       msr     dbgbcr12_el1, x17
+       msr     dbgbcr11_el1, x16
+       msr     dbgbcr10_el1, x15
+       msr     dbgbcr9_el1, x14
+       msr     dbgbcr8_el1, x13
+       msr     dbgbcr7_el1, x12
+       msr     dbgbcr6_el1, x11
+       msr     dbgbcr5_el1, x10
+       msr     dbgbcr4_el1, x9
+       msr     dbgbcr3_el1, x8
+       msr     dbgbcr2_el1, x7
+       msr     dbgbcr1_el1, x6
+       msr     dbgbcr0_el1, x5
+
+       add     x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
+
+       adr     x26, 1f
+       add     x26, x26, x24, lsl #2
+       br      x26
+1:
+       ldr     x20, [x3, #(15 * 8)]
+       ldr     x19, [x3, #(14 * 8)]
+       ldr     x18, [x3, #(13 * 8)]
+       ldr     x17, [x3, #(12 * 8)]
+       ldr     x16, [x3, #(11 * 8)]
+       ldr     x15, [x3, #(10 * 8)]
+       ldr     x14, [x3, #(9 * 8)]
+       ldr     x13, [x3, #(8 * 8)]
+       ldr     x12, [x3, #(7 * 8)]
+       ldr     x11, [x3, #(6 * 8)]
+       ldr     x10, [x3, #(5 * 8)]
+       ldr     x9, [x3, #(4 * 8)]
+       ldr     x8, [x3, #(3 * 8)]
+       ldr     x7, [x3, #(2 * 8)]
+       ldr     x6, [x3, #(1 * 8)]
+       ldr     x5, [x3, #(0 * 8)]
+
+       adr     x26, 1f
+       add     x26, x26, x24, lsl #2
+       br      x26
+1:
+       msr     dbgbvr15_el1, x20
+       msr     dbgbvr14_el1, x19
+       msr     dbgbvr13_el1, x18
+       msr     dbgbvr12_el1, x17
+       msr     dbgbvr11_el1, x16
+       msr     dbgbvr10_el1, x15
+       msr     dbgbvr9_el1, x14
+       msr     dbgbvr8_el1, x13
+       msr     dbgbvr7_el1, x12
+       msr     dbgbvr6_el1, x11
+       msr     dbgbvr5_el1, x10
+       msr     dbgbvr4_el1, x9
+       msr     dbgbvr3_el1, x8
+       msr     dbgbvr2_el1, x7
+       msr     dbgbvr1_el1, x6
+       msr     dbgbvr0_el1, x5
+
+       add     x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
+
+       adr     x26, 1f
+       add     x26, x26, x25, lsl #2
+       br      x26
+1:
+       ldr     x20, [x3, #(15 * 8)]
+       ldr     x19, [x3, #(14 * 8)]
+       ldr     x18, [x3, #(13 * 8)]
+       ldr     x17, [x3, #(12 * 8)]
+       ldr     x16, [x3, #(11 * 8)]
+       ldr     x15, [x3, #(10 * 8)]
+       ldr     x14, [x3, #(9 * 8)]
+       ldr     x13, [x3, #(8 * 8)]
+       ldr     x12, [x3, #(7 * 8)]
+       ldr     x11, [x3, #(6 * 8)]
+       ldr     x10, [x3, #(5 * 8)]
+       ldr     x9, [x3, #(4 * 8)]
+       ldr     x8, [x3, #(3 * 8)]
+       ldr     x7, [x3, #(2 * 8)]
+       ldr     x6, [x3, #(1 * 8)]
+       ldr     x5, [x3, #(0 * 8)]
+
+       adr     x26, 1f
+       add     x26, x26, x25, lsl #2
+       br      x26
+1:
+       msr     dbgwcr15_el1, x20
+       msr     dbgwcr14_el1, x19
+       msr     dbgwcr13_el1, x18
+       msr     dbgwcr12_el1, x17
+       msr     dbgwcr11_el1, x16
+       msr     dbgwcr10_el1, x15
+       msr     dbgwcr9_el1, x14
+       msr     dbgwcr8_el1, x13
+       msr     dbgwcr7_el1, x12
+       msr     dbgwcr6_el1, x11
+       msr     dbgwcr5_el1, x10
+       msr     dbgwcr4_el1, x9
+       msr     dbgwcr3_el1, x8
+       msr     dbgwcr2_el1, x7
+       msr     dbgwcr1_el1, x6
+       msr     dbgwcr0_el1, x5
+
+       add     x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
+
+       adr     x26, 1f
+       add     x26, x26, x25, lsl #2
+       br      x26
+1:
+       ldr     x20, [x3, #(15 * 8)]
+       ldr     x19, [x3, #(14 * 8)]
+       ldr     x18, [x3, #(13 * 8)]
+       ldr     x17, [x3, #(12 * 8)]
+       ldr     x16, [x3, #(11 * 8)]
+       ldr     x15, [x3, #(10 * 8)]
+       ldr     x14, [x3, #(9 * 8)]
+       ldr     x13, [x3, #(8 * 8)]
+       ldr     x12, [x3, #(7 * 8)]
+       ldr     x11, [x3, #(6 * 8)]
+       ldr     x10, [x3, #(5 * 8)]
+       ldr     x9, [x3, #(4 * 8)]
+       ldr     x8, [x3, #(3 * 8)]
+       ldr     x7, [x3, #(2 * 8)]
+       ldr     x6, [x3, #(1 * 8)]
+       ldr     x5, [x3, #(0 * 8)]
+
+       adr     x26, 1f
+       add     x26, x26, x25, lsl #2
+       br      x26
+1:
+       msr     dbgwvr15_el1, x20
+       msr     dbgwvr14_el1, x19
+       msr     dbgwvr13_el1, x18
+       msr     dbgwvr12_el1, x17
+       msr     dbgwvr11_el1, x16
+       msr     dbgwvr10_el1, x15
+       msr     dbgwvr9_el1, x14
+       msr     dbgwvr8_el1, x13
+       msr     dbgwvr7_el1, x12
+       msr     dbgwvr6_el1, x11
+       msr     dbgwvr5_el1, x10
+       msr     dbgwvr4_el1, x9
+       msr     dbgwvr3_el1, x8
+       msr     dbgwvr2_el1, x7
+       msr     dbgwvr1_el1, x6
+       msr     dbgwvr0_el1, x5
+
+       ldr     x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+       msr     mdccint_el1, x21
+.endm
+
+.macro skip_32bit_state tmp, target
+       // Skip 32bit state if not needed
+       // HCR_EL2.RW set means the guest EL1 is AArch64, so there is no
+       // AArch32 banked state to deal with.
+       mrs     \tmp, hcr_el2
+       tbnz    \tmp, #HCR_RW_SHIFT, \target
+.endm
+
+.macro skip_tee_state tmp, target
+       // Skip ThumbEE state if not needed
+       // Branch to \target when bit 12 (the T32EE field) of ID_PFR0 is clear.
+       mrs     \tmp, id_pfr0_el1
+       tbz     \tmp, #12, \target
+.endm
+
+.macro skip_debug_state tmp, target
+       // Branch to \target unless the vcpu's debug state is marked dirty.
+       ldr     \tmp, [x0, #VCPU_DEBUG_FLAGS]
+       tbz     \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
+.endm
+
+.macro compute_debug_state target
+       // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
+       // is set, we do a full save/restore cycle and disable trapping.
+       add     x25, x0, #VCPU_CONTEXT
+
+       // Check the state of MDSCR_EL1
+       ldr     x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
+       and     x26, x25, #DBG_MDSCR_KDE
+       and     x25, x25, #DBG_MDSCR_MDE
+       // x25 + x26 is zero iff both the MDE and KDE bits are clear
+       adds    xzr, x25, x26
+       b.eq    9998f           // Nothing to see there
+
+       // If any interesting bits was set, we must set the flag
+       mov     x26, #KVM_ARM64_DEBUG_DIRTY
+       str     x26, [x0, #VCPU_DEBUG_FLAGS]
+       b       9999f           // Don't skip restore
+
+9998:
+       // Otherwise load the flags from memory in case we recently
+       // trapped
+       skip_debug_state x25, \target
+9999:
+.endm
+
+.macro save_guest_32bit_state
+       // Save the AArch32-only guest state: banked SPSRs, DACR32/IFSR32/
+       // FPEXC32, plus DBGVCR32 (only when debug is dirty) and the ThumbEE
+       // registers (only when implemented). Skipped for AArch64 guests.
+       skip_32bit_state x3, 1f
+
+       add     x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
+       mrs     x4, spsr_abt
+       mrs     x5, spsr_und
+       mrs     x6, spsr_irq
+       mrs     x7, spsr_fiq
+       stp     x4, x5, [x3]
+       stp     x6, x7, [x3, #16]
+
+       add     x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
+       mrs     x4, dacr32_el2
+       mrs     x5, ifsr32_el2
+       mrs     x6, fpexc32_el2
+       stp     x4, x5, [x3]
+       str     x6, [x3, #16]
+
+       skip_debug_state x8, 2f
+       mrs     x7, dbgvcr32_el2
+       str     x7, [x3, #24]
+2:
+       skip_tee_state x8, 1f
+
+       add     x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
+       mrs     x4, teecr32_el1
+       mrs     x5, teehbr32_el1
+       stp     x4, x5, [x3]
+1:
+.endm
+
+.macro restore_guest_32bit_state
+       // Mirror of save_guest_32bit_state: reload the AArch32-only guest
+       // state, with the same conditional handling of debug and ThumbEE.
+       skip_32bit_state x3, 1f
+
+       add     x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
+       ldp     x4, x5, [x3]
+       ldp     x6, x7, [x3, #16]
+       msr     spsr_abt, x4
+       msr     spsr_und, x5
+       msr     spsr_irq, x6
+       msr     spsr_fiq, x7
+
+       add     x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
+       ldp     x4, x5, [x3]
+       ldr     x6, [x3, #16]
+       msr     dacr32_el2, x4
+       msr     ifsr32_el2, x5
+       msr     fpexc32_el2, x6
+
+       skip_debug_state x8, 2f
+       ldr     x7, [x3, #24]
+       msr     dbgvcr32_el2, x7
+2:
+       skip_tee_state x8, 1f
+
+       add     x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
+       ldp     x4, x5, [x3]
+       msr     teecr32_el1, x4
+       msr     teehbr32_el1, x5
+1:
+.endm
+
+.macro activate_traps
+       // Configure EL2 trapping for guest execution: HCR from the vcpu,
+       // trace traps via CPTR, CP15 Cr=15 via HSTR, and PMU/debug traps
+       // via MDCR (HPMN field preserved).
+       ldr     x2, [x0, #VCPU_HCR_EL2]
+       msr     hcr_el2, x2
+       ldr     x2, =(CPTR_EL2_TTA)
+       msr     cptr_el2, x2
+
+       ldr     x2, =(1 << 15)  // Trap CP15 Cr=15
+       msr     hstr_el2, x2
+
+       mrs     x2, mdcr_el2
+       and     x2, x2, #MDCR_EL2_HPMN_MASK
+       orr     x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
+       orr     x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
+
+       // Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
+       // if not dirty.
+       ldr     x3, [x0, #VCPU_DEBUG_FLAGS]
+       tbnz    x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
+       orr     x2, x2,  #MDCR_EL2_TDA
+1:
+       msr     mdcr_el2, x2
+.endm
+
+.macro deactivate_traps
+       // Return to the host configuration: HCR reduced to HCR_RW, no
+       // CPTR/HSTR traps, and MDCR cleared except for the HPMN field.
+       mov     x2, #HCR_RW
+       msr     hcr_el2, x2
+       msr     cptr_el2, xzr
+       msr     hstr_el2, xzr
+
+       mrs     x2, mdcr_el2
+       and     x2, x2, #MDCR_EL2_HPMN_MASK
+       msr     mdcr_el2, x2
+.endm
+
+.macro activate_vm
+       // Load this VM's stage-2 translation base into VTTBR_EL2.
+       ldr     x1, [x0, #VCPU_KVM]
+       kern_hyp_va     x1
+       ldr     x2, [x1, #KVM_VTTBR]
+       msr     vttbr_el2, x2
+.endm
+
+.macro deactivate_vm
+       msr     vttbr_el2, xzr
+.endm
+
+/*
+ * Call into the vgic backend for state saving
+ */
+.macro save_vgic_state
+       adr     x24, __vgic_sr_vectors
+       ldr     x24, [x24, VGIC_SAVE_FN]
+       kern_hyp_va     x24
+       blr     x24
+       mrs     x24, hcr_el2
+       mov     x25, #HCR_INT_OVERRIDE
+       neg     x25, x25
+       and     x24, x24, x25
+       msr     hcr_el2, x24
+.endm
+
+/*
+ * Call into the vgic backend for state restoring
+ */
+.macro restore_vgic_state
+       mrs     x24, hcr_el2
+       ldr     x25, [x0, #VCPU_IRQ_LINES]
+       orr     x24, x24, #HCR_INT_OVERRIDE
+       orr     x24, x24, x25
+       msr     hcr_el2, x24
+       adr     x24, __vgic_sr_vectors
+       ldr     x24, [x24, #VGIC_RESTORE_FN]
+       kern_hyp_va     x24
+       blr     x24
+.endm
+
+.macro save_timer_state
+       // x0: vcpu pointer
+       ldr     x2, [x0, #VCPU_KVM]
+       kern_hyp_va x2
+       ldr     w3, [x2, #KVM_TIMER_ENABLED]
+       cbz     w3, 1f
+
+       mrs     x3, cntv_ctl_el0
+       and     x3, x3, #3
+       str     w3, [x0, #VCPU_TIMER_CNTV_CTL]
+       bic     x3, x3, #1              // Clear Enable
+       msr     cntv_ctl_el0, x3
+
+       isb
+
+       mrs     x3, cntv_cval_el0
+       str     x3, [x0, #VCPU_TIMER_CNTV_CVAL]
+
+1:
+       // Allow physical timer/counter access for the host
+       mrs     x2, cnthctl_el2
+       orr     x2, x2, #3
+       msr     cnthctl_el2, x2
+
+       // Clear cntvoff for the host
+       msr     cntvoff_el2, xzr
+.endm
+
+.macro restore_timer_state
+       // x0: vcpu pointer
+       // Disallow physical timer access for the guest
+       // Physical counter access is allowed
+       mrs     x2, cnthctl_el2
+       orr     x2, x2, #1
+       bic     x2, x2, #2
+       msr     cnthctl_el2, x2
+
+       ldr     x2, [x0, #VCPU_KVM]
+       kern_hyp_va x2
+       ldr     w3, [x2, #KVM_TIMER_ENABLED]
+       cbz     w3, 1f
+
+       ldr     x3, [x2, #KVM_TIMER_CNTVOFF]
+       msr     cntvoff_el2, x3
+       ldr     x2, [x0, #VCPU_TIMER_CNTV_CVAL]
+       msr     cntv_cval_el0, x2
+       isb
+
+       ldr     w2, [x0, #VCPU_TIMER_CNTV_CTL]
+       and     x2, x2, #3
+       msr     cntv_ctl_el0, x2
+1:
+.endm
+
+__save_sysregs:
+       save_sysregs
+       ret
+
+__restore_sysregs:
+       restore_sysregs
+       ret
+
+__save_debug:
+       save_debug
+       ret
+
+__restore_debug:
+       restore_debug
+       ret
+
+__save_fpsimd:
+       save_fpsimd
+       ret
+
+__restore_fpsimd:
+       restore_fpsimd
+       ret
+
+/*
+ * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+ *
+ * This is the world switch. The first half of the function
+ * deals with entering the guest, and anything from __kvm_vcpu_return
+ * to the end of the function deals with reentering the host.
+ * On the enter path, only x0 (vcpu pointer) must be preserved until
+ * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
+ * code) must both be preserved until the epilogue.
+ * In both cases, x2 points to the CPU context we're saving/restoring from/to.
+ */
+ENTRY(__kvm_vcpu_run)
+       kern_hyp_va     x0
+       msr     tpidr_el2, x0   // Save the vcpu register
+
+       // Host context
+       ldr     x2, [x0, #VCPU_HOST_CONTEXT]
+       kern_hyp_va x2
+
+       save_host_regs
+       bl __save_fpsimd
+       bl __save_sysregs
+
+       compute_debug_state 1f
+       bl      __save_debug
+1:
+       activate_traps
+       activate_vm
+
+       restore_vgic_state
+       restore_timer_state
+
+       // Guest context
+       add     x2, x0, #VCPU_CONTEXT
+
+       bl __restore_sysregs
+       bl __restore_fpsimd
+
+       skip_debug_state x3, 1f
+       bl      __restore_debug
+1:
+       restore_guest_32bit_state
+       restore_guest_regs
+
+       // That's it, no more messing around.
+       eret
+
+__kvm_vcpu_return:
+       // Assume x0 is the vcpu pointer, x1 the return code
+       // Guest's x0-x3 are on the stack
+
+       // Guest context
+       add     x2, x0, #VCPU_CONTEXT
+
+       save_guest_regs
+       bl __save_fpsimd
+       bl __save_sysregs
+
+       skip_debug_state x3, 1f
+       bl      __save_debug
+1:
+       save_guest_32bit_state
+
+       save_timer_state
+       save_vgic_state
+
+       deactivate_traps
+       deactivate_vm
+
+       // Host context
+       ldr     x2, [x0, #VCPU_HOST_CONTEXT]
+       kern_hyp_va x2
+
+       bl __restore_sysregs
+       bl __restore_fpsimd
+
+       skip_debug_state x3, 1f
+       // Clear the dirty flag for the next run, as all the state has
+       // already been saved. Note that we nuke the whole 64bit word.
+       // If we ever add more flags, we'll have to be more careful...
+       str     xzr, [x0, #VCPU_DEBUG_FLAGS]
+       bl      __restore_debug
+1:
+       restore_host_regs
+
+       mov     x0, x1
+       ret
+END(__kvm_vcpu_run)
+
+// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+ENTRY(__kvm_tlb_flush_vmid_ipa)
+       dsb     ishst
+
+       kern_hyp_va     x0
+       ldr     x2, [x0, #KVM_VTTBR]
+       msr     vttbr_el2, x2
+       isb
+
+       /*
+        * We could do so much better if we had the VA as well.
+        * Instead, we invalidate Stage-2 for this IPA, and the
+        * whole of Stage-1. Weep...
+        */
+       tlbi    ipas2e1is, x1
+       /*
+        * We have to ensure completion of the invalidation at Stage-2,
+        * since a table walk on another CPU could refill a TLB with a
+        * complete (S1 + S2) walk based on the old Stage-2 mapping if
+        * the Stage-1 invalidation happened first.
+        */
+       dsb     ish
+       tlbi    vmalle1is
+       dsb     ish
+       isb
+
+       msr     vttbr_el2, xzr
+       ret
+ENDPROC(__kvm_tlb_flush_vmid_ipa)
+
+ENTRY(__kvm_flush_vm_context)
+       dsb     ishst
+       tlbi    alle1is
+       ic      ialluis
+       dsb     ish
+       ret
+ENDPROC(__kvm_flush_vm_context)
+
+       // struct vgic_sr_vectors __vgic_sr_vectors;
+       .align 3
+ENTRY(__vgic_sr_vectors)
+       .skip   VGIC_SR_VECTOR_SZ
+ENDPROC(__vgic_sr_vectors)
+
+__kvm_hyp_panic:
+       // Guess the context by looking at VTTBR:
+       // If zero, then we're already a host.
+       // Otherwise restore a minimal host context before panicking.
+       mrs     x0, vttbr_el2
+       cbz     x0, 1f
+
+       mrs     x0, tpidr_el2
+
+       deactivate_traps
+       deactivate_vm
+
+       ldr     x2, [x0, #VCPU_HOST_CONTEXT]
+       kern_hyp_va x2
+
+       bl __restore_sysregs
+
+1:     adr     x0, __hyp_panic_str
+       adr     x1, 2f
+       ldp     x2, x3, [x1]
+       sub     x0, x0, x2
+       add     x0, x0, x3
+       mrs     x1, spsr_el2
+       mrs     x2, elr_el2
+       mrs     x3, esr_el2
+       mrs     x4, far_el2
+       mrs     x5, hpfar_el2
+       mrs     x6, par_el1
+       mrs     x7, tpidr_el2
+
+       mov     lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
+                     PSR_MODE_EL1h)
+       msr     spsr_el2, lr
+       ldr     lr, =panic
+       msr     elr_el2, lr
+       eret
+
+       .align  3
+2:     .quad   HYP_PAGE_OFFSET
+       .quad   PAGE_OFFSET
+ENDPROC(__kvm_hyp_panic)
+
+__hyp_panic_str:
+       .ascii  "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
+
+       .align  2
+
+/*
+ * u64 kvm_call_hyp(void *hypfn, ...);
+ *
+ * This is not really a variadic function in the classic C-way and care must
+ * be taken when calling this to ensure parameters are passed in registers
+ * only, since the stack will change between the caller and the callee.
+ *
+ * Call the function with the first argument containing a pointer to the
+ * function you wish to call in Hyp mode, and subsequent arguments will be
+ * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
+ * function pointer can be passed).  The function being called must be mapped
+ * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
+ * passed in r0 and r1.
+ *
+ * A function pointer with a value of 0 has a special meaning, and is
+ * used to implement __hyp_get_vectors in the same way as in
+ * arch/arm64/kernel/hyp_stub.S.
+ */
+ENTRY(kvm_call_hyp)
+       hvc     #0
+       ret
+ENDPROC(kvm_call_hyp)
+
+.macro invalid_vector  label, target
+       .align  2
+\label:
+       b \target
+ENDPROC(\label)
+.endm
+
+       /* None of these should ever happen */
+       invalid_vector  el2t_sync_invalid, __kvm_hyp_panic
+       invalid_vector  el2t_irq_invalid, __kvm_hyp_panic
+       invalid_vector  el2t_fiq_invalid, __kvm_hyp_panic
+       invalid_vector  el2t_error_invalid, __kvm_hyp_panic
+       invalid_vector  el2h_sync_invalid, __kvm_hyp_panic
+       invalid_vector  el2h_irq_invalid, __kvm_hyp_panic
+       invalid_vector  el2h_fiq_invalid, __kvm_hyp_panic
+       invalid_vector  el2h_error_invalid, __kvm_hyp_panic
+       invalid_vector  el1_sync_invalid, __kvm_hyp_panic
+       invalid_vector  el1_irq_invalid, __kvm_hyp_panic
+       invalid_vector  el1_fiq_invalid, __kvm_hyp_panic
+       invalid_vector  el1_error_invalid, __kvm_hyp_panic
+
+el1_sync:                                      // Guest trapped into EL2
+       push    x0, x1
+       push    x2, x3
+
+       mrs     x1, esr_el2
+       lsr     x2, x1, #ESR_EL2_EC_SHIFT
+
+       cmp     x2, #ESR_EL2_EC_HVC64
+       b.ne    el1_trap
+
+       mrs     x3, vttbr_el2                   // If vttbr is valid, the 64bit guest
+       cbnz    x3, el1_trap                    // called HVC
+
+       /* Here, we're pretty sure the host called HVC. */
+       pop     x2, x3
+       pop     x0, x1
+
+       /* Check for __hyp_get_vectors */
+       cbnz    x0, 1f
+       mrs     x0, vbar_el2
+       b       2f
+
+1:     push    lr, xzr
+
+       /*
+        * Compute the function address in EL2, and shuffle the parameters.
+        */
+       kern_hyp_va     x0
+       mov     lr, x0
+       mov     x0, x1
+       mov     x1, x2
+       mov     x2, x3
+       blr     lr
+
+       pop     lr, xzr
+2:     eret
+
+el1_trap:
+       /*
+        * x1: ESR
+        * x2: ESR_EC
+        */
+       cmp     x2, #ESR_EL2_EC_DABT
+       mov     x0, #ESR_EL2_EC_IABT
+       ccmp    x2, x0, #4, ne
+       b.ne    1f              // Not an abort we care about
+
+       /* This is an abort. Check for permission fault */
+       and     x2, x1, #ESR_EL2_FSC_TYPE
+       cmp     x2, #FSC_PERM
+       b.ne    1f              // Not a permission fault
+
+       /*
+        * Check for Stage-1 page table walk, which is guaranteed
+        * to give a valid HPFAR_EL2.
+        */
+       tbnz    x1, #7, 1f      // S1PTW is set
+
+       /* Preserve PAR_EL1 */
+       mrs     x3, par_el1
+       push    x3, xzr
+
+       /*
+        * Permission fault, HPFAR_EL2 is invalid.
+        * Resolve the IPA the hard way using the guest VA.
+        * Stage-1 translation already validated the memory access rights.
+        * As such, we can use the EL1 translation regime, and don't have
+        * to distinguish between EL0 and EL1 access.
+        */
+       mrs     x2, far_el2
+       at      s1e1r, x2
+       isb
+
+       /* Read result */
+       mrs     x3, par_el1
+       pop     x0, xzr                 // Restore PAR_EL1 from the stack
+       msr     par_el1, x0
+       tbnz    x3, #0, 3f              // Bail out if we failed the translation
+       ubfx    x3, x3, #12, #36        // Extract IPA
+       lsl     x3, x3, #4              // and present it like HPFAR
+       b       2f
+
+1:     mrs     x3, hpfar_el2
+       mrs     x2, far_el2
+
+2:     mrs     x0, tpidr_el2
+       str     w1, [x0, #VCPU_ESR_EL2]
+       str     x2, [x0, #VCPU_FAR_EL2]
+       str     x3, [x0, #VCPU_HPFAR_EL2]
+
+       mov     x1, #ARM_EXCEPTION_TRAP
+       b       __kvm_vcpu_return
+
+       /*
+        * Translation failed. Just return to the guest and
+        * let it fault again. Another CPU is probably playing
+        * behind our back.
+        */
+3:     pop     x2, x3
+       pop     x0, x1
+
+       eret
+
+el1_irq:
+       push    x0, x1
+       push    x2, x3
+       mrs     x0, tpidr_el2
+       mov     x1, #ARM_EXCEPTION_IRQ
+       b       __kvm_vcpu_return
+
+       .ltorg
+
+       .align 11
+
+ENTRY(__kvm_hyp_vector)
+       ventry  el2t_sync_invalid               // Synchronous EL2t
+       ventry  el2t_irq_invalid                // IRQ EL2t
+       ventry  el2t_fiq_invalid                // FIQ EL2t
+       ventry  el2t_error_invalid              // Error EL2t
+
+       ventry  el2h_sync_invalid               // Synchronous EL2h
+       ventry  el2h_irq_invalid                // IRQ EL2h
+       ventry  el2h_fiq_invalid                // FIQ EL2h
+       ventry  el2h_error_invalid              // Error EL2h
+
+       ventry  el1_sync                        // Synchronous 64-bit EL1
+       ventry  el1_irq                         // IRQ 64-bit EL1
+       ventry  el1_fiq_invalid                 // FIQ 64-bit EL1
+       ventry  el1_error_invalid               // Error 64-bit EL1
+
+       ventry  el1_sync                        // Synchronous 32-bit EL1
+       ventry  el1_irq                         // IRQ 32-bit EL1
+       ventry  el1_fiq_invalid                 // FIQ 32-bit EL1
+       ventry  el1_error_invalid               // Error 32-bit EL1
+ENDPROC(__kvm_hyp_vector)
+
+       .popsection
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
new file mode 100644 (file)
index 0000000..81a02a8
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+ * Fault injection for both 32 and 64bit guests.
+ *
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Based on arch/arm/kvm/emulate.c
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/esr.h>
+
+#define PSTATE_FAULT_BITS_64   (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
+                                PSR_I_BIT | PSR_D_BIT)
+#define EL1_EXCEPT_SYNC_OFFSET 0x200
+
+static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+{
+       unsigned long cpsr;
+       unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
+       bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+       u32 return_offset = (is_thumb) ? 4 : 0;
+       u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+
+       cpsr = mode | COMPAT_PSR_I_BIT;
+
+       if (sctlr & (1 << 30))
+               cpsr |= COMPAT_PSR_T_BIT;
+       if (sctlr & (1 << 25))
+               cpsr |= COMPAT_PSR_E_BIT;
+
+       *vcpu_cpsr(vcpu) = cpsr;
+
+       /* Note: These now point to the banked copies */
+       *vcpu_spsr(vcpu) = new_spsr_value;
+       *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+
+       /* Branch to exception vector */
+       if (sctlr & (1 << 13))
+               vect_offset += 0xffff0000;
+       else /* always have security exceptions */
+               vect_offset += vcpu_cp15(vcpu, c12_VBAR);
+
+       *vcpu_pc(vcpu) = vect_offset;
+}
+
+static void inject_undef32(struct kvm_vcpu *vcpu)
+{
+       prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
+}
+
+/*
+ * Modelled after TakeDataAbortException() and TakePrefetchAbortException
+ * pseudocode.
+ */
+static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
+                        unsigned long addr)
+{
+       u32 vect_offset;
+       u32 *far, *fsr;
+       bool is_lpae;
+
+       if (is_pabt) {
+               vect_offset = 12;
+               far = &vcpu_cp15(vcpu, c6_IFAR);
+               fsr = &vcpu_cp15(vcpu, c5_IFSR);
+       } else { /* !iabt */
+               vect_offset = 16;
+               far = &vcpu_cp15(vcpu, c6_DFAR);
+               fsr = &vcpu_cp15(vcpu, c5_DFSR);
+       }
+
+       prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
+
+       *far = addr;
+
+       /* Give the guest an IMPLEMENTATION DEFINED exception */
+       is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
+       if (is_lpae)
+               *fsr = 1 << 9 | 0x34;
+       else
+               *fsr = 0x14;
+}
+
+static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
+{
+       unsigned long cpsr = *vcpu_cpsr(vcpu);
+       bool is_aarch32;
+       u32 esr = 0;
+
+       is_aarch32 = vcpu_mode_is_32bit(vcpu);
+
+       *vcpu_spsr(vcpu) = cpsr;
+       *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
+
+       *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
+       *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
+
+       vcpu_sys_reg(vcpu, FAR_EL1) = addr;
+
+       /*
+        * Build an {i,d}abort, depending on the level and the
+        * instruction set. Report an external synchronous abort.
+        */
+       if (kvm_vcpu_trap_il_is32bit(vcpu))
+               esr |= ESR_EL1_IL;
+
+       /*
+        * Here, the guest runs in AArch64 mode when in EL1. If we get
+        * an AArch32 fault, it means we managed to trap an EL0 fault.
+        */
+       if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
+               esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT);
+       else
+               esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT);
+
+       if (!is_iabt)
+               esr |= ESR_EL1_EC_DABT_EL0;
+
+       vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT;
+}
+
+static void inject_undef64(struct kvm_vcpu *vcpu)
+{
+       unsigned long cpsr = *vcpu_cpsr(vcpu);
+       u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT);
+
+       *vcpu_spsr(vcpu) = cpsr;
+       *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
+
+       *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
+       *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
+
+       /*
+        * Build an unknown exception, depending on the instruction
+        * set.
+        */
+       if (kvm_vcpu_trap_il_is32bit(vcpu))
+               esr |= ESR_EL1_IL;
+
+       vcpu_sys_reg(vcpu, ESR_EL1) = esr;
+}
+
+/**
+ * kvm_inject_dabt - inject a data abort into the guest
+ * @vcpu: The VCPU to receive the data abort
+ * @addr: The address to report in the DFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+       if (!(vcpu->arch.hcr_el2 & HCR_RW))
+               inject_abt32(vcpu, false, addr);
+
+       inject_abt64(vcpu, false, addr);
+}
+
+/**
+ * kvm_inject_pabt - inject a prefetch abort into the guest
+ * @vcpu: The VCPU to receive the prefetch abort
+ * @addr: The address to report in the IFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+       if (!(vcpu->arch.hcr_el2 & HCR_RW))
+               inject_abt32(vcpu, true, addr);
+
+       inject_abt64(vcpu, true, addr);
+}
+
+/**
+ * kvm_inject_undefined - inject an undefined instruction into the guest
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+{
+       if (!(vcpu->arch.hcr_el2 & HCR_RW))
+               inject_undef32(vcpu);
+
+       inject_undef64(vcpu);
+}
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
new file mode 100644 (file)
index 0000000..bbc6ae3
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/kvm/emulate.c:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/mm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/ptrace.h>
+
+#define VCPU_NR_MODES 6
+#define REG_OFFSET(_reg) \
+       (offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))
+
+#define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))
+
+static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
+       /* USR Registers */
+       {
+               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+               USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
+               REG_OFFSET(pc)
+       },
+
+       /* FIQ Registers */
+       {
+               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+               USR_REG_OFFSET(6), USR_REG_OFFSET(7),
+               REG_OFFSET(compat_r8_fiq),  /* r8 */
+               REG_OFFSET(compat_r9_fiq),  /* r9 */
+               REG_OFFSET(compat_r10_fiq), /* r10 */
+               REG_OFFSET(compat_r11_fiq), /* r11 */
+               REG_OFFSET(compat_r12_fiq), /* r12 */
+               REG_OFFSET(compat_sp_fiq),  /* r13 */
+               REG_OFFSET(compat_lr_fiq),  /* r14 */
+               REG_OFFSET(pc)
+       },
+
+       /* IRQ Registers */
+       {
+               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+               USR_REG_OFFSET(12),
+               REG_OFFSET(compat_sp_irq), /* r13 */
+               REG_OFFSET(compat_lr_irq), /* r14 */
+               REG_OFFSET(pc)
+       },
+
+       /* SVC Registers */
+       {
+               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+               USR_REG_OFFSET(12),
+               REG_OFFSET(compat_sp_svc), /* r13 */
+               REG_OFFSET(compat_lr_svc), /* r14 */
+               REG_OFFSET(pc)
+       },
+
+       /* ABT Registers */
+       {
+               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+               USR_REG_OFFSET(12),
+               REG_OFFSET(compat_sp_abt), /* r13 */
+               REG_OFFSET(compat_lr_abt), /* r14 */
+               REG_OFFSET(pc)
+       },
+
+       /* UND Registers */
+       {
+               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+               USR_REG_OFFSET(12),
+               REG_OFFSET(compat_sp_und), /* r13 */
+               REG_OFFSET(compat_lr_und), /* r14 */
+               REG_OFFSET(pc)
+       },
+};
+
+/*
+ * Return a pointer to the register number valid in the current mode of
+ * the virtual CPU.
+ */
+unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
+{
+       unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
+       unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
+
+       switch (mode) {
+       case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
+               mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
+               break;
+
+       case COMPAT_PSR_MODE_ABT:
+               mode = 4;
+               break;
+
+       case COMPAT_PSR_MODE_UND:
+               mode = 5;
+               break;
+
+       case COMPAT_PSR_MODE_SYS:
+               mode = 0;       /* SYS maps to USR */
+               break;
+
+       default:
+               BUG();
+       }
+
+       return reg_array + vcpu_reg_offsets[mode][reg_num];
+}
+
+/*
+ * Return the SPSR for the current mode of the virtual CPU.
+ */
+unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu)
+{
+       unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
+       switch (mode) {
+       case COMPAT_PSR_MODE_SVC:
+               mode = KVM_SPSR_SVC;
+               break;
+       case COMPAT_PSR_MODE_ABT:
+               mode = KVM_SPSR_ABT;
+               break;
+       case COMPAT_PSR_MODE_UND:
+               mode = KVM_SPSR_UND;
+               break;
+       case COMPAT_PSR_MODE_IRQ:
+               mode = KVM_SPSR_IRQ;
+               break;
+       case COMPAT_PSR_MODE_FIQ:
+               mode = KVM_SPSR_FIQ;
+               break;
+       default:
+               BUG();
+       }
+
+       return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[mode];
+}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
new file mode 100644 (file)
index 0000000..70a7816
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/kvm/reset.c
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/errno.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+
+#include <kvm/arm_arch_timer.h>
+
+#include <asm/cputype.h>
+#include <asm/ptrace.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_coproc.h>
+
+/*
+ * ARMv8 Reset Values
+ */
+static const struct kvm_regs default_regs_reset = {
+       .regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
+                       PSR_F_BIT | PSR_D_BIT),
+};
+
+static const struct kvm_regs default_regs_reset32 = {
+       .regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT |
+                       COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
+};
+
+static const struct kvm_irq_level default_vtimer_irq = {
+       .irq    = 27,
+       .level  = 1,
+};
+
+static bool cpu_has_32bit_el1(void)
+{
+       u64 pfr0;
+
+       pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+       return !!(pfr0 & 0x20);
+}
+
+int kvm_arch_dev_ioctl_check_extension(long ext)
+{
+       int r;
+
+       switch (ext) {
+       case KVM_CAP_ARM_EL1_32BIT:
+               r = cpu_has_32bit_el1();
+               break;
+       default:
+               r = 0;
+       }
+
+       return r;
+}
+
+/**
+ * kvm_reset_vcpu - sets core registers and sys_regs to reset value
+ * @vcpu: The VCPU pointer
+ *
+ * This function finds the right table above and sets the registers on
+ * the virtual CPU struct to their architecturally defined reset
+ * values.
+ */
+int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+       const struct kvm_irq_level *cpu_vtimer_irq;
+       const struct kvm_regs *cpu_reset;
+
+       switch (vcpu->arch.target) {
+       default:
+               if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
+                       if (!cpu_has_32bit_el1())
+                               return -EINVAL;
+                       cpu_reset = &default_regs_reset32;
+                       vcpu->arch.hcr_el2 &= ~HCR_RW;
+               } else {
+                       cpu_reset = &default_regs_reset;
+               }
+
+               cpu_vtimer_irq = &default_vtimer_irq;
+               break;
+       }
+
+       /* Reset core registers */
+       memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));
+
+       /* Reset system registers */
+       kvm_reset_sys_regs(vcpu);
+
+       /* Reset timer */
+       kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
+
+       return 0;
+}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
new file mode 100644 (file)
index 0000000..4cc3b71
--- /dev/null
@@ -0,0 +1,1528 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/kvm/coproc.c:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Authors: Rusty Russell <rusty@rustcorp.com.au>
+ *          Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/mm.h>
+#include <linux/kvm_host.h>
+#include <linux/uaccess.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/debug-monitors.h>
+#include <trace/events/kvm.h>
+
+#include "sys_regs.h"
+
+/*
+ * All of this file is extremely similar to the ARM coproc.c, but the
+ * types are different. My gut feeling is that it should be pretty
+ * easy to merge, but that would be an ABI breakage -- again. VFP
+ * would also need to be abstracted.
+ *
+ * For AArch32, we only take care of what is being trapped. Anything
+ * that has to do with init and userspace access has to go via the
+ * 64bit interface.
+ */
+
+/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
+/* NOTE(review): populated elsewhere in this file (not visible in this hunk) */
+static u32 cache_levels;
+
+/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
+#define CSSELR_MAX 12
+
+/* Which cache CCSIDR represents depends on CSSELR value. */
+static u32 get_ccsidr(u32 csselr)
+{
+       u32 ccsidr;
+
+       /* Make sure no one else changes CSSELR during this! */
+       local_irq_disable();
+       /* Put value into CSSELR */
+       asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
+       /* Synchronise so the CCSIDR read observes the new CSSELR */
+       isb();
+       /* Read result out of CCSIDR */
+       asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
+       local_irq_enable();
+
+       return ccsidr;
+}
+
+/* DC CISW: clean and invalidate data cache line by set/way */
+static void do_dc_cisw(u32 val)
+{
+       asm volatile("dc cisw, %x0" : : "r" (val));
+       dsb(ish);
+}
+
+/* DC CSW: clean data cache line by set/way */
+static void do_dc_csw(u32 val)
+{
+       asm volatile("dc csw, %x0" : : "r" (val));
+       dsb(ish);
+}
+
+/* See note at ARM ARM B1.14.4 */
+/*
+ * Handle a trapped DC {C,CI}SW set/way cache maintenance operation.
+ *
+ * Set/way operations are performed on the current physical CPU; all
+ * other CPUs are flagged as requiring a dcache flush before they next
+ * run this vcpu. If the vcpu migrated since the trap was taken, fall
+ * back to flushing the whole cache here.
+ */
+static bool access_dcsw(struct kvm_vcpu *vcpu,
+                       const struct sys_reg_params *p,
+                       const struct sys_reg_desc *r)
+{
+       unsigned long val;
+       int cpu;
+
+       if (!p->is_write)
+               return read_from_write_only(vcpu, p);
+
+       cpu = get_cpu();
+
+       /* Every other physical CPU must flush before running this vcpu */
+       cpumask_setall(&vcpu->arch.require_dcache_flush);
+       cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
+
+       /* If we were already preempted, take the long way around */
+       if (cpu != vcpu->arch.last_pcpu) {
+               flush_cache_all();
+               goto done;
+       }
+
+       val = *vcpu_reg(vcpu, p->Rt);
+
+       switch (p->CRm) {
+       case 6:                 /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
+       case 14:                /* DCCISW */
+               do_dc_cisw(val);
+               break;
+
+       case 10:                /* DCCSW */
+               do_dc_csw(val);
+               break;
+       }
+
+done:
+       put_cpu();
+
+       return true;
+}
+
+/*
+ * Generic accessor for VM registers. Only called as long as HCR_TVM
+ * is set.
+ *
+ * Writes only are expected here (BUG_ON enforces this); the written
+ * value is recorded in the vcpu shadow state. A 64bit AArch32 (MCRR)
+ * access updates both halves of the cp15 shadow pair.
+ */
+static bool access_vm_reg(struct kvm_vcpu *vcpu,
+                         const struct sys_reg_params *p,
+                         const struct sys_reg_desc *r)
+{
+       unsigned long val;
+
+       BUG_ON(!p->is_write);
+
+       val = *vcpu_reg(vcpu, p->Rt);
+       if (!p->is_aarch32) {
+               vcpu_sys_reg(vcpu, r->reg) = val;
+       } else {
+               /* 64bit AArch32 accesses also update the high half */
+               if (!p->is_32bit)
+                       vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
+               vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
+       }
+
+       return true;
+}
+
+/*
+ * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set.  If the
+ * guest enables the MMU, we stop trapping the VM sys_regs and leave
+ * it in complete control of the caches.
+ */
+static bool access_sctlr(struct kvm_vcpu *vcpu,
+                        const struct sys_reg_params *p,
+                        const struct sys_reg_desc *r)
+{
+       /* Record the new value in the shadow state first */
+       access_vm_reg(vcpu, p, r);
+
+       if (vcpu_has_cache_enabled(vcpu)) {     /* MMU+Caches enabled? */
+               /* Stop trapping VM registers and synchronise stage-2 */
+               vcpu->arch.hcr_el2 &= ~HCR_TVM;
+               stage2_flush_vm(vcpu->kvm);
+       }
+
+       return true;
+}
+
+/* Read-As-Zero / Write-Ignore: benign emulation for registers we
+ * don't implement for the guest. */
+static bool trap_raz_wi(struct kvm_vcpu *vcpu,
+                       const struct sys_reg_params *p,
+                       const struct sys_reg_desc *r)
+{
+       if (p->is_write)
+               return ignore_write(vcpu, p);
+       else
+               return read_zero(vcpu, p);
+}
+
+/*
+ * OSLSR_EL1: writes are ignored; reads return a fixed value with only
+ * bit 3 set.
+ * NOTE(review): bit 3 is OSLM[1] -- this advertises the OS Lock model
+ * with the lock itself clear; confirm against the ARM ARM description.
+ */
+static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
+                          const struct sys_reg_params *p,
+                          const struct sys_reg_desc *r)
+{
+       if (p->is_write) {
+               return ignore_write(vcpu, p);
+       } else {
+               *vcpu_reg(vcpu, p->Rt) = (1 << 3);
+               return true;
+       }
+}
+
+/*
+ * DBGAUTHSTATUS_EL1: writes are ignored; reads forward the host's
+ * actual debug authentication status to the guest.
+ */
+static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
+                                  const struct sys_reg_params *p,
+                                  const struct sys_reg_desc *r)
+{
+       if (p->is_write) {
+               return ignore_write(vcpu, p);
+       } else {
+               u32 val;
+               asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
+               *vcpu_reg(vcpu, p->Rt) = val;
+               return true;
+       }
+}
+
+/*
+ * We want to avoid world-switching all the DBG registers all the
+ * time:
+ *
+ * - If we've touched any debug register, it is likely that we're
+ *   going to touch more of them. It then makes sense to disable the
+ *   traps and start doing the save/restore dance
+ * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
+ *   then mandatory to save/restore the registers, as the guest
+ *   depends on them.
+ *
+ * For this, we use a DIRTY bit, indicating the guest has modified the
+ * debug registers, used as follows:
+ *
+ * On guest entry:
+ * - If the dirty bit is set (because we're coming back from trapping),
+ *   disable the traps, save host registers, restore guest registers.
+ * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
+ *   set the dirty bit, disable the traps, save host registers,
+ *   restore guest registers.
+ * - Otherwise, enable the traps
+ *
+ * On guest exit:
+ * - If the dirty bit is set, save guest registers, restore host
+ *   registers and clear the dirty bit. This ensures that the host can
+ *   now use the debug registers.
+ */
+/*
+ * Trapped 64bit debug register access: writes are recorded in the
+ * vcpu shadow state and mark the debug state dirty (triggering the
+ * save/restore dance described above); reads come from the shadow.
+ */
+static bool trap_debug_regs(struct kvm_vcpu *vcpu,
+                           const struct sys_reg_params *p,
+                           const struct sys_reg_desc *r)
+{
+       if (p->is_write) {
+               vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+               vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+       } else {
+               *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+       }
+
+       return true;
+}
+
+/* Reset handler: seed the AMAIR_EL1 shadow with the host's value */
+static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+       u64 amair;
+
+       asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
+       vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
+}
+
+static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+       /*
+        * Simply map the vcpu_id into the Aff0 field of the MPIDR.
+        * Bit 31 is RES1 in MPIDR_EL1.
+        */
+       vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
+}
+
+/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
+/* Each expansion emits four trap descriptors, all with reset value 0 */
+#define DBG_BCR_BVR_WCR_WVR_EL1(n)                                     \
+       /* DBGBVRn_EL1 */                                               \
+       { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),     \
+         trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 },         \
+       /* DBGBCRn_EL1 */                                               \
+       { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),     \
+         trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 },         \
+       /* DBGWVRn_EL1 */                                               \
+       { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),     \
+         trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 },         \
+       /* DBGWCRn_EL1 */                                               \
+       { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),     \
+         trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
+
+/*
+ * Architected system registers.
+ * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
+ *
+ * We could trap ID_DFR0 and tell the guest we don't support performance
+ * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
+ * NAKed, so it will read the PMCR anyway.
+ *
+ * Therefore we tell the guest we have 0 counters.  Unfortunately, we
+ * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
+ * all PM registers, which doesn't crash the guest kernel at least.
+ *
+ * Debug handling: We do trap most, if not all debug related system
+ * registers. The implementation is good enough to ensure that a guest
+ * can use these with minimal performance degradation. The drawback is
+ * that we don't implement any of the external debug, none of the
+ * OSlock protocol. This should be revisited if we ever encounter a
+ * more demanding guest...
+ */
+static const struct sys_reg_desc sys_reg_descs[] = {
+       /* Each entry: encoding, access handler, reset handler,
+        * shadow register index, reset value */
+       /* DC ISW */
+       { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
+         access_dcsw },
+       /* DC CSW */
+       { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
+         access_dcsw },
+       /* DC CISW */
+       { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
+         access_dcsw },
+
+       DBG_BCR_BVR_WCR_WVR_EL1(0),
+       DBG_BCR_BVR_WCR_WVR_EL1(1),
+       /* MDCCINT_EL1 */
+       { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
+         trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
+       /* MDSCR_EL1 */
+       { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
+         trap_debug_regs, reset_val, MDSCR_EL1, 0 },
+       DBG_BCR_BVR_WCR_WVR_EL1(2),
+       DBG_BCR_BVR_WCR_WVR_EL1(3),
+       DBG_BCR_BVR_WCR_WVR_EL1(4),
+       DBG_BCR_BVR_WCR_WVR_EL1(5),
+       DBG_BCR_BVR_WCR_WVR_EL1(6),
+       DBG_BCR_BVR_WCR_WVR_EL1(7),
+       DBG_BCR_BVR_WCR_WVR_EL1(8),
+       DBG_BCR_BVR_WCR_WVR_EL1(9),
+       DBG_BCR_BVR_WCR_WVR_EL1(10),
+       DBG_BCR_BVR_WCR_WVR_EL1(11),
+       DBG_BCR_BVR_WCR_WVR_EL1(12),
+       DBG_BCR_BVR_WCR_WVR_EL1(13),
+       DBG_BCR_BVR_WCR_WVR_EL1(14),
+       DBG_BCR_BVR_WCR_WVR_EL1(15),
+
+       /* MDRAR_EL1 */
+       { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
+         trap_raz_wi },
+       /* OSLAR_EL1 */
+       { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
+         trap_raz_wi },
+       /* OSLSR_EL1 */
+       { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
+         trap_oslsr_el1 },
+       /* OSDLR_EL1 */
+       { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
+         trap_raz_wi },
+       /* DBGPRCR_EL1 */
+       { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
+         trap_raz_wi },
+       /* DBGCLAIMSET_EL1 */
+       { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
+         trap_raz_wi },
+       /* DBGCLAIMCLR_EL1 */
+       { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
+         trap_raz_wi },
+       /* DBGAUTHSTATUS_EL1 */
+       { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
+         trap_dbgauthstatus_el1 },
+
+       /* TEECR32_EL1 */
+       { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
+         NULL, reset_val, TEECR32_EL1, 0 },
+       /* TEEHBR32_EL1 */
+       { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
+         NULL, reset_val, TEEHBR32_EL1, 0 },
+
+       /* MDCCSR_EL1 */
+       { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
+         trap_raz_wi },
+       /* DBGDTR_EL0 */
+       { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
+         trap_raz_wi },
+       /* DBGDTR[TR]X_EL0 */
+       { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
+         trap_raz_wi },
+
+       /* DBGVCR32_EL2 */
+       { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
+         NULL, reset_val, DBGVCR32_EL2, 0 },
+
+       /* MPIDR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
+         NULL, reset_mpidr, MPIDR_EL1 },
+       /* SCTLR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
+         access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+       /* CPACR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
+         NULL, reset_val, CPACR_EL1, 0 },
+       /* TTBR0_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
+         access_vm_reg, reset_unknown, TTBR0_EL1 },
+       /* TTBR1_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
+         access_vm_reg, reset_unknown, TTBR1_EL1 },
+       /* TCR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
+         access_vm_reg, reset_val, TCR_EL1, 0 },
+
+       /* AFSR0_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
+         access_vm_reg, reset_unknown, AFSR0_EL1 },
+       /* AFSR1_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
+         access_vm_reg, reset_unknown, AFSR1_EL1 },
+       /* ESR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
+         access_vm_reg, reset_unknown, ESR_EL1 },
+       /* FAR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
+         access_vm_reg, reset_unknown, FAR_EL1 },
+       /* PAR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
+         NULL, reset_unknown, PAR_EL1 },
+
+       /* PMINTENSET_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
+         trap_raz_wi },
+       /* PMINTENCLR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
+         trap_raz_wi },
+
+       /* MAIR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
+         access_vm_reg, reset_unknown, MAIR_EL1 },
+       /* AMAIR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
+         access_vm_reg, reset_amair_el1, AMAIR_EL1 },
+
+       /* VBAR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
+         NULL, reset_val, VBAR_EL1, 0 },
+       /* CONTEXTIDR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
+         access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
+       /* TPIDR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
+         NULL, reset_unknown, TPIDR_EL1 },
+
+       /* CNTKCTL_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
+         NULL, reset_val, CNTKCTL_EL1, 0},
+
+       /* CSSELR_EL1 */
+       { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
+         NULL, reset_unknown, CSSELR_EL1 },
+
+       /* PMCR_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
+         trap_raz_wi },
+       /* PMCNTENSET_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
+         trap_raz_wi },
+       /* PMCNTENCLR_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
+         trap_raz_wi },
+       /* PMOVSCLR_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
+         trap_raz_wi },
+       /* PMSWINC_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
+         trap_raz_wi },
+       /* PMSELR_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
+         trap_raz_wi },
+       /* PMCEID0_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
+         trap_raz_wi },
+       /* PMCEID1_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
+         trap_raz_wi },
+       /* PMCCNTR_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
+         trap_raz_wi },
+       /* PMXEVTYPER_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
+         trap_raz_wi },
+       /* PMXEVCNTR_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
+         trap_raz_wi },
+       /* PMUSERENR_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
+         trap_raz_wi },
+       /* PMOVSSET_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
+         trap_raz_wi },
+
+       /* TPIDR_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
+         NULL, reset_unknown, TPIDR_EL0 },
+       /* TPIDRRO_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
+         NULL, reset_unknown, TPIDRRO_EL0 },
+
+       /* DACR32_EL2 */
+       { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
+         NULL, reset_unknown, DACR32_EL2 },
+       /* IFSR32_EL2 */
+       { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
+         NULL, reset_unknown, IFSR32_EL2 },
+       /* FPEXC32_EL2 */
+       { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
+         NULL, reset_val, FPEXC32_EL2, 0x70 },
+};
+
+/*
+ * DBGIDR (AArch32): writes are ignored; reads synthesize a value from
+ * the 64bit ID registers.
+ * NOTE(review): the field placement (WRPs/BRPs/CTX_CMPs from
+ * ID_AA64DFR0_EL1, debug architecture version 6, and the two bits
+ * derived from EL3 presence) should be checked against the ARM ARM
+ * DBGIDR layout.
+ */
+static bool trap_dbgidr(struct kvm_vcpu *vcpu,
+                       const struct sys_reg_params *p,
+                       const struct sys_reg_desc *r)
+{
+       if (p->is_write) {
+               return ignore_write(vcpu, p);
+       } else {
+               u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
+               u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
+               /* EL3 field of ID_AA64PFR0_EL1: non-zero if EL3 implemented */
+               u32 el3 = !!((pfr >> 12) & 0xf);
+
+               *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
+                                         (((dfr >> 12) & 0xf) << 24) |
+                                         (((dfr >> 28) & 0xf) << 20) |
+                                         (6 << 16) | (el3 << 14) | (el3 << 12));
+               return true;
+       }
+}
+
+/*
+ * Trapped 32bit (cp14) debug register access: same dirty-bit scheme
+ * as trap_debug_regs(), but backed by the cp14 shadow state.
+ */
+static bool trap_debug32(struct kvm_vcpu *vcpu,
+                        const struct sys_reg_params *p,
+                        const struct sys_reg_desc *r)
+{
+       if (p->is_write) {
+               vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+               vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+       } else {
+               *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
+       }
+
+       return true;
+}
+
+/*
+ * Expand the 32bit cp14 DBG{BVR,BCR,WVR,WCR}n trap descriptors.
+ * NOTE(review): the "* 2" index stride suggests each 32bit register
+ * occupies an even slot of the cp14 shadow array -- confirm against
+ * the cp14_* index definitions in the headers.
+ */
+#define DBG_BCR_BVR_WCR_WVR(n)                                 \
+       /* DBGBVRn */                                           \
+       { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32,    \
+         NULL, (cp14_DBGBVR0 + (n) * 2) },                     \
+       /* DBGBCRn */                                           \
+       { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32,    \
+         NULL, (cp14_DBGBCR0 + (n) * 2) },                     \
+       /* DBGWVRn */                                           \
+       { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32,    \
+         NULL, (cp14_DBGWVR0 + (n) * 2) },                     \
+       /* DBGWCRn */                                           \
+       { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32,    \
+         NULL, (cp14_DBGWCR0 + (n) * 2) }
+
+/* DBGBXVRn: breakpoint extended value registers, same shadow stride */
+#define DBGBXVR(n)                                             \
+       { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32,    \
+         NULL, cp14_DBGBXVR0 + n * 2 }
+
+/*
+ * Trapped cp14 registers. We generally ignore most of the external
+ * debug, on the principle that they don't really make sense to a
+ * guest. Revisit this one day, should this principle change.
+ */
+static const struct sys_reg_desc cp14_regs[] = {
+       /* DBGIDR */
+       { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
+       /* DBGDTRRXext */
+       { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
+
+       DBG_BCR_BVR_WCR_WVR(0),
+       /* DBGDSCRint */
+       { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
+       DBG_BCR_BVR_WCR_WVR(1),
+       /* DBGDCCINT */
+       { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
+       /* DBGDSCRext */
+       { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
+       DBG_BCR_BVR_WCR_WVR(2),
+       /* DBGDTR[RT]Xint */
+       { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
+       /* DBGDTR[RT]Xext */
+       { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
+       DBG_BCR_BVR_WCR_WVR(3),
+       DBG_BCR_BVR_WCR_WVR(4),
+       DBG_BCR_BVR_WCR_WVR(5),
+       /* DBGWFAR */
+       { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
+       /* DBGOSECCR */
+       { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
+       DBG_BCR_BVR_WCR_WVR(6),
+       /* DBGVCR */
+       { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
+       DBG_BCR_BVR_WCR_WVR(7),
+       DBG_BCR_BVR_WCR_WVR(8),
+       DBG_BCR_BVR_WCR_WVR(9),
+       DBG_BCR_BVR_WCR_WVR(10),
+       DBG_BCR_BVR_WCR_WVR(11),
+       DBG_BCR_BVR_WCR_WVR(12),
+       DBG_BCR_BVR_WCR_WVR(13),
+       DBG_BCR_BVR_WCR_WVR(14),
+       DBG_BCR_BVR_WCR_WVR(15),
+
+       /* DBGDRAR (32bit) */
+       { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
+
+       DBGBXVR(0),
+       /* DBGOSLAR */
+       { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
+       DBGBXVR(1),
+       /* DBGOSLSR */
+       { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
+       DBGBXVR(2),
+       DBGBXVR(3),
+       /* DBGOSDLR */
+       { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
+       DBGBXVR(4),
+       /* DBGPRCR */
+       { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
+       DBGBXVR(5),
+       DBGBXVR(6),
+       DBGBXVR(7),
+       DBGBXVR(8),
+       DBGBXVR(9),
+       DBGBXVR(10),
+       DBGBXVR(11),
+       DBGBXVR(12),
+       DBGBXVR(13),
+       DBGBXVR(14),
+       DBGBXVR(15),
+
+       /* DBGDSAR (32bit) */
+       { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
+
+       /* DBGDEVID2 */
+       { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
+       /* DBGDEVID1 */
+       { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
+       /* DBGDEVID */
+       { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
+       /* DBGCLAIMSET */
+       { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
+       /* DBGCLAIMCLR */
+       { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
+       /* DBGAUTHSTATUS */
+       { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
+};
+
+/* Trapped cp14 64bit registers */
+/* Only Op1/CRm are decoded for 64bit (MCRR/MRRC) accesses; the other
+ * fields default to 0, matching what kvm_handle_cp_64() populates. */
+static const struct sys_reg_desc cp14_64_regs[] = {
+       /* DBGDRAR (64bit) */
+       { Op1( 0), CRm( 1), .access = trap_raz_wi },
+
+       /* DBGDSAR (64bit) */
+       { Op1( 0), CRm( 2), .access = trap_raz_wi },
+};
+
+/*
+ * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
+ * depending on the way they are accessed (as a 32bit or a 64bit
+ * register).
+ */
+static const struct sys_reg_desc cp15_regs[] = {
+       { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+       { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+       { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
+       { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
+       { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
+       { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
+       { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
+       { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
+       { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
+       { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
+       { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
+
+       /*
+        * DC{C,I,CI}SW operations:
+        */
+       { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
+       { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
+       { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
+
+       /* PMU: RAZ/WI, we advertise no performance counters to the guest */
+       { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
+
+       { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
+       { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
+       { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
+       { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
+       { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
+};
+
+/* Trapped cp15 64bit (MCRR/MRRC) accesses: TTBR0 via Op1=0, TTBR1 via Op1=1 */
+static const struct sys_reg_desc cp15_64_regs[] = {
+       { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+       { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
+};
+
+/* Target specific emulation tables */
+static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
+
+/*
+ * Register a target's sys_reg override table; it is later looked up
+ * by get_target_table(). There is no unregister.
+ */
+void kvm_register_target_sys_reg_table(unsigned int target,
+                                      struct kvm_sys_reg_target_table *table)
+{
+       target_tables[target] = table;
+}
+
+/* Get specific register table for this target. */
+/*
+ * Returns the 64bit or 32bit sub-table of the registered target
+ * table, and its entry count through @num.
+ */
+static const struct sys_reg_desc *get_target_table(unsigned target,
+                                                  bool mode_is_64,
+                                                  size_t *num)
+{
+       struct kvm_sys_reg_target_table *table;
+
+       table = target_tables[target];
+       if (mode_is_64) {
+               *num = table->table64.num;
+               return table->table64.table;
+       } else {
+               *num = table->table32.num;
+               return table->table32.table;
+       }
+}
+
+/*
+ * Linear scan of a descriptor table for an exact match on all five
+ * encoding fields; returns NULL if the access is not described.
+ */
+static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
+                                        const struct sys_reg_desc table[],
+                                        unsigned int num)
+{
+       unsigned int i;
+
+       for (i = 0; i < num; i++) {
+               const struct sys_reg_desc *r = &table[i];
+
+               if (params->Op0 != r->Op0)
+                       continue;
+               if (params->Op1 != r->Op1)
+                       continue;
+               if (params->CRn != r->CRn)
+                       continue;
+               if (params->CRm != r->CRm)
+                       continue;
+               if (params->Op2 != r->Op2)
+                       continue;
+
+               return r;
+       }
+       return NULL;
+}
+
+/*
+ * LDC/STC accesses to cp14 are not supported: inject an undefined
+ * exception into the guest and resume it (return value 1).
+ */
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
+/*
+ * emulate_cp --  tries to match a sys_reg access in a handling table, and
+ *                call the corresponding trap handler.
+ *
+ * @params: pointer to the descriptor of the access
+ * @table: array of trap descriptors
+ * @num: size of the trap descriptor array
+ *
+ * Return 0 if the access has been handled, and -1 if not.
+ */
+static int emulate_cp(struct kvm_vcpu *vcpu,
+                     const struct sys_reg_params *params,
+                     const struct sys_reg_desc *table,
+                     size_t num)
+{
+       const struct sys_reg_desc *r;
+
+       if (!table)
+               return -1;      /* Not handled */
+
+       r = find_reg(params, table, num);
+
+       if (r) {
+               /*
+                * Not having an accessor means that we have
+                * configured a trap that we don't know how to
+                * handle. This certainly qualifies as a gross bug
+                * that should be fixed right away.
+                */
+               BUG_ON(!r->access);
+
+               if (likely(r->access(vcpu, params, r))) {
+                       /* Skip instruction, since it was emulated */
+                       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+               }
+
+               /* Handled */
+               return 0;
+       }
+
+       /* Not handled */
+       return -1;
+}
+
+/*
+ * Log an access to a coprocessor register we have no handler for, and
+ * inject an undefined exception into the guest.
+ */
+static void unhandled_cp_access(struct kvm_vcpu *vcpu,
+                               struct sys_reg_params *params)
+{
+       u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+       int cp;
+
+       switch(hsr_ec) {
+       case ESR_EL2_EC_CP15_32:
+       case ESR_EL2_EC_CP15_64:
+               cp = 15;
+               break;
+       case ESR_EL2_EC_CP14_MR:
+       case ESR_EL2_EC_CP14_64:
+               cp = 14;
+               break;
+       default:
+               /*
+                * Deliberate assignment inside WARN_ON: keeps cp
+                * initialized (-1) and always warns on a bogus EC.
+                */
+               WARN_ON((cp = -1));
+       }
+
+       kvm_err("Unsupported guest CP%d access at: %08lx\n",
+               cp, *vcpu_pc(vcpu));
+       print_sys_reg_instr(params);
+       kvm_inject_undefined(vcpu);
+}
+
+/**
+ * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP access
+ * @vcpu: The VCPU pointer
+ * @global: table of registers common to all targets
+ * @nr_global: size of the @global table
+ * @target_specific: table of target-specific registers, tried first
+ * @nr_specific: size of the @target_specific table
+ *
+ * Always returns 1 so the run loop resumes the guest.
+ */
+static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
+                           const struct sys_reg_desc *global,
+                           size_t nr_global,
+                           const struct sys_reg_desc *target_specific,
+                           size_t nr_specific)
+{
+       struct sys_reg_params params;
+       u32 hsr = kvm_vcpu_get_hsr(vcpu);
+       int Rt2 = (hsr >> 10) & 0xf;
+
+       params.is_aarch32 = true;
+       params.is_32bit = false;
+       params.CRm = (hsr >> 1) & 0xf;
+       params.Rt = (hsr >> 5) & 0xf;
+       params.is_write = ((hsr & 1) == 0);
+
+       /* 64bit accesses only encode Op1/CRm; the rest stays 0 */
+       params.Op0 = 0;
+       params.Op1 = (hsr >> 16) & 0xf;
+       params.Op2 = 0;
+       params.CRn = 0;
+
+       /*
+        * Massive hack here. Store Rt2 in the top 32bits so we only
+        * have one register to deal with. As we use the same trap
+        * backends between AArch32 and AArch64, we get away with it.
+        */
+       if (params.is_write) {
+               u64 val = *vcpu_reg(vcpu, params.Rt);
+               val &= 0xffffffff;
+               val |= *vcpu_reg(vcpu, Rt2) << 32;
+               *vcpu_reg(vcpu, params.Rt) = val;
+       }
+
+       if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
+               goto out;
+       if (!emulate_cp(vcpu, &params, global, nr_global))
+               goto out;
+
+       unhandled_cp_access(vcpu, &params);
+
+out:
+       /* Do the opposite hack for the read side */
+       if (!params.is_write) {
+               u64 val = *vcpu_reg(vcpu, params.Rt);
+               val >>= 32;
+               *vcpu_reg(vcpu, Rt2) = val;
+       }
+
+       return 1;
+}
+
+/**
+ * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
+ * @vcpu: The VCPU pointer
+ * @global: The global (non target-specific) 32bit register table
+ * @nr_global: Number of entries in @global
+ * @target_specific: The target-specific 32bit register table (may be NULL)
+ * @nr_specific: Number of entries in @target_specific
+ *
+ * Target-specific entries take precedence over the global table.
+ * Always returns 1; an unhandled access injects an undefined exception.
+ */
+static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
+                           const struct sys_reg_desc *global,
+                           size_t nr_global,
+                           const struct sys_reg_desc *target_specific,
+                           size_t nr_specific)
+{
+       struct sys_reg_params params;
+       u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+       params.is_aarch32 = true;
+       params.is_32bit = true;
+       params.CRm = (hsr >> 1) & 0xf;
+       params.Rt  = (hsr >> 5) & 0xf;
+       params.is_write = ((hsr & 1) == 0);
+       params.CRn = (hsr >> 10) & 0xf;
+       params.Op0 = 0;
+       params.Op1 = (hsr >> 14) & 0x7;
+       params.Op2 = (hsr >> 17) & 0x7;
+
+       if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
+               return 1;
+       if (!emulate_cp(vcpu, &params, global, nr_global))
+               return 1;
+
+       unhandled_cp_access(vcpu, &params);
+       return 1;
+}
+
+/* Exit handler for 64bit CP15 (mrrc/mcrr) traps. */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       size_t nr_specific;
+       const struct sys_reg_desc *specific =
+               get_target_table(vcpu->arch.target, false, &nr_specific);
+
+       return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
+                               specific, nr_specific);
+}
+
+/* Exit handler for 32bit CP15 (mrc/mcr) traps. */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       size_t nr_specific;
+       const struct sys_reg_desc *specific =
+               get_target_table(vcpu->arch.target, false, &nr_specific);
+
+       return kvm_handle_cp_32(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs),
+                               specific, nr_specific);
+}
+
+/* Exit handler for 64bit CP14 traps: no target-specific table applies. */
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       return kvm_handle_cp_64(vcpu,
+                               cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
+                               NULL, 0);
+}
+
+/* Exit handler for 32bit CP14 traps: no target-specific table applies. */
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       return kvm_handle_cp_32(vcpu,
+                               cp14_regs, ARRAY_SIZE(cp14_regs),
+                               NULL, 0);
+}
+
+/*
+ * emulate_sys_reg - emulate a trapped mrs/msr system register access.
+ *
+ * Looks up @params in the target-specific table, then in the generic
+ * sys_reg_descs table, and invokes the matching ->access handler.
+ * Always returns 1 (resume the guest); an unknown register results in
+ * an undefined exception being injected into the guest instead.
+ */
+static int emulate_sys_reg(struct kvm_vcpu *vcpu,
+                          const struct sys_reg_params *params)
+{
+       size_t num;
+       const struct sys_reg_desc *table, *r;
+
+       table = get_target_table(vcpu->arch.target, true, &num);
+
+       /* Search target-specific then generic table. */
+       r = find_reg(params, table, num);
+       if (!r)
+               r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+
+       if (likely(r)) {
+               /*
+                * Not having an accessor means that we have
+                * configured a trap that we don't know how to
+                * handle. This certainly qualifies as a gross bug
+                * that should be fixed right away.
+                */
+               BUG_ON(!r->access);
+
+               if (likely(r->access(vcpu, params, r))) {
+                       /* Skip instruction, since it was emulated */
+                       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+                       return 1;
+               }
+               /* If access function fails, it should complain. */
+       } else {
+               kvm_err("Unsupported guest sys_reg access at: %lx\n",
+                       *vcpu_pc(vcpu));
+               print_sys_reg_instr(params);
+       }
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
+/* Invoke the ->reset hook, if any, of every entry in @table. */
+static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
+                             const struct sys_reg_desc *table, size_t num)
+{
+       const struct sys_reg_desc *r;
+
+       for (r = table; r < table + num; r++) {
+               if (r->reset)
+                       r->reset(vcpu, r);
+       }
+}
+
+/**
+ * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ *
+ * Decodes the ESR_EL2 ISS fields into sys_reg_params and hands the
+ * access to emulate_sys_reg().
+ */
+int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       struct sys_reg_params params;
+       unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+
+       params.is_aarch32 = false;
+       params.is_32bit = false;
+       params.Op0 = (esr >> 20) & 3;
+       params.Op1 = (esr >> 14) & 0x7;
+       params.CRn = (esr >> 10) & 0xf;
+       params.CRm = (esr >> 1) & 0xf;
+       params.Op2 = (esr >> 17) & 0x7;
+       params.Rt = (esr >> 5) & 0x1f;
+       params.is_write = !(esr & 1);
+
+       return emulate_sys_reg(vcpu, &params);
+}
+
+/******************************************************************************
+ * Userspace API
+ *****************************************************************************/
+
+/*
+ * Decode a KVM_{GET,SET}_ONE_REG sysreg index into Op0/Op1/CRn/CRm/Op2.
+ * Only 64bit-sized indices are supported; returns false for any other
+ * size or if unused index bits are set.
+ */
+static bool index_to_params(u64 id, struct sys_reg_params *params)
+{
+       switch (id & KVM_REG_SIZE_MASK) {
+       case KVM_REG_SIZE_U64:
+               /* Any unused index bits means it's not valid. */
+               if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
+                             | KVM_REG_ARM_COPROC_MASK
+                             | KVM_REG_ARM64_SYSREG_OP0_MASK
+                             | KVM_REG_ARM64_SYSREG_OP1_MASK
+                             | KVM_REG_ARM64_SYSREG_CRN_MASK
+                             | KVM_REG_ARM64_SYSREG_CRM_MASK
+                             | KVM_REG_ARM64_SYSREG_OP2_MASK))
+                       return false;
+               params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
+                              >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
+               params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
+                              >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
+               params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
+                              >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
+               params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
+                              >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
+               params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
+                              >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
+               return true;
+       default:
+               return false;
+       }
+}
+
+/*
+ * Decode an index value, and find the sys_reg_desc entry.
+ * Returns NULL if the index is not a sys_reg, cannot be decoded, has no
+ * descriptor, or names a register we trap but don't save (->reg == 0);
+ * the latter are handled via the invariant path instead.
+ */
+static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
+                                                   u64 id)
+{
+       size_t num;
+       const struct sys_reg_desc *table, *r;
+       struct sys_reg_params params;
+
+       /* We only do sys_reg for now. */
+       if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
+               return NULL;
+
+       if (!index_to_params(id, &params))
+               return NULL;
+
+       table = get_target_table(vcpu->arch.target, true, &num);
+       r = find_reg(&params, table, num);
+       if (!r)
+               r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+
+       /* Not saved in the sys_reg array? */
+       if (r && !r->reg)
+               r = NULL;
+
+       return r;
+}
+
+/*
+ * These are the invariant sys_reg registers: we let the guest see the
+ * host versions of these, so they're part of the guest state.
+ *
+ * A future CPU may provide a mechanism to present different values to
+ * the guest, or a future kvm may trap them.
+ */
+
+/*
+ * Generate a get_<reg>() reset helper that reads <reg> on the host and
+ * caches the value in the descriptor's ->val (casting away the const so
+ * the static table itself can be updated at init time).
+ */
+#define FUNCTION_INVARIANT(reg)                                                \
+       static void get_##reg(struct kvm_vcpu *v,                       \
+                             const struct sys_reg_desc *r)             \
+       {                                                               \
+               u64 val;                                                \
+                                                                       \
+               asm volatile("mrs %0, " __stringify(reg) "\n"           \
+                            : "=r" (val));                             \
+               ((struct sys_reg_desc *)r)->val = val;                  \
+       }
+
+FUNCTION_INVARIANT(midr_el1)
+FUNCTION_INVARIANT(ctr_el0)
+FUNCTION_INVARIANT(revidr_el1)
+FUNCTION_INVARIANT(id_pfr0_el1)
+FUNCTION_INVARIANT(id_pfr1_el1)
+FUNCTION_INVARIANT(id_dfr0_el1)
+FUNCTION_INVARIANT(id_afr0_el1)
+FUNCTION_INVARIANT(id_mmfr0_el1)
+FUNCTION_INVARIANT(id_mmfr1_el1)
+FUNCTION_INVARIANT(id_mmfr2_el1)
+FUNCTION_INVARIANT(id_mmfr3_el1)
+FUNCTION_INVARIANT(id_isar0_el1)
+FUNCTION_INVARIANT(id_isar1_el1)
+FUNCTION_INVARIANT(id_isar2_el1)
+FUNCTION_INVARIANT(id_isar3_el1)
+FUNCTION_INVARIANT(id_isar4_el1)
+FUNCTION_INVARIANT(id_isar5_el1)
+FUNCTION_INVARIANT(clidr_el1)
+FUNCTION_INVARIANT(aidr_el1)
+
+/* ->val is filled in by kvm_sys_reg_table_init() */
+static struct sys_reg_desc invariant_sys_regs[] = {
+       /* MIDR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
+         NULL, get_midr_el1 },
+       /* REVIDR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
+         NULL, get_revidr_el1 },
+       /* ID_PFR0_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
+         NULL, get_id_pfr0_el1 },
+       /* ID_PFR1_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
+         NULL, get_id_pfr1_el1 },
+       /* ID_DFR0_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
+         NULL, get_id_dfr0_el1 },
+       /* ID_AFR0_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
+         NULL, get_id_afr0_el1 },
+       /* ID_MMFR0_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
+         NULL, get_id_mmfr0_el1 },
+       /* ID_MMFR1_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
+         NULL, get_id_mmfr1_el1 },
+       /* ID_MMFR2_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
+         NULL, get_id_mmfr2_el1 },
+       /* ID_MMFR3_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
+         NULL, get_id_mmfr3_el1 },
+       /* ID_ISAR0_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
+         NULL, get_id_isar0_el1 },
+       /* ID_ISAR1_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
+         NULL, get_id_isar1_el1 },
+       /* ID_ISAR2_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
+         NULL, get_id_isar2_el1 },
+       /* ID_ISAR3_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
+         NULL, get_id_isar3_el1 },
+       /* ID_ISAR4_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
+         NULL, get_id_isar4_el1 },
+       /* ID_ISAR5_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
+         NULL, get_id_isar5_el1 },
+       /* CLIDR_EL1 */
+       { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
+         NULL, get_clidr_el1 },
+       /* AIDR_EL1 */
+       { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
+         NULL, get_aidr_el1 },
+       /* CTR_EL0 */
+       { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
+         NULL, get_ctr_el0 },
+};
+
+/* Copy KVM_REG_SIZE(id) bytes of register value in from userspace. */
+static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
+{
+       return copy_from_user(val, uaddr, KVM_REG_SIZE(id)) ? -EFAULT : 0;
+}
+
+/* Copy KVM_REG_SIZE(id) bytes of register value out to userspace. */
+static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
+{
+       return copy_to_user(uaddr, val, KVM_REG_SIZE(id)) ? -EFAULT : 0;
+}
+
+/* KVM_GET_ONE_REG for an invariant register: return the cached host value. */
+static int get_invariant_sys_reg(u64 id, void __user *uaddr)
+{
+       struct sys_reg_params params;
+       const struct sys_reg_desc *r;
+
+       if (!index_to_params(id, &params))
+               return -ENOENT;
+
+       r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
+       if (!r)
+               return -ENOENT;
+
+       return reg_to_user(uaddr, &r->val, id);
+}
+
+/* KVM_SET_ONE_REG for an invariant register: only the current value passes. */
+static int set_invariant_sys_reg(u64 id, void __user *uaddr)
+{
+       struct sys_reg_params params;
+       const struct sys_reg_desc *r;
+       int err;
+       u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
+
+       if (!index_to_params(id, &params))
+               return -ENOENT;
+       r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
+       if (!r)
+               return -ENOENT;
+
+       err = reg_from_user(&val, uaddr, id);
+       if (err)
+               return err;
+
+       /* This is what we mean by invariant: you can't change it. */
+       if (r->val != val)
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * Check that @val is a valid CSSELR value, i.e. that it selects an
+ * instruction or data/unified cache that actually exists at that level
+ * according to the cached CLIDR (cache_levels).
+ */
+static bool is_valid_cache(u32 val)
+{
+       u32 level, ctype;
+
+       if (val >= CSSELR_MAX)
+               return false;
+
+       /* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
+       level = (val >> 1);
+       ctype = (cache_levels >> (level * 3)) & 7;
+
+       switch (ctype) {
+       case 0: /* No cache */
+               return false;
+       case 1: /* Instruction cache only */
+               return (val & 1);
+       case 2: /* Data cache only */
+       case 4: /* Unified cache */
+               return !(val & 1);
+       case 3: /* Separate instruction and data caches */
+               return true;
+       default: /* Reserved: we can't know instruction or data. */
+               return false;
+       }
+}
+
+/* KVM_GET_ONE_REG handler for the demuxed (CCSIDR) register space. */
+static int demux_c15_get(u64 id, void __user *uaddr)
+{
+       u32 val;
+       u32 __user *uval = uaddr;
+
+       /* Fail if we have unknown bits set. */
+       if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
+                  | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
+               return -ENOENT;
+
+       switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
+       case KVM_REG_ARM_DEMUX_ID_CCSIDR:
+               /* CCSIDR values are 32bit only. */
+               if (KVM_REG_SIZE(id) != 4)
+                       return -ENOENT;
+               val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
+                       >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
+               if (!is_valid_cache(val))
+                       return -ENOENT;
+
+               return put_user(get_ccsidr(val), uval);
+       default:
+               return -ENOENT;
+       }
+}
+
+/*
+ * KVM_SET_ONE_REG handler for the demuxed (CCSIDR) register space.
+ * CCSIDRs are read-only; a "set" only succeeds with the current value.
+ */
+static int demux_c15_set(u64 id, void __user *uaddr)
+{
+       u32 val, newval;
+       u32 __user *uval = uaddr;
+
+       /* Fail if we have unknown bits set. */
+       if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
+                  | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
+               return -ENOENT;
+
+       switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
+       case KVM_REG_ARM_DEMUX_ID_CCSIDR:
+               if (KVM_REG_SIZE(id) != 4)
+                       return -ENOENT;
+               val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
+                       >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
+               if (!is_valid_cache(val))
+                       return -ENOENT;
+
+               if (get_user(newval, uval))
+                       return -EFAULT;
+
+               /* This is also invariant: you can't change it. */
+               if (newval != get_ccsidr(val))
+                       return -EINVAL;
+               return 0;
+       default:
+               return -ENOENT;
+       }
+}
+
+/*
+ * KVM_GET_ONE_REG entry point: dispatch to the demux space, the saved
+ * sys_reg array, or the invariant register path, in that order.
+ */
+int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       const struct sys_reg_desc *r;
+       void __user *uaddr = (void __user *)(unsigned long)reg->addr;
+
+       if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
+               return demux_c15_get(reg->id, uaddr);
+
+       if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
+               return -ENOENT;
+
+       r = index_to_sys_reg_desc(vcpu, reg->id);
+       if (!r)
+               return get_invariant_sys_reg(reg->id, uaddr);
+
+       return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
+}
+
+/*
+ * KVM_SET_ONE_REG entry point: mirror of kvm_arm_sys_reg_get_reg(),
+ * writing the userspace-supplied value into the vcpu's sys_reg array.
+ */
+int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       const struct sys_reg_desc *r;
+       void __user *uaddr = (void __user *)(unsigned long)reg->addr;
+
+       if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
+               return demux_c15_set(reg->id, uaddr);
+
+       if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
+               return -ENOENT;
+
+       r = index_to_sys_reg_desc(vcpu, reg->id);
+       if (!r)
+               return set_invariant_sys_reg(reg->id, uaddr);
+
+       return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
+}
+
+/* Number of demux (CCSIDR) register indices exposed to userspace. */
+static unsigned int num_demux_regs(void)
+{
+       unsigned int i, count;
+
+       for (i = 0, count = 0; i < CSSELR_MAX; i++) {
+               if (is_valid_cache(i))
+                       count++;
+       }
+
+       return count;
+}
+
+/* Emit one CCSIDR demux index per valid cache level into @uindices. */
+static int write_demux_regids(u64 __user *uindices)
+{
+       u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
+       unsigned int i;
+
+       val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
+       for (i = 0; i < CSSELR_MAX; i++) {
+               if (!is_valid_cache(i))
+                       continue;
+               if (put_user(val | i, uindices))
+                       return -EFAULT;
+               uindices++;
+       }
+       return 0;
+}
+
+/* Build the KVM_{GET,SET}_ONE_REG index for a sys_reg descriptor. */
+static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
+{
+       return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
+               KVM_REG_ARM64_SYSREG |
+               (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
+               (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
+               (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
+               (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
+               (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
+}
+
+/*
+ * Write @reg's index to *@uind and advance the cursor.  A NULL *@uind
+ * is "count only" mode: report success without writing anything.
+ * Returns false on a userspace copy fault.
+ */
+static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
+{
+       if (!*uind)
+               return true;
+
+       if (put_user(sys_reg_to_index(reg), *uind))
+               return false;
+
+       (*uind)++;
+       return true;
+}
+
+/*
+ * Assumed ordered tables, see kvm_sys_reg_table_init.
+ *
+ * Walk the target-specific and generic tables in lockstep (both are
+ * sorted by encoding), emitting each saved register index exactly once;
+ * when both tables contain the same encoding, the target-specific entry
+ * wins.  With a NULL @uind this only counts.  Returns the number of
+ * indices emitted, or -EFAULT on a userspace copy fault.
+ */
+static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
+{
+       const struct sys_reg_desc *i1, *i2, *end1, *end2;
+       unsigned int total = 0;
+       size_t num;
+
+       /* We check for duplicates here, to allow arch-specific overrides. */
+       i1 = get_target_table(vcpu->arch.target, true, &num);
+       end1 = i1 + num;
+       i2 = sys_reg_descs;
+       end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
+
+       BUG_ON(i1 == end1 || i2 == end2);
+
+       /* Walk carefully, as both tables may refer to the same register. */
+       while (i1 || i2) {
+               int cmp = cmp_sys_reg(i1, i2);
+               /* target-specific overrides generic entry. */
+               if (cmp <= 0) {
+                       /* Ignore registers we trap but don't save. */
+                       if (i1->reg) {
+                               if (!copy_reg_to_user(i1, &uind))
+                                       return -EFAULT;
+                               total++;
+                       }
+               } else {
+                       /* Ignore registers we trap but don't save. */
+                       if (i2->reg) {
+                               if (!copy_reg_to_user(i2, &uind))
+                                       return -EFAULT;
+                               total++;
+                       }
+               }
+
+               /* Advance whichever side(s) we consumed; NULL marks the end. */
+               if (cmp <= 0 && ++i1 == end1)
+                       i1 = NULL;
+               if (cmp >= 0 && ++i2 == end2)
+                       i2 = NULL;
+       }
+       return total;
+}
+
+/* Total register indices reported by KVM_GET_REG_LIST for sys_regs. */
+unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
+{
+       return ARRAY_SIZE(invariant_sys_regs)
+               + num_demux_regs()
+               + walk_sys_regs(vcpu, (u64 __user *)NULL);
+}
+
+/*
+ * Fill the KVM_GET_REG_LIST array: invariant registers first, then the
+ * merged sys_reg tables, then the demuxed CCSIDR indices.
+ */
+int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+       unsigned int i;
+       int err;
+
+       /* Then give them all the invariant registers' indices. */
+       for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
+               if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
+                       return -EFAULT;
+               uindices++;
+       }
+
+       err = walk_sys_regs(vcpu, uindices);
+       if (err < 0)
+               return err;
+       uindices += err;
+
+       return write_demux_regids(uindices);
+}
+
+/* Return 1 (and complain) if @table is not strictly sorted by encoding. */
+static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
+{
+       unsigned int i;
+
+       for (i = 1; i < n; i++) {
+               if (cmp_sys_reg(&table[i-1], &table[i]) < 0)
+                       continue;
+               kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
+               return 1;
+       }
+
+       return 0;
+}
+
+/*
+ * One-time init: sanity-check table ordering, snapshot the host's
+ * invariant registers, and cache a cleaned-up CLIDR in cache_levels.
+ */
+void kvm_sys_reg_table_init(void)
+{
+       unsigned int i;
+       struct sys_reg_desc clidr;
+
+       /* Make sure tables are unique and in order. */
+       BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
+       BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
+       BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
+       BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
+       BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
+       BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
+
+       /* We abuse the reset function to overwrite the table itself. */
+       for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
+               invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
+
+       /*
+        * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
+        *
+        *   If software reads the Cache Type fields from Ctype1
+        *   upwards, once it has seen a value of 0b000, no caches
+        *   exist at further-out levels of the hierarchy. So, for
+        *   example, if Ctype3 is the first Cache Type field with a
+        *   value of 0b000, the values of Ctype4 to Ctype7 must be
+        *   ignored.
+        */
+       get_clidr_el1(NULL, &clidr); /* Ugly... */
+       cache_levels = clidr.val;
+       for (i = 0; i < 7; i++)
+               if (((cache_levels >> (i*3)) & 7) == 0)
+                       break;
+       /* Clear all higher bits. */
+       cache_levels &= (1 << (i*3))-1;
+}
+
+/**
+ * kvm_reset_sys_regs - sets system registers to reset value
+ * @vcpu: The VCPU pointer
+ *
+ * This function finds the right table above and sets the registers on the
+ * virtual CPU struct to their architecturally defined reset values.
+ */
+void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
+{
+       size_t num;
+       const struct sys_reg_desc *table;
+
+       /* Catch someone adding a register without putting in reset entry. */
+       memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
+
+       /* Generic chip reset first (so target could override). */
+       reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+
+       table = get_target_table(vcpu->arch.target, true, &num);
+       reset_sys_reg_descs(vcpu, table, num);
+
+       /* Any register still holding the 0x42 poison was never reset. */
+       for (num = 1; num < NR_SYS_REGS; num++)
+               if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
+                       panic("Didn't reset vcpu_sys_reg(%zi)", num);
+}
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
new file mode 100644 (file)
index 0000000..d411e25
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/kvm/coproc.h
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__
+#define __ARM64_KVM_SYS_REGS_LOCAL_H__
+
+/* Decoded form of a trapped system register / coprocessor access. */
+struct sys_reg_params {
+       u8      Op0;
+       u8      Op1;
+       u8      CRn;
+       u8      CRm;
+       u8      Op2;
+       u8      Rt;             /* Guest GPR index sourcing/receiving the value */
+       bool    is_write;
+       bool    is_aarch32;
+       bool    is_32bit;       /* Only valid if is_aarch32 is true */
+};
+
+/* Descriptor of one emulated system register and how to handle it. */
+struct sys_reg_desc {
+       /* MRS/MSR instruction which accesses it. */
+       u8      Op0;
+       u8      Op1;
+       u8      CRn;
+       u8      CRm;
+       u8      Op2;
+
+       /* Trapped access from guest, if non-NULL. */
+       bool (*access)(struct kvm_vcpu *,
+                      const struct sys_reg_params *,
+                      const struct sys_reg_desc *);
+
+       /* Initialization for vcpu. */
+       void (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *);
+
+       /* Index into sys_reg[], or 0 if we don't need to save it. */
+       int reg;
+
+       /* Value (usually reset value) */
+       u64 val;
+};
+
+/* Log an unhandled access in copy-pasteable descriptor-table form. */
+static inline void print_sys_reg_instr(const struct sys_reg_params *p)
+{
+       /* Look, we even formatted it for you to paste into the table! */
+       kvm_pr_unimpl(" { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
+                     p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
+}
+
+/* Access helper: silently discard a guest write (WI behaviour). */
+static inline bool ignore_write(struct kvm_vcpu *vcpu,
+                               const struct sys_reg_params *p)
+{
+       return true;
+}
+
+/* Access helper: a guest read returns zero (RAZ behaviour). */
+static inline bool read_zero(struct kvm_vcpu *vcpu,
+                            const struct sys_reg_params *p)
+{
+       *vcpu_reg(vcpu, p->Rt) = 0;
+       return true;
+}
+
+/* Complain about a write to a read-only register; the access fails. */
+static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
+                                     const struct sys_reg_params *params)
+{
+       kvm_debug("sys_reg write to read-only register at: %lx\n",
+                 *vcpu_pc(vcpu));
+       print_sys_reg_instr(params);
+       return false;
+}
+
+/* Complain about a read of a write-only register; the access fails. */
+static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
+                                       const struct sys_reg_params *params)
+{
+       kvm_debug("sys_reg read to write-only register at: %lx\n",
+                 *vcpu_pc(vcpu));
+       print_sys_reg_instr(params);
+       return false;
+}
+
+/* Reset functions */
+
+/* Fill the saved register with a recognisable poison value. */
+static inline void reset_unknown(struct kvm_vcpu *vcpu,
+                                const struct sys_reg_desc *r)
+{
+       BUG_ON(!r->reg);
+       BUG_ON(r->reg >= NR_SYS_REGS);
+       vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
+}
+
+/* Reset the saved register to the descriptor's ->val. */
+static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+       BUG_ON(!r->reg);
+       BUG_ON(r->reg >= NR_SYS_REGS);
+       vcpu_sys_reg(vcpu, r->reg) = r->val;
+}
+
+/*
+ * Total order on register encodings: Op0, Op1, CRn, CRm, Op2.
+ * A NULL descriptor sorts after everything (used by the table-merge
+ * walk, where NULL marks an exhausted table).
+ */
+static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
+                             const struct sys_reg_desc *i2)
+{
+       BUG_ON(i1 == i2);
+       if (!i1)
+               return 1;
+       else if (!i2)
+               return -1;
+       if (i1->Op0 != i2->Op0)
+               return i1->Op0 - i2->Op0;
+       if (i1->Op1 != i2->Op1)
+               return i1->Op1 - i2->Op1;
+       if (i1->CRn != i2->CRn)
+               return i1->CRn - i2->CRn;
+       if (i1->CRm != i2->CRm)
+               return i1->CRm - i2->CRm;
+       return i1->Op2 - i2->Op2;
+}
+
+
+/* Designated-initializer shorthands for sys_reg_desc tables. */
+#define Op0(_x)        .Op0 = _x
+#define Op1(_x)        .Op1 = _x
+#define CRn(_x)                .CRn = _x
+#define CRm(_x)        .CRm = _x
+#define Op2(_x)        .Op2 = _x
+
+#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
new file mode 100644 (file)
index 0000000..475fd29
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Based on arch/arm/kvm/coproc_a15.c:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Authors: Rusty Russell <rusty@rustcorp.au>
+ *          Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kvm_host.h>
+#include <asm/cputype.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <linux/init.h>
+
+#include "sys_regs.h"
+
+/* ACTLR_EL1 trap: writes are ignored, reads return the saved vcpu value. */
+static bool access_actlr(struct kvm_vcpu *vcpu,
+                        const struct sys_reg_params *p,
+                        const struct sys_reg_desc *r)
+{
+       if (p->is_write)
+               return ignore_write(vcpu, p);
+
+       *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1);
+       return true;
+}
+
+/* Seed the guest's ACTLR_EL1 from the host value at vcpu reset. */
+static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+       u64 actlr;
+
+       asm volatile("mrs %0, actlr_el1\n" : "=r" (actlr));
+       vcpu_sys_reg(vcpu, ACTLR_EL1) = actlr;
+}
+
+/*
+ * Implementation specific sys-reg registers.
+ * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
+ */
+static const struct sys_reg_desc genericv8_sys_regs[] = {
+       /* ACTLR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
+         access_actlr, reset_actlr, ACTLR_EL1 },
+};
+
+/* AArch32 (CP15) view of the implementation-specific registers. */
+static const struct sys_reg_desc genericv8_cp15_regs[] = {
+       /* ACTLR */
+       { Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
+         access_actlr },
+};
+
+/* 64bit and 32bit trap tables shared by all generic v8 targets. */
+static struct kvm_sys_reg_target_table genericv8_target_table = {
+       .table64 = {
+               .table = genericv8_sys_regs,
+               .num = ARRAY_SIZE(genericv8_sys_regs),
+       },
+       .table32 = {
+               .table = genericv8_cp15_regs,
+               .num = ARRAY_SIZE(genericv8_cp15_regs),
+       },
+};
+
+/* Register the generic v8 target table for every known v8 target type. */
+static int __init sys_reg_genericv8_init(void)
+{
+       unsigned int i;
+
+       /* The table must be strictly sorted by encoding. */
+       for (i = 1; i < ARRAY_SIZE(genericv8_sys_regs); i++)
+               BUG_ON(cmp_sys_reg(&genericv8_sys_regs[i-1],
+                              &genericv8_sys_regs[i]) >= 0);
+
+       kvm_register_target_sys_reg_table(KVM_ARM_TARGET_AEM_V8,
+                                         &genericv8_target_table);
+       kvm_register_target_sys_reg_table(KVM_ARM_TARGET_FOUNDATION_V8,
+                                         &genericv8_target_table);
+       kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A53,
+                                         &genericv8_target_table);
+       kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A57,
+                                         &genericv8_target_table);
+       kvm_register_target_sys_reg_table(KVM_ARM_TARGET_XGENE_POTENZA,
+                                         &genericv8_target_table);
+
+       return 0;
+}
+late_initcall(sys_reg_genericv8_init);
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
new file mode 100644 (file)
index 0000000..ae21177
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+       .text
+       .pushsection    .hyp.text, "ax"
+
+/*
+ * Save the VGIC CPU state into memory
+ * x0: Register pointing to VCPU struct
+ * Do not corrupt x1!!!
+ */
+ENTRY(__save_vgic_v2_state)
+__save_vgic_v2_state:
+       /* Get VGIC VCTRL base into x2 */
+       ldr     x2, [x0, #VCPU_KVM]
+       kern_hyp_va     x2
+       ldr     x2, [x2, #KVM_VGIC_VCTRL]
+       kern_hyp_va     x2
+       cbz     x2, 2f          // disabled
+
+       /* Compute the address of struct vgic_cpu */
+       add     x3, x0, #VCPU_VGIC_CPU
+
+       /* Save all interesting registers */
+       ldr     w4, [x2, #GICH_HCR]
+       ldr     w5, [x2, #GICH_VMCR]
+       ldr     w6, [x2, #GICH_MISR]
+       ldr     w7, [x2, #GICH_EISR0]
+       ldr     w8, [x2, #GICH_EISR1]
+       ldr     w9, [x2, #GICH_ELRSR0]
+       ldr     w10, [x2, #GICH_ELRSR1]
+       ldr     w11, [x2, #GICH_APR]
+CPU_BE(        rev     w4,  w4  )
+CPU_BE(        rev     w5,  w5  )
+CPU_BE(        rev     w6,  w6  )
+CPU_BE(        rev     w7,  w7  )
+CPU_BE(        rev     w8,  w8  )
+CPU_BE(        rev     w9,  w9  )
+CPU_BE(        rev     w10, w10 )
+CPU_BE(        rev     w11, w11 )
+
+       str     w4, [x3, #VGIC_V2_CPU_HCR]
+       str     w5, [x3, #VGIC_V2_CPU_VMCR]
+       str     w6, [x3, #VGIC_V2_CPU_MISR]
+       str     w7, [x3, #VGIC_V2_CPU_EISR]
+       str     w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
+       str     w9, [x3, #VGIC_V2_CPU_ELRSR]
+       str     w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
+       str     w11, [x3, #VGIC_V2_CPU_APR]
+
+       /* Clear GICH_HCR */
+       str     wzr, [x2, #GICH_HCR]
+
+       /* Save list registers */
+       add     x2, x2, #GICH_LR0
+       ldr     w4, [x3, #VGIC_CPU_NR_LR]
+       add     x3, x3, #VGIC_V2_CPU_LR
+1:     ldr     w5, [x2], #4
+CPU_BE(        rev     w5, w5 )
+       str     w5, [x3], #4
+       sub     w4, w4, #1
+       cbnz    w4, 1b
+2:
+       ret
+ENDPROC(__save_vgic_v2_state)
+
+/*
+ * Restore the VGIC CPU state from memory
+ * x0: Register pointing to VCPU struct
+ */
+ENTRY(__restore_vgic_v2_state)
+__restore_vgic_v2_state:
+       /* Get VGIC VCTRL base into x2 */
+       ldr     x2, [x0, #VCPU_KVM]
+       kern_hyp_va     x2
+       ldr     x2, [x2, #KVM_VGIC_VCTRL]
+       kern_hyp_va     x2
+       cbz     x2, 2f          // disabled
+
+       /* Compute the address of struct vgic_cpu */
+       add     x3, x0, #VCPU_VGIC_CPU
+
+       /* We only restore a minimal set of registers */
+       ldr     w4, [x3, #VGIC_V2_CPU_HCR]
+       ldr     w5, [x3, #VGIC_V2_CPU_VMCR]
+       ldr     w6, [x3, #VGIC_V2_CPU_APR]
+CPU_BE(        rev     w4, w4 )
+CPU_BE(        rev     w5, w5 )
+CPU_BE(        rev     w6, w6 )
+
+       str     w4, [x2, #GICH_HCR]
+       str     w5, [x2, #GICH_VMCR]
+       str     w6, [x2, #GICH_APR]
+
+       /* Restore list registers */
+       add     x2, x2, #GICH_LR0
+       ldr     w4, [x3, #VGIC_CPU_NR_LR]
+       add     x3, x3, #VGIC_V2_CPU_LR
+1:     ldr     w5, [x3], #4
+CPU_BE(        rev     w5, w5 )
+       str     w5, [x2], #4
+       sub     w4, w4, #1
+       cbnz    w4, 1b
+2:
+       ret
+ENDPROC(__restore_vgic_v2_state)
+
+       .popsection
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
new file mode 100644 (file)
index 0000000..d160469
--- /dev/null
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+       .text
+       .pushsection    .hyp.text, "ax"
+
+/*
+ * We store LRs in reverse order to let the CPU deal with streaming
+ * access. Use this macro to make it look saner...
+ */
+#define LR_OFFSET(n)   (VGIC_V3_CPU_LR + (15 - n) * 8)
+
+/*
+ * Save the VGIC CPU state into memory
+ * x0: Register pointing to VCPU struct
+ * Do not corrupt x1!!!
+ */
+.macro save_vgic_v3_state
+       // Compute the address of struct vgic_cpu
+       add     x3, x0, #VCPU_VGIC_CPU
+
+       // Make sure stores to the GIC via the memory mapped interface
+       // are now visible to the system register interface
+       dsb     st
+
+       // Save all interesting registers
+       mrs_s   x4, ICH_HCR_EL2
+       mrs_s   x5, ICH_VMCR_EL2
+       mrs_s   x6, ICH_MISR_EL2
+       mrs_s   x7, ICH_EISR_EL2
+       mrs_s   x8, ICH_ELSR_EL2
+
+       str     w4, [x3, #VGIC_V3_CPU_HCR]
+       str     w5, [x3, #VGIC_V3_CPU_VMCR]
+       str     w6, [x3, #VGIC_V3_CPU_MISR]
+       str     w7, [x3, #VGIC_V3_CPU_EISR]
+       str     w8, [x3, #VGIC_V3_CPU_ELRSR]
+
+       msr_s   ICH_HCR_EL2, xzr
+
+       mrs_s   x21, ICH_VTR_EL2
+       mvn     w22, w21
+       ubfiz   w23, w22, 2, 4  // w23 = (15 - ListRegs) * 4
+
+       adr     x24, 1f
+       add     x24, x24, x23
+       br      x24
+
+1:
+       mrs_s   x20, ICH_LR15_EL2
+       mrs_s   x19, ICH_LR14_EL2
+       mrs_s   x18, ICH_LR13_EL2
+       mrs_s   x17, ICH_LR12_EL2
+       mrs_s   x16, ICH_LR11_EL2
+       mrs_s   x15, ICH_LR10_EL2
+       mrs_s   x14, ICH_LR9_EL2
+       mrs_s   x13, ICH_LR8_EL2
+       mrs_s   x12, ICH_LR7_EL2
+       mrs_s   x11, ICH_LR6_EL2
+       mrs_s   x10, ICH_LR5_EL2
+       mrs_s   x9, ICH_LR4_EL2
+       mrs_s   x8, ICH_LR3_EL2
+       mrs_s   x7, ICH_LR2_EL2
+       mrs_s   x6, ICH_LR1_EL2
+       mrs_s   x5, ICH_LR0_EL2
+
+       adr     x24, 1f
+       add     x24, x24, x23
+       br      x24
+
+1:
+       str     x20, [x3, #LR_OFFSET(15)]
+       str     x19, [x3, #LR_OFFSET(14)]
+       str     x18, [x3, #LR_OFFSET(13)]
+       str     x17, [x3, #LR_OFFSET(12)]
+       str     x16, [x3, #LR_OFFSET(11)]
+       str     x15, [x3, #LR_OFFSET(10)]
+       str     x14, [x3, #LR_OFFSET(9)]
+       str     x13, [x3, #LR_OFFSET(8)]
+       str     x12, [x3, #LR_OFFSET(7)]
+       str     x11, [x3, #LR_OFFSET(6)]
+       str     x10, [x3, #LR_OFFSET(5)]
+       str     x9, [x3, #LR_OFFSET(4)]
+       str     x8, [x3, #LR_OFFSET(3)]
+       str     x7, [x3, #LR_OFFSET(2)]
+       str     x6, [x3, #LR_OFFSET(1)]
+       str     x5, [x3, #LR_OFFSET(0)]
+
+       tbnz    w21, #29, 6f    // 6 bits
+       tbz     w21, #30, 5f    // 5 bits
+                               // 7 bits
+       mrs_s   x20, ICH_AP0R3_EL2
+       str     w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
+       mrs_s   x19, ICH_AP0R2_EL2
+       str     w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
+6:     mrs_s   x18, ICH_AP0R1_EL2
+       str     w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
+5:     mrs_s   x17, ICH_AP0R0_EL2
+       str     w17, [x3, #VGIC_V3_CPU_AP0R]
+
+       tbnz    w21, #29, 6f    // 6 bits
+       tbz     w21, #30, 5f    // 5 bits
+                               // 7 bits
+       mrs_s   x20, ICH_AP1R3_EL2
+       str     w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
+       mrs_s   x19, ICH_AP1R2_EL2
+       str     w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
+6:     mrs_s   x18, ICH_AP1R1_EL2
+       str     w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
+5:     mrs_s   x17, ICH_AP1R0_EL2
+       str     w17, [x3, #VGIC_V3_CPU_AP1R]
+
+       // Restore SRE_EL1 access and re-enable SRE at EL1.
+       mrs_s   x5, ICC_SRE_EL2
+       orr     x5, x5, #ICC_SRE_EL2_ENABLE
+       msr_s   ICC_SRE_EL2, x5
+       isb
+       mov     x5, #1
+       msr_s   ICC_SRE_EL1, x5
+.endm
+
+/*
+ * Restore the VGIC CPU state from memory
+ * x0: Register pointing to VCPU struct
+ */
+.macro restore_vgic_v3_state
+       // Disable SRE_EL1 access. Necessary, otherwise
+       // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
+       msr_s   ICC_SRE_EL1, xzr
+       isb
+
+       // Compute the address of struct vgic_cpu
+       add     x3, x0, #VCPU_VGIC_CPU
+
+       // Restore all interesting registers
+       ldr     w4, [x3, #VGIC_V3_CPU_HCR]
+       ldr     w5, [x3, #VGIC_V3_CPU_VMCR]
+
+       msr_s   ICH_HCR_EL2, x4
+       msr_s   ICH_VMCR_EL2, x5
+
+       mrs_s   x21, ICH_VTR_EL2
+
+       tbnz    w21, #29, 6f    // 6 bits
+       tbz     w21, #30, 5f    // 5 bits
+                               // 7 bits
+       ldr     w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
+       msr_s   ICH_AP1R3_EL2, x20
+       ldr     w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
+       msr_s   ICH_AP1R2_EL2, x19
+6:     ldr     w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
+       msr_s   ICH_AP1R1_EL2, x18
+5:     ldr     w17, [x3, #VGIC_V3_CPU_AP1R]
+       msr_s   ICH_AP1R0_EL2, x17
+
+       tbnz    w21, #29, 6f    // 6 bits
+       tbz     w21, #30, 5f    // 5 bits
+                               // 7 bits
+       ldr     w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
+       msr_s   ICH_AP0R3_EL2, x20
+       ldr     w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
+       msr_s   ICH_AP0R2_EL2, x19
+6:     ldr     w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
+       msr_s   ICH_AP0R1_EL2, x18
+5:     ldr     w17, [x3, #VGIC_V3_CPU_AP0R]
+       msr_s   ICH_AP0R0_EL2, x17
+
+       and     w22, w21, #0xf
+       mvn     w22, w21
+       ubfiz   w23, w22, 2, 4  // w23 = (15 - ListRegs) * 4
+
+       adr     x24, 1f
+       add     x24, x24, x23
+       br      x24
+
+1:
+       ldr     x20, [x3, #LR_OFFSET(15)]
+       ldr     x19, [x3, #LR_OFFSET(14)]
+       ldr     x18, [x3, #LR_OFFSET(13)]
+       ldr     x17, [x3, #LR_OFFSET(12)]
+       ldr     x16, [x3, #LR_OFFSET(11)]
+       ldr     x15, [x3, #LR_OFFSET(10)]
+       ldr     x14, [x3, #LR_OFFSET(9)]
+       ldr     x13, [x3, #LR_OFFSET(8)]
+       ldr     x12, [x3, #LR_OFFSET(7)]
+       ldr     x11, [x3, #LR_OFFSET(6)]
+       ldr     x10, [x3, #LR_OFFSET(5)]
+       ldr     x9, [x3, #LR_OFFSET(4)]
+       ldr     x8, [x3, #LR_OFFSET(3)]
+       ldr     x7, [x3, #LR_OFFSET(2)]
+       ldr     x6, [x3, #LR_OFFSET(1)]
+       ldr     x5, [x3, #LR_OFFSET(0)]
+
+       adr     x24, 1f
+       add     x24, x24, x23
+       br      x24
+
+1:
+       msr_s   ICH_LR15_EL2, x20
+       msr_s   ICH_LR14_EL2, x19
+       msr_s   ICH_LR13_EL2, x18
+       msr_s   ICH_LR12_EL2, x17
+       msr_s   ICH_LR11_EL2, x16
+       msr_s   ICH_LR10_EL2, x15
+       msr_s   ICH_LR9_EL2,  x14
+       msr_s   ICH_LR8_EL2,  x13
+       msr_s   ICH_LR7_EL2,  x12
+       msr_s   ICH_LR6_EL2,  x11
+       msr_s   ICH_LR5_EL2,  x10
+       msr_s   ICH_LR4_EL2,   x9
+       msr_s   ICH_LR3_EL2,   x8
+       msr_s   ICH_LR2_EL2,   x7
+       msr_s   ICH_LR1_EL2,   x6
+       msr_s   ICH_LR0_EL2,   x5
+
+       // Ensure that the above will have reached the
+       // (re)distributors. This ensure the guest will read
+       // the correct values from the memory-mapped interface.
+       isb
+       dsb     sy
+
+       // Prevent the guest from touching the GIC system registers
+       mrs_s   x5, ICC_SRE_EL2
+       and     x5, x5, #~ICC_SRE_EL2_ENABLE
+       msr_s   ICC_SRE_EL2, x5
+.endm
+
+ENTRY(__save_vgic_v3_state)
+       save_vgic_v3_state
+       ret
+ENDPROC(__save_vgic_v3_state)
+
+ENTRY(__restore_vgic_v3_state)
+       restore_vgic_v3_state
+       ret
+ENDPROC(__restore_vgic_v3_state)
+
+ENTRY(__vgic_v3_get_ich_vtr_el2)
+       mrs_s   x0, ICH_VTR_EL2
+       ret
+ENDPROC(__vgic_v3_get_ich_vtr_el2)
+
+       .popsection
index e5db797790d3265c5418d749b7537c494ddeb250..7dac371cc9a2f8c817d895d1be0103db9a009ec9 100644 (file)
@@ -46,11 +46,12 @@ ENTRY(      \name   )
        mov     x2, #1
        add     x1, x1, x0, lsr #3      // Get word offset
        lsl     x4, x2, x3              // Create mask
-1:     ldaxr   x2, [x1]
+1:     ldxr    x2, [x1]
        lsr     x0, x2, x3              // Save old value of bit
        \instr  x2, x2, x4              // toggle bit
        stlxr   w5, x2, [x1]
        cbnz    w5, 1b
+       dmb     ish
        and     x0, x0, #1
 3:     ret
 ENDPROC(\name  )
index 6e0ed93d51fe1850ee27176fc9222dac5416b74d..c17967fdf5f6007330ba65beb7a05bd347600764 100644 (file)
@@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2   )
        sub     x1, x1, #2
 4:     adds    x1, x1, #1
        b.mi    5f
-       strb    wzr, [x0]
+USER(9f, strb  wzr, [x0]       )
 5:     mov     x0, #0
        ret
 ENDPROC(__clear_user)
diff --git a/arch/arm64/mach-rockchip/Kconfig b/arch/arm64/mach-rockchip/Kconfig
new file mode 100644 (file)
index 0000000..db47762
--- /dev/null
@@ -0,0 +1 @@
+source "arch/arm/mach-rockchip/Kconfig.common"
diff --git a/arch/arm64/mach-rockchip/Makefile b/arch/arm64/mach-rockchip/Makefile
new file mode 100644 (file)
index 0000000..c2a9526
--- /dev/null
@@ -0,0 +1,9 @@
+obj-y += ../../arm/mach-rockchip/common.o
+obj-y += ../../arm/mach-rockchip/cpu.o
+obj-y += ../../arm/mach-rockchip/efuse.o
+obj-y += ../../arm/mach-rockchip/pvtm.o
+obj-y += ../../arm/mach-rockchip/rk_system_status.o
+obj-$(CONFIG_PM) += ../../arm/mach-rockchip/rockchip_pm.o
+obj-$(CONFIG_RK_LAST_LOG) += ../../arm/mach-rockchip/last_log.o
+obj-$(CONFIG_DVFS) += ../../arm/mach-rockchip/dvfs.o
+obj-$(CONFIG_BLOCK_RKNAND) += ../../arm/mach-rockchip/rknandbase.o
index df4f2fd187c3bed7b54574fa0ac92e86da3727a7..c23751b0612033f56533955788eabefa6c5fd39e 100644 (file)
@@ -199,13 +199,6 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
        unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
        unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
-       if (esr & ESR_LNX_EXEC) {
-               vm_flags = VM_EXEC;
-       } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
-               vm_flags = VM_WRITE;
-               mm_flags |= FAULT_FLAG_WRITE;
-       }
-
        tsk = current;
        mm  = tsk->mm;
 
@@ -220,6 +213,16 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               mm_flags |= FAULT_FLAG_USER;
+
+       if (esr & ESR_LNX_EXEC) {
+               vm_flags = VM_EXEC;
+       } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+               vm_flags = VM_WRITE;
+               mm_flags |= FAULT_FLAG_WRITE;
+       }
+
        /*
         * As per x86, we may deadlock here. However, since the kernel only
         * validly references user space from well defined areas of the code,
@@ -288,6 +291,13 @@ retry:
                              VM_FAULT_BADACCESS))))
                return 0;
 
+       /*
+        * If we are in kernel mode at this point, we have no context to
+        * handle this fault with.
+        */
+       if (!user_mode(regs))
+               goto no_context;
+
        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, call the OOM killer, and return to
@@ -298,13 +308,6 @@ retry:
                return 0;
        }
 
-       /*
-        * If we are in kernel mode at this point, we have no context to
-        * handle this fault with.
-        */
-       if (!user_mode(regs))
-               goto no_context;
-
        if (fault & VM_FAULT_SIGBUS) {
                /*
                 * We had some memory, but were unable to successfully fix up
index 52806427e15d2cd7ee8cf145cfb132a34b6030ef..a725447ec8e003f090de16f40988c57ef8c9039f 100644 (file)
@@ -110,9 +110,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 }
 
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
+#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)
+
 int pfn_valid(unsigned long pfn)
 {
-       return memblock_is_memory(pfn << PAGE_SHIFT);
+       return (pfn & PFN_MASK) == pfn && memblock_is_memory(pfn << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
@@ -170,6 +172,7 @@ void __init arm64_memblock_init(void)
                memblock_reserve(base, size);
        }
 
+       early_init_fdt_scan_reserved_mem();
        dma_contiguous_reserve(0);
 
        memblock_allow_resize();
index e0ef63cd05dccca405010f747e5df71a2d15caa3..e832494016157716da981c07097ba0b0d5e3535e 100644 (file)
@@ -209,8 +209,14 @@ ENTRY(__cpu_setup)
         * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
         * both user and kernel.
         */
-       ldr     x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \
+       ldr     x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | \
                      TCR_ASID16 | TCR_TBI0 | (1 << 31)
+       /*
+        * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
+        * TCR_EL1.
+        */
+       mrs     x9, ID_AA64MMFR0_EL1
+       bfi     x10, x9, #32, #3
 #ifdef CONFIG_ARM64_64K_PAGES
        orr     x10, x10, TCR_TG0_64K
        orr     x10, x10, TCR_TG1_64K
@@ -219,6 +225,20 @@ ENTRY(__cpu_setup)
        ret                                     // return to head.S
 ENDPROC(__cpu_setup)
 
+#ifdef CONFIG_ARMV7_COMPAT
+       /*
+        *                 n n            T
+        *       U E      WT T UD     US IHBS
+        *       CE0      XWHW CZ     ME TEEA S
+        * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
+        * 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
+        * .... .100 .... 01.1 11.1 ..01 0011 1101 < software settings
+        */
+       .type   crval, #object
+crval:
+       .word   0x030802e2                      // clear
+       .word   0x0405d03d                      // set
+#else
        /*
         *                 n n            T
         *       U E      WT T UD     US IHBS
@@ -231,3 +251,4 @@ ENDPROC(__cpu_setup)
 crval:
        .word   0x000802e2                      // clear
        .word   0x0405d11d                      // set
+#endif
index b2f2d2d668491905dbbc37c385449852a9e7fdde..0eca93327195077ec16bdfd99efd7294c6ab2de6 100644 (file)
@@ -86,6 +86,8 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 
        local_irq_enable();
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
 
@@ -228,9 +230,9 @@ no_context:
         */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       pagefault_out_of_memory();
        if (!user_mode(regs))
                goto no_context;
+       pagefault_out_of_memory();
        return;
 
 do_sigbus:
index 73312ab6c696c160f7fd58df02a50890390ad650..1790f22e71a21a859b2b7b1942cbbc503c2d557e 100644 (file)
@@ -58,8 +58,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
        struct vm_area_struct * vma;
        siginfo_t info;
        int fault;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                               ((writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        D(printk(KERN_DEBUG
                 "Page fault for %lX on %X at %lX, prot %d write %d\n",
@@ -117,6 +116,8 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
@@ -155,6 +156,7 @@ retry:
        } else if (writeaccess == 1) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
index 331c1e2cfb6760ee7ed3f38d4e5c92a2526d44bf..9a66372fc7c76019ca874a9c3780c2fc8392266c 100644 (file)
@@ -34,11 +34,11 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long _pme, lrai, lrad, fixup;
+       unsigned long flags = 0;
        siginfo_t info;
        pgd_t *pge;
        pud_t *pue;
        pte_t *pte;
-       int write;
        int fault;
 
 #if 0
@@ -81,6 +81,9 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(__frame))
+               flags |= FAULT_FLAG_USER;
+
        down_read(&mm->mmap_sem);
 
        vma = find_vma(mm, ear0);
@@ -129,7 +132,6 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
  */
  good_area:
        info.si_code = SEGV_ACCERR;
-       write = 0;
        switch (esr0 & ESR0_ATXC) {
        default:
                /* handle write to write protected page */
@@ -140,7 +142,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 #endif
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
-               write = 1;
+               flags |= FAULT_FLAG_WRITE;
                break;
 
                 /* handle read from protected page */
@@ -162,7 +164,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
+       fault = handle_mm_fault(mm, vma, ear0, flags);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index 1bd276dbec7d3503f92a19fc7cb5a39df1051621..8704c9320032705cf7de10d6a94f3c4d70cf8b12 100644 (file)
@@ -53,8 +53,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
        int si_code = SEGV_MAPERR;
        int fault;
        const struct exception_table_entry *fixup;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                                (cause > 0 ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        /*
         * If we're in an interrupt or have no user context,
@@ -65,6 +64,8 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 
        local_irq_enable();
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
@@ -96,6 +97,7 @@ good_area:
        case FLT_STORE:
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
                break;
        }
 
index 60576e06b6fb4b3205292f621c727626a6232c92..d0a69aa35e27decc3eacb83e299a910945221480 100644 (file)
 # define smp_rmb()     rmb()
 # define smp_wmb()     wmb()
 # define smp_read_barrier_depends()    read_barrier_depends()
+
 #else
+
 # define smp_mb()      barrier()
 # define smp_rmb()     barrier()
 # define smp_wmb()     barrier()
 # define smp_read_barrier_depends()    do { } while(0)
+
 #endif
 
+/*
+ * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
+ * need for asm trickery!
+ */
+
+#define smp_store_release(p, v)                                                \
+do {                                                                   \
+       compiletime_assert_atomic_type(*p);                             \
+       barrier();                                                      \
+       ACCESS_ONCE(*p) = (v);                                          \
+} while (0)
+
+#define smp_load_acquire(p)                                            \
+({                                                                     \
+       typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
+       compiletime_assert_atomic_type(*p);                             \
+       barrier();                                                      \
+       ___p1;                                                          \
+})
+
 /*
  * XXX check on this ---I suspect what Linus really wants here is
  * acquire vs release semantics but we can't discuss this stuff with
index 989dd3fe8de19d9fc40de248f5788f359eb3ebc6..cf03097176b1af8686468bd5afdafabe6559ee4e 100644 (file)
@@ -238,9 +238,6 @@ struct kvm_vm_data {
 #define KVM_NR_PAGE_SIZES      1
 #define KVM_PAGES_PER_HPAGE(x) 1
 
-struct kvm;
-struct kvm_vcpu;
-
 struct kvm_mmio_req {
        uint64_t addr;          /*  physical address            */
        uint64_t size;          /*  size in bytes               */
@@ -599,6 +596,18 @@ void kvm_sal_emul(struct kvm_vcpu *vcpu);
 struct kvm *kvm_arch_alloc_vm(void);
 void kvm_arch_free_vm(struct kvm *kvm);
 
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm,
+               struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_commit_memory_region(struct kvm *kvm,
+               struct kvm_userspace_memory_region *mem,
+               const struct kvm_memory_slot *old,
+               enum kvm_mr_change change) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+
 #endif /* __ASSEMBLY__*/
 
 #endif
index 990b86420cc64638b542b95dd6a9059ba8cd37f9..3d50ea955c4cf435674ced40d0ee98f33a6b2d96 100644 (file)
@@ -25,6 +25,7 @@ config KVM
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        select HAVE_KVM_IRQCHIP
+       select HAVE_KVM_IRQFD
        select HAVE_KVM_IRQ_ROUTING
        select KVM_APIC_ARCHITECTURE
        select KVM_MMIO
index 1a4053789d0167ad6cb70dcb60a09e56020eaeb3..18e45ec49bbfdbb3bd25627eba31b79d22b9ad5a 100644 (file)
@@ -47,12 +47,13 @@ FORCE : $(obj)/$(offsets-file)
 
 ccflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
 asflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
+KVM := ../../../virt/kvm
 
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
-               coalesced_mmio.o irq_comm.o)
+common-objs = $(KVM)/kvm_main.o $(KVM)/ioapic.o \
+               $(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o
 
 ifeq ($(CONFIG_KVM_DEVICE_ASSIGNMENT),y)
-common-objs += $(addprefix ../../../virt/kvm/, assigned-dev.o iommu.o)
+common-objs += $(KVM)/assigned-dev.o $(KVM)/iommu.o
 endif
 
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
index 5b2dc0d10c8f4211d28e044a2071306d1ef955ed..c9aa236dc29b7cf68fe8526ea7fbcb6e02533354 100644 (file)
@@ -125,7 +125,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
 
 static  DEFINE_SPINLOCK(vp_lock);
 
-int kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void)
 {
        long  status;
        long  tmp_base;
@@ -160,7 +160,7 @@ int kvm_arch_hardware_enable(void *garbage)
        return 0;
 }
 
-void kvm_arch_hardware_disable(void *garbage)
+void kvm_arch_hardware_disable(void)
 {
 
        long status;
@@ -190,7 +190,7 @@ void kvm_arch_check_processor_compat(void *rtn)
        *(int *)rtn = 0;
 }
 
-int kvm_dev_ioctl_check_extension(long ext)
+int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 {
 
        int r;
@@ -702,7 +702,7 @@ again:
 out:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        if (r > 0) {
-               kvm_resched(vcpu);
+               cond_resched();
                idx = srcu_read_lock(&vcpu->kvm->srcu);
                goto again;
        }
@@ -1363,10 +1363,6 @@ static void kvm_release_vm_pages(struct kvm *kvm)
        }
 }
 
-void kvm_arch_sync_events(struct kvm *kvm)
-{
-}
-
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
        kvm_iommu_unmap_guest(kvm);
@@ -1375,10 +1371,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
        kvm_release_vm_pages(kvm);
 }
 
-void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
-{
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        if (cpu != vcpu->cpu) {
@@ -1467,7 +1459,6 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
        kfree(vcpu->arch.apic);
 }
 
-
 long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
 {
@@ -1550,12 +1541,8 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
        return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
-                          struct kvm_memory_slot *dont)
-{
-}
-
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+                           unsigned long npages)
 {
        return 0;
 }
@@ -1591,14 +1578,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
        return 0;
 }
 
-void kvm_arch_commit_memory_region(struct kvm *kvm,
-               struct kvm_userspace_memory_region *mem,
-               const struct kvm_memory_slot *old,
-               enum kvm_mr_change change)
-{
-       return;
-}
-
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
        kvm_flush_remote_tlbs(kvm);
@@ -1847,10 +1826,6 @@ int kvm_arch_hardware_setup(void)
        return 0;
 }
 
-void kvm_arch_hardware_unsetup(void)
-{
-}
-
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
 {
        return __apic_accept_irq(vcpu, irq->vector);
index 6cf0341f978e59ddf235c44e70dcf615799390ed..7225dad87094d81e89459e5a61909fa5b2d10ca0 100644 (file)
@@ -90,8 +90,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
        mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
                | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
 
-       flags |= ((mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
-
        /* mmap_sem is performance critical.... */
        prefetchw(&mm->mmap_sem);
 
@@ -119,6 +117,10 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
        if (notify_page_fault(regs, TRAP_BRKPT))
                return;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+       if (mask & VM_WRITE)
+               flags |= FAULT_FLAG_WRITE;
 retry:
        down_read(&mm->mmap_sem);
 
index 3cdfa9c1d0915b71969c0943b27da64e64156d53..e9c6a8014bd647eec50a66afb5bc75b076b35e4d 100644 (file)
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long page, addr;
-       int write;
+       unsigned long flags = 0;
        int fault;
        siginfo_t info;
 
@@ -117,6 +117,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;
 
+       if (error_code & ACE_USERMODE)
+               flags |= FAULT_FLAG_USER;
+
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -166,14 +169,13 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
  */
 good_area:
        info.si_code = SEGV_ACCERR;
-       write = 0;
        switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
                default:        /* 3: write, present */
                        /* fall through */
                case ACE_WRITE: /* write, not present */
                        if (!(vma->vm_flags & VM_WRITE))
                                goto bad_area;
-                       write++;
+                       flags |= FAULT_FLAG_WRITE;
                        break;
                case ACE_PROTECTION:    /* read, present */
                case 0:         /* read, not present */
@@ -194,7 +196,7 @@ good_area:
         */
        addr = (address & PAGE_MASK);
        set_thread_fault_code(error_code);
-       fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
+       fault = handle_mm_fault(mm, vma, addr, flags);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index a563727806bf922b5b3315559b9b46b5bbb9d7fc..eb1d61f6872549991dae7d5e491a74627d8456d0 100644 (file)
@@ -88,6 +88,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
 
index 2c7dde3c6430fc3bbfe1de8d812253ee90173d3c..2a5259fd23ebc532bfc73b6d54ec2d800cd95af0 100644 (file)
 int hwreg_present( volatile void *regp )
 {
     int        ret = 0;
+    unsigned long flags;
     long       save_sp, save_vbr;
     long       tmp_vectors[3];
 
+    local_irq_save(flags);
     __asm__ __volatile__
        (       "movec  %/vbr,%2\n\t"
                "movel  #Lberr1,%4@(8)\n\t"
@@ -46,6 +48,7 @@ int hwreg_present( volatile void *regp )
                : "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
                : "a" (regp), "a" (tmp_vectors)
                 );
+    local_irq_restore(flags);
 
     return( ret );
 }
@@ -58,9 +61,11 @@ EXPORT_SYMBOL(hwreg_present);
 int hwreg_write( volatile void *regp, unsigned short val )
 {
        int             ret;
+       unsigned long flags;
        long    save_sp, save_vbr;
        long    tmp_vectors[3];
 
+       local_irq_save(flags);
        __asm__ __volatile__
        (       "movec  %/vbr,%2\n\t"
                "movel  #Lberr2,%4@(8)\n\t"
@@ -78,6 +83,7 @@ int hwreg_write( volatile void *regp, unsigned short val )
                : "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
                : "a" (regp), "a" (tmp_vectors), "g" (val)
        );
+       local_irq_restore(flags);
 
        return( ret );
 }
index e355a4c10968eecdbb8760edf7906233a4e17d84..2d6f0de7732529212bf4a9256bb3720386e39e71 100644 (file)
@@ -85,4 +85,19 @@ static inline void fence(void)
 #define smp_read_barrier_depends()     do { } while (0)
 #define set_mb(var, value) do { var = value; smp_mb(); } while (0)
 
+#define smp_store_release(p, v)                                                \
+do {                                                                   \
+       compiletime_assert_atomic_type(*p);                             \
+       smp_mb();                                                       \
+       ACCESS_ONCE(*p) = (v);                                          \
+} while (0)
+
+#define smp_load_acquire(p)                                            \
+({                                                                     \
+       typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
+       compiletime_assert_atomic_type(*p);                             \
+       smp_mb();                                                       \
+       ___p1;                                                          \
+})
+
 #endif /* _ASM_METAG_BARRIER_H */
index 2c75bf7357c58deec87850b5cf293d510ddd9743..332680e5ebf23c7909b796c415c2273efd77ba3c 100644 (file)
@@ -53,8 +53,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
        struct vm_area_struct *vma, *prev_vma;
        siginfo_t info;
        int fault;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                               (write_access ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        tsk = current;
 
@@ -109,6 +108,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
 
@@ -121,6 +122,7 @@ good_area:
        if (write_access) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
@@ -224,8 +226,10 @@ do_sigbus:
         */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       if (user_mode(regs))
-               do_group_exit(SIGKILL);
+       if (user_mode(regs)) {
+               pagefault_out_of_memory();
+               return 1;
+       }
 
 no_context:
        /* Are we prepared to handle this kernel fault?  */
index 731f739d17a1be6c485a7759f9479adc592377d1..fa4cf52aa7a6d386711690005a314ece7d67fc53 100644 (file)
@@ -92,8 +92,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
        int code = SEGV_MAPERR;
        int is_write = error_code & ESR_S;
        int fault;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                                        (is_write ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        regs->ear = address;
        regs->esr = error_code;
@@ -121,6 +120,9 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
                die("Weird page fault", regs, SIGSEGV);
        }
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -199,6 +201,7 @@ good_area:
        if (unlikely(is_write)) {
                if (unlikely(!(vma->vm_flags & VM_WRITE)))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        /* a read */
        } else {
                /* protection fault */
index 2c9573098c0dab7889895de68c2dae5bb9ad9ba8..d498a1f9bccf5d307f8a44a7775967c7b8baaf35 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/string.h>
 
 #include <asm/addrspace.h>
 
index 2a75ff249e7156cec2e424f79e2caedf6163c78e..6430e7acb1ebbfdfb30a864afd9eda6d474cd334 100644 (file)
@@ -463,6 +463,18 @@ static void octeon_halt(void)
        octeon_kill_core(NULL);
 }
 
+static char __read_mostly octeon_system_type[80];
+
+static int __init init_octeon_system_type(void)
+{
+       snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
+               cvmx_board_type_to_string(octeon_bootinfo->board_type),
+               octeon_model_get_string(read_c0_prid()));
+
+       return 0;
+}
+early_initcall(init_octeon_system_type);
+
 /**
  * Handle all the error condition interrupts that might occur.
  *
@@ -482,11 +494,7 @@ static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
  */
 const char *octeon_board_type_string(void)
 {
-       static char name[80];
-       sprintf(name, "%s (%s)",
-               cvmx_board_type_to_string(octeon_bootinfo->board_type),
-               octeon_model_get_string(read_c0_prid()));
-       return name;
+       return octeon_system_type;
 }
 
 const char *get_system_type(void)
index 314ab5532019603545add2b96249d98397d0c08e..52c5b61d7aba9b7343a232bc8913535c06e44c9f 100644 (file)
 #define nudge_writes() mb()
 #endif
 
+#define smp_store_release(p, v)                                                \
+do {                                                                   \
+       compiletime_assert_atomic_type(*p);                             \
+       smp_mb();                                                       \
+       ACCESS_ONCE(*p) = (v);                                          \
+} while (0)
+
+#define smp_load_acquire(p)                                            \
+({                                                                     \
+       typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
+       compiletime_assert_atomic_type(*p);                             \
+       smp_mb();                                                       \
+       ___p1;                                                          \
+})
+
 #endif /* __ASM_BARRIER_H */
index 4d6fa0bf1305d7376c7ae2ce2201fc412a8791de..5e3f4b0f18c81b5d2cd13dd34ef86ae44d32fc0c 100644 (file)
 #define CAUSEB_DC       27
 #define CAUSEF_DC       (_ULCAST_(1)   << 27)
 
-struct kvm;
-struct kvm_run;
-struct kvm_vcpu;
-struct kvm_interrupt;
-
 extern atomic_t kvm_mips_instance;
 extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
 extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
@@ -659,5 +654,16 @@ extern void mips32_SyncICache(unsigned long addr, unsigned long size);
 extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
 extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
 
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm,
+               struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+               struct kvm_memory_slot *slot) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 
 #endif /* __MIPS_KVM_HOST_H__ */
index 910e71a12466de2f1fb3f1fab82c6203ef062e63..b8343ccbc98986b910e72bf65fb9aa71120e4478 100644 (file)
 #ifndef __ASM_MIPS_REG_H
 #define __ASM_MIPS_REG_H
 
-
-#if defined(CONFIG_32BIT) || defined(WANT_COMPAT_REG_H)
-
-#define EF_R0                  6
-#define EF_R1                  7
-#define EF_R2                  8
-#define EF_R3                  9
-#define EF_R4                  10
-#define EF_R5                  11
-#define EF_R6                  12
-#define EF_R7                  13
-#define EF_R8                  14
-#define EF_R9                  15
-#define EF_R10                 16
-#define EF_R11                 17
-#define EF_R12                 18
-#define EF_R13                 19
-#define EF_R14                 20
-#define EF_R15                 21
-#define EF_R16                 22
-#define EF_R17                 23
-#define EF_R18                 24
-#define EF_R19                 25
-#define EF_R20                 26
-#define EF_R21                 27
-#define EF_R22                 28
-#define EF_R23                 29
-#define EF_R24                 30
-#define EF_R25                 31
+#define MIPS32_EF_R0           6
+#define MIPS32_EF_R1           7
+#define MIPS32_EF_R2           8
+#define MIPS32_EF_R3           9
+#define MIPS32_EF_R4           10
+#define MIPS32_EF_R5           11
+#define MIPS32_EF_R6           12
+#define MIPS32_EF_R7           13
+#define MIPS32_EF_R8           14
+#define MIPS32_EF_R9           15
+#define MIPS32_EF_R10          16
+#define MIPS32_EF_R11          17
+#define MIPS32_EF_R12          18
+#define MIPS32_EF_R13          19
+#define MIPS32_EF_R14          20
+#define MIPS32_EF_R15          21
+#define MIPS32_EF_R16          22
+#define MIPS32_EF_R17          23
+#define MIPS32_EF_R18          24
+#define MIPS32_EF_R19          25
+#define MIPS32_EF_R20          26
+#define MIPS32_EF_R21          27
+#define MIPS32_EF_R22          28
+#define MIPS32_EF_R23          29
+#define MIPS32_EF_R24          30
+#define MIPS32_EF_R25          31
 
 /*
  * k0/k1 unsaved
  */
-#define EF_R26                 32
-#define EF_R27                 33
+#define MIPS32_EF_R26          32
+#define MIPS32_EF_R27          33
 
-#define EF_R28                 34
-#define EF_R29                 35
-#define EF_R30                 36
-#define EF_R31                 37
+#define MIPS32_EF_R28          34
+#define MIPS32_EF_R29          35
+#define MIPS32_EF_R30          36
+#define MIPS32_EF_R31          37
 
 /*
  * Saved special registers
  */
-#define EF_LO                  38
-#define EF_HI                  39
-
-#define EF_CP0_EPC             40
-#define EF_CP0_BADVADDR                41
-#define EF_CP0_STATUS          42
-#define EF_CP0_CAUSE           43
-#define EF_UNUSED0             44
-
-#define EF_SIZE                        180
-
-#endif
-
-#if defined(CONFIG_64BIT) && !defined(WANT_COMPAT_REG_H)
-
-#define EF_R0                   0
-#define EF_R1                   1
-#define EF_R2                   2
-#define EF_R3                   3
-#define EF_R4                   4
-#define EF_R5                   5
-#define EF_R6                   6
-#define EF_R7                   7
-#define EF_R8                   8
-#define EF_R9                   9
-#define EF_R10                 10
-#define EF_R11                 11
-#define EF_R12                 12
-#define EF_R13                 13
-#define EF_R14                 14
-#define EF_R15                 15
-#define EF_R16                 16
-#define EF_R17                 17
-#define EF_R18                 18
-#define EF_R19                 19
-#define EF_R20                 20
-#define EF_R21                 21
-#define EF_R22                 22
-#define EF_R23                 23
-#define EF_R24                 24
-#define EF_R25                 25
+#define MIPS32_EF_LO           38
+#define MIPS32_EF_HI           39
+
+#define MIPS32_EF_CP0_EPC      40
+#define MIPS32_EF_CP0_BADVADDR 41
+#define MIPS32_EF_CP0_STATUS   42
+#define MIPS32_EF_CP0_CAUSE    43
+#define MIPS32_EF_UNUSED0      44
+
+#define MIPS32_EF_SIZE         180
+
+#define MIPS64_EF_R0           0
+#define MIPS64_EF_R1           1
+#define MIPS64_EF_R2           2
+#define MIPS64_EF_R3           3
+#define MIPS64_EF_R4           4
+#define MIPS64_EF_R5           5
+#define MIPS64_EF_R6           6
+#define MIPS64_EF_R7           7
+#define MIPS64_EF_R8           8
+#define MIPS64_EF_R9           9
+#define MIPS64_EF_R10          10
+#define MIPS64_EF_R11          11
+#define MIPS64_EF_R12          12
+#define MIPS64_EF_R13          13
+#define MIPS64_EF_R14          14
+#define MIPS64_EF_R15          15
+#define MIPS64_EF_R16          16
+#define MIPS64_EF_R17          17
+#define MIPS64_EF_R18          18
+#define MIPS64_EF_R19          19
+#define MIPS64_EF_R20          20
+#define MIPS64_EF_R21          21
+#define MIPS64_EF_R22          22
+#define MIPS64_EF_R23          23
+#define MIPS64_EF_R24          24
+#define MIPS64_EF_R25          25
 
 /*
  * k0/k1 unsaved
  */
-#define EF_R26                 26
-#define EF_R27                 27
+#define MIPS64_EF_R26          26
+#define MIPS64_EF_R27          27
 
 
-#define EF_R28                 28
-#define EF_R29                 29
-#define EF_R30                 30
-#define EF_R31                 31
+#define MIPS64_EF_R28          28
+#define MIPS64_EF_R29          29
+#define MIPS64_EF_R30          30
+#define MIPS64_EF_R31          31
 
 /*
  * Saved special registers
  */
-#define EF_LO                  32
-#define EF_HI                  33
-
-#define EF_CP0_EPC             34
-#define EF_CP0_BADVADDR                35
-#define EF_CP0_STATUS          36
-#define EF_CP0_CAUSE           37
-
-#define EF_SIZE                        304     /* size in bytes */
+#define MIPS64_EF_LO           32
+#define MIPS64_EF_HI           33
+
+#define MIPS64_EF_CP0_EPC      34
+#define MIPS64_EF_CP0_BADVADDR 35
+#define MIPS64_EF_CP0_STATUS   36
+#define MIPS64_EF_CP0_CAUSE    37
+
+#define MIPS64_EF_SIZE         304     /* size in bytes */
+
+#if defined(CONFIG_32BIT)
+
+#define EF_R0                  MIPS32_EF_R0
+#define EF_R1                  MIPS32_EF_R1
+#define EF_R2                  MIPS32_EF_R2
+#define EF_R3                  MIPS32_EF_R3
+#define EF_R4                  MIPS32_EF_R4
+#define EF_R5                  MIPS32_EF_R5
+#define EF_R6                  MIPS32_EF_R6
+#define EF_R7                  MIPS32_EF_R7
+#define EF_R8                  MIPS32_EF_R8
+#define EF_R9                  MIPS32_EF_R9
+#define EF_R10                 MIPS32_EF_R10
+#define EF_R11                 MIPS32_EF_R11
+#define EF_R12                 MIPS32_EF_R12
+#define EF_R13                 MIPS32_EF_R13
+#define EF_R14                 MIPS32_EF_R14
+#define EF_R15                 MIPS32_EF_R15
+#define EF_R16                 MIPS32_EF_R16
+#define EF_R17                 MIPS32_EF_R17
+#define EF_R18                 MIPS32_EF_R18
+#define EF_R19                 MIPS32_EF_R19
+#define EF_R20                 MIPS32_EF_R20
+#define EF_R21                 MIPS32_EF_R21
+#define EF_R22                 MIPS32_EF_R22
+#define EF_R23                 MIPS32_EF_R23
+#define EF_R24                 MIPS32_EF_R24
+#define EF_R25                 MIPS32_EF_R25
+#define EF_R26                 MIPS32_EF_R26
+#define EF_R27                 MIPS32_EF_R27
+#define EF_R28                 MIPS32_EF_R28
+#define EF_R29                 MIPS32_EF_R29
+#define EF_R30                 MIPS32_EF_R30
+#define EF_R31                 MIPS32_EF_R31
+#define EF_LO                  MIPS32_EF_LO
+#define EF_HI                  MIPS32_EF_HI
+#define EF_CP0_EPC             MIPS32_EF_CP0_EPC
+#define EF_CP0_BADVADDR                MIPS32_EF_CP0_BADVADDR
+#define EF_CP0_STATUS          MIPS32_EF_CP0_STATUS
+#define EF_CP0_CAUSE           MIPS32_EF_CP0_CAUSE
+#define EF_UNUSED0             MIPS32_EF_UNUSED0
+#define EF_SIZE                        MIPS32_EF_SIZE
+
+#elif defined(CONFIG_64BIT)
+
+#define EF_R0                  MIPS64_EF_R0
+#define EF_R1                  MIPS64_EF_R1
+#define EF_R2                  MIPS64_EF_R2
+#define EF_R3                  MIPS64_EF_R3
+#define EF_R4                  MIPS64_EF_R4
+#define EF_R5                  MIPS64_EF_R5
+#define EF_R6                  MIPS64_EF_R6
+#define EF_R7                  MIPS64_EF_R7
+#define EF_R8                  MIPS64_EF_R8
+#define EF_R9                  MIPS64_EF_R9
+#define EF_R10                 MIPS64_EF_R10
+#define EF_R11                 MIPS64_EF_R11
+#define EF_R12                 MIPS64_EF_R12
+#define EF_R13                 MIPS64_EF_R13
+#define EF_R14                 MIPS64_EF_R14
+#define EF_R15                 MIPS64_EF_R15
+#define EF_R16                 MIPS64_EF_R16
+#define EF_R17                 MIPS64_EF_R17
+#define EF_R18                 MIPS64_EF_R18
+#define EF_R19                 MIPS64_EF_R19
+#define EF_R20                 MIPS64_EF_R20
+#define EF_R21                 MIPS64_EF_R21
+#define EF_R22                 MIPS64_EF_R22
+#define EF_R23                 MIPS64_EF_R23
+#define EF_R24                 MIPS64_EF_R24
+#define EF_R25                 MIPS64_EF_R25
+#define EF_R26                 MIPS64_EF_R26
+#define EF_R27                 MIPS64_EF_R27
+#define EF_R28                 MIPS64_EF_R28
+#define EF_R29                 MIPS64_EF_R29
+#define EF_R30                 MIPS64_EF_R30
+#define EF_R31                 MIPS64_EF_R31
+#define EF_LO                  MIPS64_EF_LO
+#define EF_HI                  MIPS64_EF_HI
+#define EF_CP0_EPC             MIPS64_EF_CP0_EPC
+#define EF_CP0_BADVADDR                MIPS64_EF_CP0_BADVADDR
+#define EF_CP0_STATUS          MIPS64_EF_CP0_STATUS
+#define EF_CP0_CAUSE           MIPS64_EF_CP0_CAUSE
+#define EF_SIZE                        MIPS64_EF_SIZE
 
 #endif /* CONFIG_64BIT */
 
index 895320e25662cd98a673980c449ec916841920d7..e6e5d916221394bf8cca242176fa461065230d85 100644 (file)
@@ -131,6 +131,8 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_FPUBOUND          (1<<TIF_FPUBOUND)
 #define _TIF_LOAD_WATCH                (1<<TIF_LOAD_WATCH)
 
+#define _TIF_WORK_SYSCALL_ENTRY        (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)
+
 /* work to do in syscall_trace_leave() */
 #define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
 
index 202e581e609653ea3f2b651cb2387ba9e04fe65e..7fdf1de0447f8e990d2874332a8dbbe433c99bce 100644 (file)
@@ -58,12 +58,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
 #include <asm/processor.h>
 
-/*
- * When this file is selected, we are definitely running a 64bit kernel.
- * So using the right regs define in asm/reg.h
- */
-#define WANT_COMPAT_REG_H
-
 /* These MUST be defined before elf.h gets included */
 extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs);
 #define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
@@ -135,21 +129,21 @@ void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
 {
        int i;
 
-       for (i = 0; i < EF_R0; i++)
+       for (i = 0; i < MIPS32_EF_R0; i++)
                grp[i] = 0;
-       grp[EF_R0] = 0;
+       grp[MIPS32_EF_R0] = 0;
        for (i = 1; i <= 31; i++)
-               grp[EF_R0 + i] = (elf_greg_t) regs->regs[i];
-       grp[EF_R26] = 0;
-       grp[EF_R27] = 0;
-       grp[EF_LO] = (elf_greg_t) regs->lo;
-       grp[EF_HI] = (elf_greg_t) regs->hi;
-       grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
-       grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
-       grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
-       grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
-#ifdef EF_UNUSED0
-       grp[EF_UNUSED0] = 0;
+               grp[MIPS32_EF_R0 + i] = (elf_greg_t) regs->regs[i];
+       grp[MIPS32_EF_R26] = 0;
+       grp[MIPS32_EF_R27] = 0;
+       grp[MIPS32_EF_LO] = (elf_greg_t) regs->lo;
+       grp[MIPS32_EF_HI] = (elf_greg_t) regs->hi;
+       grp[MIPS32_EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
+       grp[MIPS32_EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
+       grp[MIPS32_EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
+       grp[MIPS32_EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
+#ifdef MIPS32_EF_UNUSED0
+       grp[MIPS32_EF_UNUSED0] = 0;
 #endif
 }
 
index c01b307317a9635b88394d18164710262e206234..bffbbc5578796dc4fb6aa2fb2da94c1f1a6595d6 100644 (file)
@@ -256,11 +256,13 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
 
        /* Setup Intr to Pin mapping */
        if (pin & GIC_MAP_TO_NMI_MSK) {
+               int i;
+
                GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
                /* FIXME: hack to route NMI to all cpu's */
-               for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
+               for (i = 0; i < NR_CPUS; i += 32) {
                        GICWRITE(GIC_REG_ADDR(SHARED,
-                                         GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
+                                         GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)),
                                 0xffffffff);
                }
        } else {
index 33d067148e61ba6d1526529ef735b24f2c3017c3..3efbf0b29c1beec4297b2da9ea56135017930204 100644 (file)
@@ -123,7 +123,11 @@ NESTED(_mcount, PT_SIZE, ra)
         nop
 #endif
        b       ftrace_stub
+#ifdef CONFIG_32BIT
+        addiu sp, sp, 8
+#else
         nop
+#endif
 
 static_trace:
        MCOUNT_SAVE_REGS
@@ -133,6 +137,9 @@ static_trace:
         move   a1, AT          /* arg2: parent's return address */
 
        MCOUNT_RESTORE_REGS
+#ifdef CONFIG_32BIT
+       addiu sp, sp, 8
+#endif
        .globl ftrace_stub
 ftrace_stub:
        RETURN_BACK
@@ -181,6 +188,11 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra)
        jal     prepare_ftrace_return
         nop
        MCOUNT_RESTORE_REGS
+#ifndef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_32BIT
+       addiu sp, sp, 8
+#endif
+#endif
        RETURN_BACK
        END(ftrace_graph_caller)
 
index 9c6299c733a317ce5c975ce91b054297502ba960..1b95b24432216b44c1ad21b4aa310ae20b93c132 100644 (file)
@@ -161,6 +161,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
                __get_user(fregs[i], i + (__u64 __user *) data);
 
        __get_user(child->thread.fpu.fcr31, data + 64);
+       child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
 
        /* FIR may not be written.  */
 
@@ -451,7 +452,7 @@ long arch_ptrace(struct task_struct *child, long request,
                        break;
 #endif
                case FPC_CSR:
-                       child->thread.fpu.fcr31 = data;
+                       child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
                        break;
                case DSP_BASE ... DSP_BASE + 5: {
                        dspreg_t *dregs;
index 9b36424b03c5f41aa48312c770f90e69a43f6fae..ed5bafb5d637535fce344f7983a12b22587e0e7f 100644 (file)
@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
 
 stack_done:
        lw      t0, TI_FLAGS($28)       # syscall tracing enabled?
-       li      t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+       li      t1, _TIF_WORK_SYSCALL_ENTRY
        and     t0, t1
        bnez    t0, syscall_trace_entry # -> yes
 
index 97a5909a61cf0c623dfdf8284eaf11bf13e7adf7..be6627ead619e72b35bea9f8d031af3b21a2b06a 100644 (file)
@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
 
        sd      a3, PT_R26(sp)          # save a3 for syscall restarting
 
-       li      t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+       li      t1, _TIF_WORK_SYSCALL_ENTRY
        LONG_L  t0, TI_FLAGS($28)       # syscall tracing enabled?
        and     t0, t1, t0
        bnez    t0, syscall_trace_entry
index edcb6594e7b5b32a8d658bbe8c69de6626d4fbf0..cab150789c8d8412409506c99143a45f04717e80 100644 (file)
@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
 
        sd      a3, PT_R26(sp)          # save a3 for syscall restarting
 
-       li      t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+       li      t1, _TIF_WORK_SYSCALL_ENTRY
        LONG_L  t0, TI_FLAGS($28)       # syscall tracing enabled?
        and     t0, t1, t0
        bnez    t0, n32_syscall_trace_entry
index 74f485d3c0ef41bb7f73204f06a7a801d0465f13..37605dc8eef7a9c72d5743ad3ad5d1f50088fdfb 100644 (file)
@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
        PTR     4b, bad_stack
        .previous
 
-       li      t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+       li      t1, _TIF_WORK_SYSCALL_ENTRY
        LONG_L  t0, TI_FLAGS($28)       # syscall tracing enabled?
        and     t0, t1, t0
        bnez    t0, trace_a_syscall
index 203d8857070dd225f2d8fdea7bf986ad7a6560cc..2c81265bcf46591c7d6859e7f49ad5767b749c77 100644 (file)
@@ -604,7 +604,6 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        case sdc1_op:
                die_if_kernel("Unaligned FP access in kernel code", regs);
                BUG_ON(!used_math());
-               BUG_ON(!is_fpu_owner());
 
                lose_fpu(1);    /* Save FPU state for the emulator. */
                res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
index 2c7b3ade8ec0eb1cc78b8fe1263a8fadff9abe47..5cd48572450ef0b76e85488e2ee08f672962a849 100644 (file)
@@ -196,16 +196,21 @@ kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
        return -ENOIOCTLCMD;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
 {
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+                           unsigned long npages)
 {
        return 0;
 }
 
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                 struct kvm_memory_slot *memslot,
                                 struct kvm_userspace_memory_region *mem,
index 21813beec7a56f8c17ff493ca6000324168850fe..c2ec87e5d1cc6980f6f6e3fe72590b56079234af 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/highmem.h>
 #include <linux/kernel.h>
 #include <linux/linkage.h>
+#include <linux/preempt.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
@@ -601,11 +602,13 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
        /* Catch bad driver code */
        BUG_ON(size == 0);
 
+       preempt_disable();
        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
                else
                        blast_scache_range(addr, addr + size);
+               preempt_enable();
                __sync();
                return;
        }
@@ -621,6 +624,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_dcache_range(addr, addr + size);
        }
+       preempt_enable();
 
        bc_wback_inv(addr, size);
        __sync();
@@ -631,6 +635,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
        /* Catch bad driver code */
        BUG_ON(size == 0);
 
+       preempt_disable();
        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
@@ -645,6 +650,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
                         */
                        blast_inv_scache_range(addr, addr + size);
                }
+               preempt_enable();
                __sync();
                return;
        }
@@ -655,6 +661,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_inv_dcache_range(addr, addr + size);
        }
+       preempt_enable();
 
        bc_inv(addr, size);
        __sync();
index 0fead53d1c26b261affa89d00c1fa06844f9e602..0214a43b9911b00e552d99c3396b07f9f12f6481 100644 (file)
@@ -41,8 +41,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
        const int field = sizeof(unsigned long) * 2;
        siginfo_t info;
        int fault;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                                                (write ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 #if 0
        printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
@@ -92,6 +91,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
@@ -113,6 +114,7 @@ good_area:
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        } else {
                if (cpu_has_rixi) {
                        if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
@@ -240,6 +242,8 @@ out_of_memory:
         * (which will retry the fault, or kill us if we got oom-killed).
         */
        up_read(&mm->mmap_sem);
+       if (!user_mode(regs))
+               goto no_context;
        pagefault_out_of_memory();
        return;
 
index 9b973e0af9cbbd2ba7c164fcfea8d9ac93265d36..d340d53c345b621bb93084993a2b685ef07b4ff2 100644 (file)
@@ -74,6 +74,7 @@
  */
 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL_GPL(empty_zero_page);
+EXPORT_SYMBOL(zero_page_mask);
 
 /*
  * Not static inline because used by IP27 special magic initialization code
index afeef93f81a79829ec564eaa8ddafebd4ed7e377..a91a7a99f70f3f299550d8b9c01c888018ee46d7 100644 (file)
@@ -1091,6 +1091,7 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 struct mips_huge_tlb_info {
        int huge_pte;
        int restore_scratch;
+       bool need_reload_pte;
 };
 
 static struct mips_huge_tlb_info __cpuinit
@@ -1105,6 +1106,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 
        rv.huge_pte = scratch;
        rv.restore_scratch = 0;
+       rv.need_reload_pte = false;
 
        if (check_for_high_segbits) {
                UASM_i_MFC0(p, tmp, C0_BADVADDR);
@@ -1293,6 +1295,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
        } else {
                htlb_info.huge_pte = K0;
                htlb_info.restore_scratch = 0;
+               htlb_info.need_reload_pte = true;
                vmalloc_mode = refill_noscratch;
                /*
                 * create the plain linear handler
@@ -1329,6 +1332,8 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
        }
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        uasm_l_tlb_huge_update(&l, p);
+       if (htlb_info.need_reload_pte)
+               UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
        build_huge_update_entries(&p, htlb_info.huge_pte, K1);
        build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
                                   htlb_info.restore_scratch);
index d48a84fd7fae51b298ee1b03956dd823dd82ffdf..3516cbdf1ee93acb82ebef6428f79df9af104514 100644 (file)
@@ -171,6 +171,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
        if (in_atomic() || !mm)
                goto no_context;
 
+       if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
 
@@ -345,9 +347,10 @@ no_context:
  */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
-       if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
-               do_exit(SIGKILL);
+       if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
+               pagefault_out_of_memory();
+               return;
+       }
        goto no_context;
 
 do_sigbus:
index d8a455ede5a751c8dc31604526e425fb50167ee0..fec8bf97d806422ce76a578c83576733633a9bf6 100644 (file)
@@ -853,37 +853,44 @@ UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00)
 
 /* ========================================================[ return ] === */
 
+_resume_userspace:
+       DISABLE_INTERRUPTS(r3,r4)
+       l.lwz   r4,TI_FLAGS(r10)
+       l.andi  r13,r4,_TIF_WORK_MASK
+       l.sfeqi r13,0
+       l.bf    _restore_all
+        l.nop
+
 _work_pending:
-       /*
-        * if (current_thread_info->flags & _TIF_NEED_RESCHED)
-        *     schedule();
-        */
-       l.lwz   r5,TI_FLAGS(r10)
-       l.andi  r3,r5,_TIF_NEED_RESCHED
-       l.sfnei r3,0
-       l.bnf   _work_notifysig
+       l.lwz   r5,PT_ORIG_GPR11(r1)
+       l.sfltsi r5,0
+       l.bnf   1f
         l.nop
-       l.jal   schedule
+       l.andi  r5,r5,0
+1:
+       l.jal   do_work_pending
+        l.ori  r3,r1,0                 /* pt_regs */
+
+       l.sfeqi r11,0
+       l.bf    _restore_all
         l.nop
-       l.j     _resume_userspace
+       l.sfltsi r11,0
+       l.bnf   1f
         l.nop
-
-/* Handle pending signals and notify-resume requests.
- * do_notify_resume must be passed the latest pushed pt_regs, not
- * necessarily the "userspace" ones.  Also, pt_regs->syscallno
- * must be set so that the syscall restart functionality works.
- */
-_work_notifysig:
-       l.jal   do_notify_resume
-        l.ori  r3,r1,0           /* pt_regs */
-
-_resume_userspace:
-       DISABLE_INTERRUPTS(r3,r4)
-       l.lwz   r3,TI_FLAGS(r10)
-       l.andi  r3,r3,_TIF_WORK_MASK
-       l.sfnei r3,0
-       l.bf    _work_pending
+       l.and   r11,r11,r0
+       l.ori   r11,r11,__NR_restart_syscall
+       l.j     _syscall_check_trace_enter
         l.nop
+1:
+       l.lwz   r11,PT_ORIG_GPR11(r1)
+       /* Restore arg registers */
+       l.lwz   r3,PT_GPR3(r1)
+       l.lwz   r4,PT_GPR4(r1)
+       l.lwz   r5,PT_GPR5(r1)
+       l.lwz   r6,PT_GPR6(r1)
+       l.lwz   r7,PT_GPR7(r1)
+       l.j     _syscall_check_trace_enter
+        l.lwz  r8,PT_GPR8(r1)
 
 _restore_all:
        RESTORE_ALL
index ae167f7e081aa0368cb571093df8f16a9dcf71d9..c277ec82783d6fadef157be24c91ff734d76ec00 100644 (file)
 #include <linux/tracehook.h>
 
 #include <asm/processor.h>
+#include <asm/syscall.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 
 #define DEBUG_SIG 0
 
 struct rt_sigframe {
-       struct siginfo *pinfo;
-       void *puc;
        struct siginfo info;
        struct ucontext uc;
        unsigned char retcode[16];      /* trampoline code */
 };
 
-static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+static int restore_sigcontext(struct pt_regs *regs,
+                             struct sigcontext __user *sc)
 {
-       unsigned int err = 0;
+       int err = 0;
 
-       /* Alwys make any pending restarted system call return -EINTR */
+       /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
        /*
@@ -53,25 +53,21 @@ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
         * (sc is already checked for VERIFY_READ since the sigframe was
         *  checked in sys_sigreturn previously)
         */
-       if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)))
-               goto badframe;
-       if (__copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long)))
-               goto badframe;
-       if (__copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long)))
-               goto badframe;
+       err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long));
+       err |= __copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long));
+       err |= __copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long));
 
        /* make sure the SM-bit is cleared so user-mode cannot fool us */
        regs->sr &= ~SPR_SR_SM;
 
+       regs->orig_gpr11 = -1;  /* Avoid syscall restart checks */
+
        /* TODO: the other ports use regs->orig_XX to disable syscall checks
         * after this completes, but we don't use that mechanism. maybe we can
         * use it now ?
         */
 
        return err;
-
-badframe:
-       return 1;
 }
 
 asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
@@ -111,21 +107,18 @@ badframe:
  * Set up a signal frame.
  */
 
-static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
-                           unsigned long mask)
+static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
        int err = 0;
 
        /* copy the regs */
-
+       /* There should be no need to save callee-saved registers here...
+        * ...but we save them anyway.  Revisit this
+        */
        err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
        err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
        err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
 
-       /* then some other stuff */
-
-       err |= __put_user(mask, &sc->oldmask);
-
        return err;
 }
 
@@ -181,24 +174,18 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        int err = 0;
 
        frame = get_sigframe(ka, regs, sizeof(*frame));
-
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
                goto give_sigsegv;
 
-       err |= __put_user(&frame->info, &frame->pinfo);
-       err |= __put_user(&frame->uc, &frame->puc);
-
+       /* Create siginfo.  */
        if (ka->sa.sa_flags & SA_SIGINFO)
                err |= copy_siginfo_to_user(&frame->info, info);
-       if (err)
-               goto give_sigsegv;
 
-       /* Clear all the bits of the ucontext we don't use.  */
-       err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
+       /* Create the ucontext.  */
        err |= __put_user(0, &frame->uc.uc_flags);
        err |= __put_user(NULL, &frame->uc.uc_link);
        err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
-       err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
+       err |= setup_sigcontext(regs, &frame->uc.uc_mcontext);
 
        err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
@@ -207,9 +194,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 
        /* trampoline - the desired return ip is the retcode itself */
        return_ip = (unsigned long)&frame->retcode;
-       /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
-       err |= __put_user(0xa960, (short *)(frame->retcode + 0));
-       err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2));
+       /* This is:
+               l.ori r11,r0,__NR_sigreturn
+               l.sys 1
+        */
+       err |= __put_user(0xa960,             (short *)(frame->retcode + 0));
+       err |= __put_user(__NR_rt_sigreturn,  (short *)(frame->retcode + 2));
        err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
        err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
 
@@ -262,82 +252,106 @@ handle_signal(unsigned long sig,
  * mode below.
  */
 
-void do_signal(struct pt_regs *regs)
+int do_signal(struct pt_regs *regs, int syscall)
 {
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-
-       /*
-        * We want the common case to go fast, which
-        * is why we may in certain cases get here from
-        * kernel mode. Just return without doing anything
-        * if so.
-        */
-       if (!user_mode(regs))
-               return;
-
-       signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-
-       /* If we are coming out of a syscall then we need
-        * to check if the syscall was interrupted and wants to be
-        * restarted after handling the signal.  If so, the original
-        * syscall number is put back into r11 and the PC rewound to
-        * point at the l.sys instruction that resulted in the
-        * original syscall.  Syscall results other than the four
-        * below mean that the syscall executed to completion and no
-        * restart is necessary.
-        */
-       if (regs->orig_gpr11) {
-               int restart = 0;
-
-               switch (regs->gpr[11]) {
+       unsigned long continue_addr = 0;
+       unsigned long restart_addr = 0;
+       unsigned long retval = 0;
+       int restart = 0;
+
+       if (syscall) {
+               continue_addr = regs->pc;
+               restart_addr = continue_addr - 4;
+               retval = regs->gpr[11];
+
+               /*
+                * Setup syscall restart here so that a debugger will
+                * see the already changed PC.
+                */
+               switch (retval) {
                case -ERESTART_RESTARTBLOCK:
+                       restart = -2;
+                       /* Fall through */
                case -ERESTARTNOHAND:
-                       /* Restart if there is no signal handler */
-                       restart = (signr <= 0);
-                       break;
                case -ERESTARTSYS:
-                       /* Restart if there no signal handler or
-                        * SA_RESTART flag is set */
-                       restart = (signr <= 0 || (ka.sa.sa_flags & SA_RESTART));
-                       break;
                case -ERESTARTNOINTR:
-                       /* Always restart */
-                       restart = 1;
+                       restart++;
+                       regs->gpr[11] = regs->orig_gpr11;
+                       regs->pc = restart_addr;
                        break;
                }
+       }
 
-               if (restart) {
-                       if (regs->gpr[11] == -ERESTART_RESTARTBLOCK)
-                               regs->gpr[11] = __NR_restart_syscall;
-                       else
-                               regs->gpr[11] = regs->orig_gpr11;
-                       regs->pc -= 4;
-               } else {
-                       regs->gpr[11] = -EINTR;
+       /*
+        * Get the signal to deliver.  When running under ptrace, at this
+        * point the debugger may change all our registers ...
+        */
+       signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+       /*
+        * Depending on the signal settings we may need to revert the
+        * decision to restart the system call.  But skip this if a
+        * debugger has chosen to restart at a different PC.
+        */
+       if (signr > 0) {
+               if (unlikely(restart) && regs->pc == restart_addr) {
+                       if (retval == -ERESTARTNOHAND ||
+                           retval == -ERESTART_RESTARTBLOCK
+                           || (retval == -ERESTARTSYS
+                               && !(ka.sa.sa_flags & SA_RESTART))) {
+                               /* No automatic restart */
+                               regs->gpr[11] = -EINTR;
+                               regs->pc = continue_addr;
+                       }
                }
-       }
 
-       if (signr <= 0) {
-               /* no signal to deliver so we just put the saved sigmask
-                * back */
-               restore_saved_sigmask();
-       } else {                /* signr > 0 */
-               /* Whee!  Actually deliver the signal.  */
                handle_signal(signr, &info, &ka, regs);
+       } else {
+               /* no handler */
+               restore_saved_sigmask();
+               /*
+                * Restore pt_regs PC as syscall restart will be handled by
+                * kernel without return to userspace
+                */
+               if (unlikely(restart) && regs->pc == restart_addr) {
+                       regs->pc = continue_addr;
+                       return restart;
+               }
        }
 
-       return;
+       return 0;
 }
 
-asmlinkage void do_notify_resume(struct pt_regs *regs)
+asmlinkage int
+do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
-       if (current_thread_info()->flags & _TIF_SIGPENDING)
-               do_signal(regs);
-
-       if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) {
-               clear_thread_flag(TIF_NOTIFY_RESUME);
-               tracehook_notify_resume(regs);
-       }
+       do {
+               if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+                       schedule();
+               } else {
+                       if (unlikely(!user_mode(regs)))
+                               return 0;
+                       local_irq_enable();
+                       if (thread_flags & _TIF_SIGPENDING) {
+                               int restart = do_signal(regs, syscall);
+                               if (unlikely(restart)) {
+                                       /*
+                                        * Restart without handlers.
+                                        * Deal with it without leaving
+                                        * the kernel space.
+                                        */
+                                       return restart;
+                               }
+                               syscall = 0;
+                       } else {
+                               clear_thread_flag(TIF_NOTIFY_RESUME);
+                               tracehook_notify_resume(regs);
+                       }
+               }
+               local_irq_disable();
+               thread_flags = current_thread_info()->flags;
+       } while (thread_flags & _TIF_WORK_MASK);
+       return 0;
 }
index e2bfafce66c53661064e2cf5e4b0d5d036e2c6a7..0703acf7d3276811919fd3d398ada99b1b9c6d50 100644 (file)
@@ -86,6 +86,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
        if (user_mode(regs)) {
                /* Exception was in userspace: reenable interrupts */
                local_irq_enable();
+               flags |= FAULT_FLAG_USER;
        } else {
                /* If exception was in a syscall, then IRQ's may have
                 * been enabled or disabled.  If they were enabled,
@@ -267,10 +268,10 @@ out_of_memory:
        __asm__ __volatile__("l.nop 1");
 
        up_read(&mm->mmap_sem);
-       printk("VM: killing process %s\n", tsk->comm);
-       if (user_mode(regs))
-               do_exit(SIGKILL);
-       goto no_context;
+       if (!user_mode(regs))
+               goto no_context;
+       pagefault_out_of_memory();
+       return;
 
 do_sigbus:
        up_read(&mm->mmap_sem);
index 96ec3982be8d37271b6e3236d00d3a0f3f5ce4d1..94607bfa273db574159fdd8bebcf3ef27a8ed3c6 100644 (file)
@@ -46,7 +46,12 @@ cflags-y     := -pipe
 
 # These flags should be implied by an hppa-linux configuration, but they
 # are not in gcc 3.2.
-cflags-y       += -mno-space-regs -mfast-indirect-calls
+cflags-y       += -mno-space-regs
+
+# -mfast-indirect-calls is only relevant for 32-bit kernels.
+ifndef CONFIG_64BIT
+cflags-y       += -mfast-indirect-calls
+endif
 
 # Currently we save and restore fpregs on all kernel entry/interruption paths.
 # If that gets optimized, we might need to disable the use of fpregs in the
index 0a3eada1863b71e1e859e5bf1536da089a65ffd1..f395cde7b5931a2f6d8d9cb1903c5dfffa319185 100644 (file)
@@ -36,23 +36,16 @@ struct shmid64_ds {
        unsigned int            __unused2;
 };
 
-#ifdef CONFIG_64BIT
-/* The 'unsigned int' (formerly 'unsigned long') data types below will
- * ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on
- * a wide kernel, but if some of these values are meant to contain pointers
- * they may need to be 'long long' instead. -PB XXX FIXME
- */
-#endif
 struct shminfo64 {
-       unsigned int    shmmax;
-       unsigned int    shmmin;
-       unsigned int    shmmni;
-       unsigned int    shmseg;
-       unsigned int    shmall;
-       unsigned int    __unused1;
-       unsigned int    __unused2;
-       unsigned int    __unused3;
-       unsigned int    __unused4;
+       unsigned long   shmmax;
+       unsigned long   shmmin;
+       unsigned long   shmmni;
+       unsigned long   shmseg;
+       unsigned long   shmall;
+       unsigned long   __unused1;
+       unsigned long   __unused2;
+       unsigned long   __unused3;
+       unsigned long   __unused4;
 };
 
 #endif /* _PARISC_SHMBUF_H */
index a2fa297196bc19f1de4f021ebec6e5338b237313..f5645d6a89f2c9c79e8e22cd9a7201df6dc389df 100644 (file)
@@ -69,8 +69,6 @@
 #define SA_NOMASK      SA_NODEFER
 #define SA_ONESHOT     SA_RESETHAND
 
-#define SA_RESTORER    0x04000000 /* obsolete -- ignored */
-
 #define MINSIGSTKSZ    2048
 #define SIGSTKSZ       8192
 
index 10a0c2aad8cfd1e36dc2f75ffcf7ab841a094b86..b24732d1bdbf7ad5ae28864c4f40b952bac84273 100644 (file)
        ENTRY_COMP(msgsnd)
        ENTRY_COMP(msgrcv)
        ENTRY_SAME(msgget)              /* 190 */
-       ENTRY_SAME(msgctl)
-       ENTRY_SAME(shmat)
+       ENTRY_COMP(msgctl)
+       ENTRY_COMP(shmat)
        ENTRY_SAME(shmdt)
        ENTRY_SAME(shmget)
-       ENTRY_SAME(shmctl)              /* 195 */
+       ENTRY_COMP(shmctl)              /* 195 */
        ENTRY_SAME(ni_syscall)          /* streams1 */
        ENTRY_SAME(ni_syscall)          /* streams2 */
        ENTRY_SAME(lstat64)
        ENTRY_SAME(epoll_ctl)           /* 225 */
        ENTRY_SAME(epoll_wait)
        ENTRY_SAME(remap_file_pages)
-       ENTRY_SAME(semtimedop)
+       ENTRY_COMP(semtimedop)
        ENTRY_COMP(mq_open)
        ENTRY_SAME(mq_unlink)           /* 230 */
        ENTRY_COMP(mq_timedsend)
index f247a3480e8e0cd7e276f4fd04004cbb608b3e11..d10d27a720c0d1f323c2248f01cc93193e73ca1e 100644 (file)
@@ -180,6 +180,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+       if (acc_type & VM_WRITE)
+               flags |= FAULT_FLAG_WRITE;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma_prev(mm, address, &prev_vma);
@@ -203,8 +207,7 @@ good_area:
         * fault.
         */
 
-       fault = handle_mm_fault(mm, vma, address,
-                       flags | ((acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0));
+       fault = handle_mm_fault(mm, vma, address, flags);
 
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;
index fe404e77246ed47e87bd47d2b9e88b7192dcfed4..7f656f119ea67513eb7aab29419aebf79a19e7de 100644 (file)
@@ -138,6 +138,7 @@ config PPC
        select ARCH_USE_BUILTIN_BSWAP
        select OLD_SIGSUSPEND
        select OLD_SIGACTION if PPC32
+       select ARCH_SUPPORTS_ATOMIC_RMW
 
 config EARLY_PRINTK
        bool
index ae782254e731bbcd03c8152379b4adf6ddea01a7..f89da808ce310e1f373da55e96fc52d58bd4ccc9 100644 (file)
 #    define SMPWMB      eieio
 #endif
 
+#define __lwsync()     __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+
 #define smp_mb()       mb()
-#define smp_rmb()      __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define smp_rmb()      __lwsync()
 #define smp_wmb()      __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
 #define smp_read_barrier_depends()     read_barrier_depends()
 #else
+#define __lwsync()     barrier()
+
 #define smp_mb()       barrier()
 #define smp_rmb()      barrier()
 #define smp_wmb()      barrier()
 #define data_barrier(x)        \
        asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
 
+#define smp_store_release(p, v)                                                \
+do {                                                                   \
+       compiletime_assert_atomic_type(*p);                             \
+       __lwsync();                                                     \
+       ACCESS_ONCE(*p) = (v);                                          \
+} while (0)
+
+#define smp_load_acquire(p)                                            \
+({                                                                     \
+       typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
+       compiletime_assert_atomic_type(*p);                             \
+       __lwsync();                                                     \
+       ___p1;                                                          \
+})
+
 #endif /* _ASM_POWERPC_BARRIER_H */
index af326cde7cb62bf2f07c70e6d0992e154504d5e0..f391f3fbde8b28eefb3ee919e651ba568d2c59e0 100644 (file)
@@ -53,7 +53,6 @@
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 
-struct kvm;
 extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 extern int kvm_unmap_hva_range(struct kvm *kvm,
                               unsigned long start, unsigned long end);
@@ -81,10 +80,6 @@ extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 /* Physical Address Mask - allowed range of real mode RAM access */
 #define KVM_PAM                        0x0fffffffffffffffULL
 
-struct kvm;
-struct kvm_run;
-struct kvm_vcpu;
-
 struct lppaca;
 struct slb_shadow;
 struct dtl_entry;
@@ -628,4 +623,12 @@ struct kvm_vcpu_arch {
 #define __KVM_HAVE_ARCH_WQP
 #define __KVM_HAVE_CREATE_DEVICE
 
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_exit(void) {}
+
 #endif /* __POWERPC_KVM_HOST_H__ */
index a5287fe03d773e7d541e26757b8b5fad4a9a3af3..e2dd05c81bc6e9198c9e564fb287def87778b9de 100644 (file)
@@ -143,9 +143,11 @@ extern struct kvmppc_linear_info *kvm_alloc_hpt(void);
 extern void kvm_release_hpt(struct kvmppc_linear_info *li);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
-extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+extern void kvmppc_core_free_memslot(struct kvm *kvm,
+                                    struct kvm_memory_slot *free,
                                     struct kvm_memory_slot *dont);
-extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+extern int kvmppc_core_create_memslot(struct kvm *kvm,
+                                     struct kvm_memory_slot *slot,
                                      unsigned long npages);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
index d836d945068d032cb072013cf027f7a43e36b88a..063fcadd1a00c50dec3f4b38240abce7ae3007a7 100644 (file)
 
 #ifndef __ASSEMBLY__
 
+#include <asm/barrier.h>       /* for smp_rmb() */
+
 /*
  * With 64K pages on hash table, we have a special PTE format that
  * uses a second "half" of the page table to encode sub-page information
  * in order to deal with 64K made of 4K HW pages. Thus we override the
  * generic accessors and iterators here
  */
-#define __real_pte(e,p)        ((real_pte_t) { \
-                       (e), (pte_val(e) & _PAGE_COMBO) ? \
-                               (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
-#define __rpte_to_hidx(r,index)        ((pte_val((r).pte) & _PAGE_COMBO) ? \
-        (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
+#define __real_pte __real_pte
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+{
+       real_pte_t rpte;
+
+       rpte.pte = pte;
+       rpte.hidx = 0;
+       if (pte_val(pte) & _PAGE_COMBO) {
+               /*
+                * Make sure we order the hidx load against the _PAGE_COMBO
+                * check. The store side ordering is done in __hash_page_4K
+                */
+               smp_rmb();
+               rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
+       }
+       return rpte;
+}
+
+static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
+{
+       if ((pte_val(rpte.pte) & _PAGE_COMBO))
+               return (rpte.hidx >> (index<<2)) & 0xf;
+       return (pte_val(rpte.pte) >> 12) & 0xf;
+}
+
 #define __rpte_to_pte(r)       ((r).pte)
 #define __rpte_sub_valid(rpte, index) \
        (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
index becc08e6a65c585e0b6bc995f7699d74e2c36338..637c97fcbeb57dac841d1b3a4f3a4e36b50be951 100644 (file)
                                        STACK_FRAME_OVERHEAD + 288)
 #define STACK_FRAME_MARKER     12
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define STACK_FRAME_MIN_SIZE   32
+#else
+#define STACK_FRAME_MIN_SIZE   STACK_FRAME_OVERHEAD
+#endif
+
 /* Size of dummy stack frame allocated when calling signal handler. */
 #define __SIGNAL_FRAMESIZE     128
 #define __SIGNAL_FRAMESIZE32   64
@@ -46,6 +52,7 @@
 #define STACK_FRAME_REGS_MARKER        ASM_CONST(0x72656773)
 #define STACK_INT_FRAME_SIZE   (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
 #define STACK_FRAME_MARKER     2
+#define STACK_FRAME_MIN_SIZE   STACK_FRAME_OVERHEAD
 
 /* Size of stack frame allocated when calling signal handler. */
 #define __SIGNAL_FRAMESIZE     64
index eb643f8625796711f93fbad6d92022f957517df9..60019a6fd6bbf3bbaaa104401ef680f92122d9b9 100644 (file)
@@ -155,6 +155,7 @@ config KVM_MPIC
        bool "KVM in-kernel MPIC emulation"
        depends on KVM && E500
        select HAVE_KVM_IRQCHIP
+       select HAVE_KVM_IRQFD
        select HAVE_KVM_IRQ_ROUTING
        select HAVE_KVM_MSI
        help
index 422de3f4d46cd129dd14c06dd1852dab9bf12463..008cd856c5b52942c7be2d1e136650ea9aca8922 100644 (file)
@@ -5,9 +5,10 @@
 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
 
 ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
+KVM := ../../../virt/kvm
 
-common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o \
-                                               eventfd.o)
+common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
+               $(KVM)/eventfd.o
 
 CFLAGS_44x_tlb.o  := -I.
 CFLAGS_e500_mmu.o := -I.
@@ -53,7 +54,7 @@ kvm-e500mc-objs := \
 kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
 
 kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
-       ../../../virt/kvm/coalesced_mmio.o \
+       $(KVM)/coalesced_mmio.o \
        fpu.o \
        book3s_paired_singles.o \
        book3s_pr.o \
@@ -86,8 +87,8 @@ kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
        book3s_xics.o
 
 kvm-book3s_64-module-objs := \
-       ../../../virt/kvm/kvm_main.o \
-       ../../../virt/kvm/eventfd.o \
+       $(KVM)/kvm_main.o \
+       $(KVM)/eventfd.o \
        powerpc.o \
        emulate.o \
        book3s.o \
@@ -111,7 +112,7 @@ kvm-book3s_32-objs := \
 kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
 
 kvm-objs-$(CONFIG_KVM_MPIC) += mpic.o
-kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(addprefix ../../../virt/kvm/, irqchip.o)
+kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
 
 kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
 
index 102ad8a255f3621774f4ad300a02676fdac1333c..a54c59db7bd82c562c450ab733e0174b5d271575 100644 (file)
@@ -1258,7 +1258,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
        kvm_guest_exit();
 
        preempt_enable();
-       kvm_resched(vcpu);
+       cond_resched();
 
        spin_lock(&vc->lock);
        now = get_tb();
index 1a1b511897733da58ec1a1b79d6c9a66ffdedea2..0a91f47e264b4a3962cb6aec3ba4464d63e011e9 100644 (file)
@@ -1592,12 +1592,12 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
        return -ENOTSUPP;
 }
 
-void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                              struct kvm_memory_slot *dont)
 {
 }
 
-int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                               unsigned long npages)
 {
        return 0;
index 2861ae9eaae6e2f841af45bc928cec309303d66c..b58d61039015b8eec1f1aa909908703d7984323e 100644 (file)
@@ -1822,8 +1822,7 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
        return 0;
 }
 
-int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
-                         struct kvm_kernel_irq_routing_entry *e,
+int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue)
 {
        int r = -EINVAL;
@@ -1835,7 +1834,6 @@ int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
                e->irqchip.pin = ue->u.irqchip.pin;
                if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
                        goto out;
-               rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
                break;
        case KVM_IRQ_ROUTING_MSI:
                e->set = kvm_set_msi;
index 6316ee336e888e22636f557d1623c54b30d7a207..ea4cfdc991daa9a99faeff03734105a96b709bef 100644 (file)
@@ -246,24 +246,16 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
        return r;
 }
 
-int kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void)
 {
        return 0;
 }
 
-void kvm_arch_hardware_disable(void *garbage)
-{
-}
-
 int kvm_arch_hardware_setup(void)
 {
        return 0;
 }
 
-void kvm_arch_hardware_unsetup(void)
-{
-}
-
 void kvm_arch_check_processor_compat(void *rtn)
 {
        *(int *)rtn = kvmppc_core_check_processor_compat();
@@ -296,11 +288,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
        mutex_unlock(&kvm->lock);
 }
 
-void kvm_arch_sync_events(struct kvm *kvm)
-{
-}
-
-int kvm_dev_ioctl_check_extension(long ext)
+int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 {
        int r;
 
@@ -409,15 +397,16 @@ long kvm_arch_dev_ioctl(struct file *filp,
        return -EINVAL;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
 {
-       kvmppc_core_free_memslot(free, dont);
+       kvmppc_core_free_memslot(kvm, free, dont);
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+                           unsigned long npages)
 {
-       return kvmppc_core_create_memslot(slot, npages);
+       return kvmppc_core_create_memslot(kvm, slot, npages);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -436,10 +425,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
        kvmppc_core_commit_memory_region(kvm, mem, old);
 }
 
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
-}
-
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
 {
@@ -1125,7 +1110,3 @@ int kvm_arch_init(void *opaque)
 {
        return 0;
 }
-
-void kvm_arch_exit(void)
-{
-}
index 8726779e1409b5da36c1e1ea852276cf47d4252c..d9196c9f93d9dd7c1cb075fa3e0b732cd0ba9993 100644 (file)
@@ -223,9 +223,6 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
        is_write = error_code & ESR_DST;
 #endif /* CONFIG_4xx || CONFIG_BOOKE */
 
-       if (is_write)
-               flags |= FAULT_FLAG_WRITE;
-
 #ifdef CONFIG_PPC_ICSWX
        /*
         * we need to do this early because this "data storage
@@ -280,6 +277,9 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -408,6 +408,7 @@ good_area:
        } else if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        /* a read */
        } else {
                /* protection fault */
index b7293bba00622fb11ba4b79c3ea390dd4854a605..08c6f3185d45b2914c0add10d99c7eaa56a993c4 100644 (file)
@@ -586,8 +586,8 @@ static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                unmap_cpu_from_node(lcpu);
-               break;
                ret = NOTIFY_OK;
+               break;
 #endif
        }
        return ret;
index 74d1e780748b58f17f8987218a6184c87aae63da..2396dda282cdef0ed5c11c6ab7c3f4f479d0ac04 100644 (file)
@@ -35,7 +35,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
                return 0;               /* must be 16-byte aligned */
        if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
                return 0;
-       if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
+       if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
                return 1;
        /*
         * sp could decrease when we jump off an interrupt stack
index 9a432de363b8d081d0663f8795f28ff7354ce378..bebe64ed5dc32ee84e2d4f7d280a02f3df54ce54 100644 (file)
@@ -158,7 +158,7 @@ static int pseries_remove_memory(struct device_node *np)
 static inline int pseries_remove_memblock(unsigned long base,
                                          unsigned int memblock_size)
 {
-       return -EOPNOTSUPP;
+       return 0;
 }
 static inline int pseries_remove_memory(struct device_node *np)
 {
index 16760eeb79b09ec5ea6da10274157491bb5a5d0f..578680f6207acb62ccc8f52c321c349f510f73a6 100644 (file)
 
 #define set_mb(var, value)             do { var = value; mb(); } while (0)
 
+#define smp_store_release(p, v)                                                \
+do {                                                                   \
+       compiletime_assert_atomic_type(*p);                             \
+       barrier();                                                      \
+       ACCESS_ONCE(*p) = (v);                                          \
+} while (0)
+
+#define smp_load_acquire(p)                                            \
+({                                                                     \
+       typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
+       compiletime_assert_atomic_type(*p);                             \
+       barrier();                                                      \
+       ___p1;                                                          \
+})
+
 #endif /* __ASM_BARRIER_H */
index 16bd5d169cdb4779c975cdd379160ff3ef6d637c..99971dfc6b9acacf5ac6878e06082d2534ca69da 100644 (file)
 
 #ifndef ASM_KVM_HOST_H
 #define ASM_KVM_HOST_H
+
+#include <linux/types.h>
 #include <linux/hrtimer.h>
 #include <linux/interrupt.h>
+#include <linux/kvm_types.h>
 #include <linux/kvm_host.h>
 #include <asm/debug.h>
 #include <asm/cpu.h>
@@ -266,4 +269,18 @@ struct kvm_arch{
 };
 
 extern int sie64a(struct kvm_s390_sie_block *, u64 *);
+
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_check_processor_compat(void *rtn) {}
+static inline void kvm_arch_exit(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm,
+               struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+               struct kvm_memory_slot *slot) {}
+
 #endif
index cd29d2f4e4f355512ecbafd0cfcc0c7fab05042a..bebc0bd8abc2e55be7472b1f07b677cb03656d04 100644 (file)
@@ -89,11 +89,10 @@ static inline void syscall_set_arguments(struct task_struct *task,
                regs->orig_gpr2 = args[0];
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-                                  struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
 #ifdef CONFIG_COMPAT
-       if (test_tsk_thread_flag(task, TIF_31BIT))
+       if (test_tsk_thread_flag(current, TIF_31BIT))
                return AUDIT_ARCH_S390;
 #endif
        return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390;
index a314c57f4e94a5a91c162de68360556babd71f8a..9677d935583ca42a55daf1f1fa3bc8ba2c807ac8 100644 (file)
@@ -314,7 +314,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
                 * psw and gprs are stored on the stack
                 */
                if (addr == (addr_t) &dummy->regs.psw.mask &&
-                   ((data & ~PSW_MASK_USER) != psw_user_bits ||
+                   (((data^psw_user_bits) & ~PSW_MASK_USER) ||
+                    (((data^psw_user_bits) & PSW_MASK_ASC) &&
+                     ((data|psw_user_bits) & PSW_MASK_ASC) == PSW_MASK_ASC) ||
                     ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
                        /* Invalid psw mask. */
                        return -EINVAL;
@@ -627,7 +629,10 @@ static int __poke_user_compat(struct task_struct *child,
                 */
                if (addr == (addr_t) &dummy32->regs.psw.mask) {
                        /* Build a 64 bit psw mask from 31 bit mask. */
-                       if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
+                       if (((tmp^psw32_user_bits) & ~PSW32_MASK_USER) ||
+                           (((tmp^psw32_user_bits) & PSW32_MASK_ASC) &&
+                            ((tmp|psw32_user_bits) & PSW32_MASK_ASC)
+                            == PSW32_MASK_ASC))
                                /* Invalid psw mask. */
                                return -EINVAL;
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
index 8fe9d65a4585b0670c5a1c622d774632b3f39b18..40b4c6470f88a7181b7c8e703db2074016e348d5 100644 (file)
@@ -6,7 +6,8 @@
 # it under the terms of the GNU General Public License (version 2 only)
 # as published by the Free Software Foundation.
 
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o eventfd.o)
+KVM := ../../../virt/kvm
+common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o
 
 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
 
index 5c948177529e281ca7138a1b9bd7ef0186f4cec9..bc79ab00536f7edcbdef8e356df49eeef99ef959 100644 (file)
@@ -71,6 +71,7 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
                        return 1;
+               return 0;
        case KVM_S390_INT_EMERGENCY:
                if (psw_extint_disabled(vcpu))
                        return 0;
index 698fb826e149d9c8c32f629fdb9d982c07c197db..412fbc5dc688be09a94ce3bcf4230a440e74eeb3 100644 (file)
@@ -86,16 +86,12 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 static unsigned long long *facilities;
 
 /* Section: not file related */
-int kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void)
 {
        /* every s390 is virtualization enabled ;-) */
        return 0;
 }
 
-void kvm_arch_hardware_disable(void *garbage)
-{
-}
-
 int kvm_arch_hardware_setup(void)
 {
        return 0;
@@ -105,19 +101,11 @@ void kvm_arch_hardware_unsetup(void)
 {
 }
 
-void kvm_arch_check_processor_compat(void *rtn)
-{
-}
-
 int kvm_arch_init(void *opaque)
 {
        return 0;
 }
 
-void kvm_arch_exit(void)
-{
-}
-
 /* Section: device related */
 long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
@@ -127,7 +115,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
        return -EINVAL;
 }
 
-int kvm_dev_ioctl_check_extension(long ext)
+int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 {
        int r;
 
@@ -289,10 +277,6 @@ static void kvm_free_vcpus(struct kvm *kvm)
        mutex_unlock(&kvm->lock);
 }
 
-void kvm_arch_sync_events(struct kvm *kvm)
-{
-}
-
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
        kvm_free_vcpus(kvm);
@@ -320,11 +304,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-       /* Nothing todo */
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        save_fp_regs(&vcpu->arch.host_fpregs);
@@ -971,12 +950,8 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
        return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
-                          struct kvm_memory_slot *dont)
-{
-}
-
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+                           unsigned long npages)
 {
        return 0;
 }
@@ -1026,15 +1001,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
        return;
 }
 
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
-                                  struct kvm_memory_slot *slot)
-{
-}
-
 static int __init kvm_s390_init(void)
 {
        int ret;
index 047c3e4c59a2e4ad03ea7812b8a5da8a6ab20f9a..416facec4a332eea5c33b6ffad24eb80643b5e2c 100644 (file)
@@ -302,6 +302,8 @@ static inline int do_exception(struct pt_regs *regs, int access)
        address = trans_exc_code & __FAIL_ADDR_MASK;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
        if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
                flags |= FAULT_FLAG_WRITE;
        down_read(&mm->mmap_sem);
index eba15f18fd38b331de68410a29c303d4429985c3..a4dfc0bd05dba509e425aded6aeb77fdd713e16b 100644 (file)
@@ -43,6 +43,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
 
 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
+EXPORT_SYMBOL(zero_page_mask);
 
 static void __init setup_zero_pages(void)
 {
index 47b600e4b2c50ae853b72404f3e1b051dce15f53..52238983527d605914853fd5415ea39617944ffe 100644 (file)
@@ -47,6 +47,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        const int field = sizeof(unsigned long) * 2;
+       unsigned long flags = 0;
        siginfo_t info;
        int fault;
 
@@ -75,6 +76,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
@@ -95,18 +99,18 @@ good_area:
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
                        goto bad_area;
        }
 
-survive:
        /*
        * If for any reason at all we couldn't handle the fault,
        * make sure we exit gracefully rather than endlessly redo
        * the fault.
        */
-       fault = handle_mm_fault(mm, vma, address, write);
+       fault = handle_mm_fault(mm, vma, address, flags);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
@@ -167,15 +171,10 @@ no_context:
        */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       if (is_global_init(tsk)) {
-               yield();
-               down_read(&mm->mmap_sem);
-               goto survive;
-       }
-       printk("VM: killing process %s\n", tsk->comm);
-       if (user_mode(regs))
-               do_group_exit(SIGKILL);
-       goto no_context;
+       if (!user_mode(regs))
+               goto no_context;
+       pagefault_out_of_memory();
+       return;
 
 do_sigbus:
        up_read(&mm->mmap_sem);
index 1f49c28affa90495047c6b82577b2d5221bc089b..541dc610150888e706977c7944c42ab1d61d7437 100644 (file)
@@ -400,9 +400,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        int fault;
-       int write = error_code & FAULT_CODE_WRITE;
-       unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                             (write ? FAULT_FLAG_WRITE : 0));
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        tsk = current;
        mm = tsk->mm;
@@ -476,6 +474,11 @@ good_area:
 
        set_thread_fault_code(error_code);
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+       if (error_code & FAULT_CODE_WRITE)
+               flags |= FAULT_FLAG_WRITE;
+
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
index 2668b3142fa2a0f00c120cf55d7dc3e90a0338dc..03a1bc3c3ddea4fd1bb5239463e160e221007dc1 100644 (file)
@@ -77,6 +77,7 @@ config SPARC64
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select HAVE_C_RECORDMCOUNT
        select NO_BOOTMEM
+       select ARCH_SUPPORTS_ATOMIC_RMW
 
 config ARCH_DEFCONFIG
        string
index 905832aa9e9ec0c4d6520567e61d26e96fda0d97..a0ed182ae73c5b54087da8b271338a8b13098d85 100644 (file)
@@ -21,7 +21,7 @@
 
 extern int __atomic_add_return(int, atomic_t *);
 extern int atomic_cmpxchg(atomic_t *, int, int);
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+extern int atomic_xchg(atomic_t *, int);
 extern int __atomic_add_unless(atomic_t *, int, int);
 extern void atomic_set(atomic_t *, int);
 
index 95d45986f908d4224d6b5bf6ec4d5e2a4d697ceb..b5aad964558e756bbf6c3f3c3e29616dceabde94 100644 (file)
@@ -53,4 +53,19 @@ do { __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
 
 #define smp_read_barrier_depends()     do { } while(0)
 
+#define smp_store_release(p, v)                                                \
+do {                                                                   \
+       compiletime_assert_atomic_type(*p);                             \
+       barrier();                                                      \
+       ACCESS_ONCE(*p) = (v);                                          \
+} while (0)
+
+#define smp_load_acquire(p)                                            \
+({                                                                     \
+       typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
+       compiletime_assert_atomic_type(*p);                             \
+       barrier();                                                      \
+       ___p1;                                                          \
+})
+
 #endif /* !(__SPARC64_BARRIER_H) */
index 1fae1a02e3c2136324cab7429d5561f8db76ed3c..ae0f9a7a314d12585fc83f1550c8d6401cf72f9b 100644 (file)
 #ifndef __ARCH_SPARC_CMPXCHG__
 #define __ARCH_SPARC_CMPXCHG__
 
-static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
-{
-       __asm__ __volatile__("swap [%2], %0"
-                            : "=&r" (val)
-                            : "0" (val), "r" (m)
-                            : "memory");
-       return val;
-}
-
+extern unsigned long __xchg_u32(volatile u32 *m, u32 new);
 extern void __xchg_called_with_bad_pointer(void);
 
 static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
        switch (size) {
        case 4:
-               return xchg_u32(ptr, x);
+               return __xchg_u32(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
index dfb0019bf05bf7ad992965bc6bbed5cc1f2818ab..6663604a902a451ccbeff44a7c8bf8d429ee5f2a 100644 (file)
@@ -24,7 +24,8 @@
 
 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
  * The page copy blockops can use 0x6000000 to 0x8000000.
- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
  * The vmalloc area spans 0x100000000 to 0x200000000.
  * Since modules need to be in the lowest 32-bits of the address space,
@@ -33,7 +34,8 @@
  * 0x400000000.
  */
 #define        TLBTEMP_BASE            _AC(0x0000000006000000,UL)
-#define        TSBMAP_BASE             _AC(0x0000000008000000,UL)
+#define        TSBMAP_8K_BASE          _AC(0x0000000008000000,UL)
+#define        TSBMAP_4M_BASE          _AC(0x0000000008400000,UL)
 #define MODULES_VADDR          _AC(0x0000000010000000,UL)
 #define MODULES_LEN            _AC(0x00000000e0000000,UL)
 #define MODULES_END            _AC(0x00000000f0000000,UL)
index f0d6a9700f4c8351e20be4743d9782c590b9e016..1a4bb971e06d4785be4225d92b4f977154ec30a0 100644 (file)
@@ -35,6 +35,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 {
 }
 
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 
 extern void flush_tlb_pending(void);
@@ -49,11 +51,6 @@ extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifndef CONFIG_SMP
 
-#define flush_tlb_kernel_range(start,end) \
-do {   flush_tsb_kernel_range(start,end); \
-       __flush_tlb_kernel_range(start,end); \
-} while (0)
-
 static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
 {
        __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
@@ -64,11 +61,6 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad
 extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
 
-#define flush_tlb_kernel_range(start, end) \
-do {   flush_tsb_kernel_range(start,end); \
-       smp_flush_tlb_kernel_range(start, end); \
-} while (0)
-
 #define global_flush_tlb_page(mm, vaddr) \
        smp_flush_tlb_page(mm, vaddr)
 
index 432afa83886137a89bcd6fa66911922addfa50d3..55841c184e6d5d4ae23d15f7f1506b903b86c540 100644 (file)
@@ -118,12 +118,18 @@ struct vio_disk_attr_info {
        u8                      vdisk_type;
 #define VD_DISK_TYPE_SLICE     0x01 /* Slice in block device   */
 #define VD_DISK_TYPE_DISK      0x02 /* Entire block device     */
-       u16                     resv1;
+       u8                      vdisk_mtype;            /* v1.1 */
+#define VD_MEDIA_TYPE_FIXED    0x01 /* Fixed device */
+#define VD_MEDIA_TYPE_CD       0x02 /* CD Device    */
+#define VD_MEDIA_TYPE_DVD      0x03 /* DVD Device   */
+       u8                      resv1;
        u32                     vdisk_block_size;
        u64                     operations;
-       u64                     vdisk_size;
+       u64                     vdisk_size;             /* v1.1 */
        u64                     max_xfer_size;
-       u64                     resv2[2];
+       u32                     phys_block_size;        /* v1.2 */
+       u32                     resv2;
+       u64                     resv3[1];
 };
 
 struct vio_disk_desc {
@@ -259,7 +265,7 @@ static inline u32 vio_dring_avail(struct vio_dring_state *dr,
                                  unsigned int ring_size)
 {
        return (dr->pending -
-               ((dr->prod - dr->cons) & (ring_size - 1)));
+               ((dr->prod - dr->cons) & (ring_size - 1)) - 1);
 }
 
 #define VIO_MAX_TYPE_LEN       32
index 54df554b82d98a684a3cbd7710ea10998e0d1722..fa4c900a0d1ff8bbd78bf000b8a8feff54f5777c 100644 (file)
@@ -1336,7 +1336,7 @@ int ldc_connect(struct ldc_channel *lp)
        if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
            !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
            lp->hs_state != LDC_HS_OPEN)
-               err = -EINVAL;
+               err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
        else
                err = start_handshake(lp);
 
index 8f76f23dac38ec66b0afea55a5311e612f2459f0..f9c6813c132d606c2a2d138342d4ed9941165f47 100644 (file)
@@ -581,7 +581,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
 {
        unsigned long csr_reg, csr, csr_error_bits;
        irqreturn_t ret = IRQ_NONE;
-       u16 stat;
+       u32 stat;
 
        csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
        csr = upa_readq(csr_reg);
@@ -617,7 +617,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
                               pbm->name);
                ret = IRQ_HANDLED;
        }
-       pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
+       pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat);
        if (stat & (PCI_STATUS_PARITY |
                    PCI_STATUS_SIG_TARGET_ABORT |
                    PCI_STATUS_REC_TARGET_ABORT |
@@ -625,7 +625,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
                    PCI_STATUS_SIG_SYSTEM_ERROR)) {
                printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
                       pbm->name, stat);
-               pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
+               pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff);
                ret = IRQ_HANDLED;
        }
        return ret;
index 77539eda928c93d05683d57612f9edec907e48e6..173964d5e948a19664a56d2f7d4b022bc43e3f10 100644 (file)
@@ -150,7 +150,7 @@ void cpu_panic(void)
 #define NUM_ROUNDS     64      /* magic value */
 #define NUM_ITERS      5       /* likewise */
 
-static DEFINE_SPINLOCK(itc_sync_lock);
+static DEFINE_RAW_SPINLOCK(itc_sync_lock);
 static unsigned long go[SLAVE + 1];
 
 #define DEBUG_TICK_SYNC        0
@@ -258,7 +258,7 @@ static void smp_synchronize_one_tick(int cpu)
        go[MASTER] = 0;
        membar_safe("#StoreLoad");
 
-       spin_lock_irqsave(&itc_sync_lock, flags);
+       raw_spin_lock_irqsave(&itc_sync_lock, flags);
        {
                for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
                        while (!go[MASTER])
@@ -269,7 +269,7 @@ static void smp_synchronize_one_tick(int cpu)
                        membar_safe("#StoreLoad");
                }
        }
-       spin_unlock_irqrestore(&itc_sync_lock, flags);
+       raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
 }
 
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
@@ -821,13 +821,17 @@ void arch_send_call_function_single_ipi(int cpu)
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
+       irq_enter();
        generic_smp_call_function_interrupt();
+       irq_exit();
 }
 
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
+       irq_enter();
        generic_smp_call_function_single_interrupt();
+       irq_exit();
 }
 
 static void tsb_sync(void *info)
index f7c72b6efc27556cd2e2de7a74539b1ba21831ce..d066eb18650c1598f898f7a4314bdd7ed5b606a7 100644 (file)
@@ -44,7 +44,7 @@ SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
 SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
 SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
 SIGN1(sys32_select, compat_sys_select, %o0)
-SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
+SIGN1(sys32_futex, compat_sys_futex, %o1)
 SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
 SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
 SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
index 8201c25e76697ad5f5a96de1b6921a3fb34372ac..4db8898199f7240d0ef01eddecc056e67ad369e6 100644 (file)
@@ -163,17 +163,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
 unsigned long compute_effective_address(struct pt_regs *regs,
                                        unsigned int insn, unsigned int rd)
 {
+       int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
        unsigned int rs1 = (insn >> 14) & 0x1f;
        unsigned int rs2 = insn & 0x1f;
-       int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+       unsigned long addr;
 
        if (insn & 0x2000) {
                maybe_flush_windows(rs1, 0, rd, from_kernel);
-               return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+               addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
        } else {
                maybe_flush_windows(rs1, rs2, rd, from_kernel);
-               return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+               addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
        }
+
+       if (!from_kernel && test_thread_flag(TIF_32BIT))
+               addr &= 0xffffffff;
+
+       return addr;
 }
 
 /* This is just to make gcc think die_if_kernel does return... */
index 2c20ad63ddbf2bbf8a4da5e751e49650d8be7060..30eee6e8a81b2d45797aab304914b10571573b1a 100644 (file)
@@ -236,6 +236,7 @@ FUNC_NAME:  /* %o0=dst, %o1=src, %o2=len */
         */
        VISEntryHalf
 
+       membar          #Sync
        alignaddr       %o1, %g0, %g0
 
        add             %o1, (64 - 1), %o4
index 1d32b54089aad3e3d6094f9d3d013b4f3664602b..8f2f94d53434af179ae1be20a1573d3e0df026f4 100644 (file)
@@ -40,6 +40,19 @@ int __atomic_add_return(int i, atomic_t *v)
 }
 EXPORT_SYMBOL(__atomic_add_return);
 
+int atomic_xchg(atomic_t *v, int new)
+{
+       int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(ATOMIC_HASH(v), flags);
+       ret = v->counter;
+       v->counter = new;
+       spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+       return ret;
+}
+EXPORT_SYMBOL(atomic_xchg);
+
 int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        int ret;
@@ -132,3 +145,17 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
        return (unsigned long)prev;
 }
 EXPORT_SYMBOL(__cmpxchg_u32);
+
+unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
+{
+       unsigned long flags;
+       u32 prev;
+
+       spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+       prev = *ptr;
+       *ptr = new;
+       spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+
+       return (unsigned long)prev;
+}
+EXPORT_SYMBOL(__xchg_u32);
index aa4d55b0bdf0326370ec6d54a4759b04215d61cb..5ce8f2f64604a0399e792cada3d9c3026f6a36c0 100644 (file)
@@ -499,7 +499,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
                case 0: fsr = *pfsr;
                        if (IR == -1) IR = 2;
                        /* fcc is always fcc0 */
-                       fsr &= ~0xc00; fsr |= (IR << 10); break;
+                       fsr &= ~0xc00; fsr |= (IR << 10);
                        *pfsr = fsr;
                        break;
                case 1: rd->s = IR; break;
index e98bfda205a2beb97bcaa49a27d5a9412bf03dc3..59dbd46457250b050ce84a48370a4f96afa3746d 100644 (file)
@@ -177,8 +177,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
        unsigned long g2;
        int from_user = !(regs->psr & PSR_PS);
        int fault, code;
-       unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                             (write ? FAULT_FLAG_WRITE : 0));
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        if (text_fault)
                address = regs->pc;
@@ -235,6 +234,11 @@ good_area:
                        goto bad_area;
        }
 
+       if (from_user)
+               flags |= FAULT_FLAG_USER;
+       if (write)
+               flags |= FAULT_FLAG_WRITE;
+
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
@@ -383,6 +387,7 @@ static void force_user_fault(unsigned long address, int write)
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
+       unsigned int flags = FAULT_FLAG_USER;
        int code;
 
        code = SEGV_MAPERR;
@@ -402,11 +407,12 @@ good_area:
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
-       switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
+       switch (handle_mm_fault(mm, vma, address, flags)) {
        case VM_FAULT_SIGBUS:
        case VM_FAULT_OOM:
                goto do_sigbus;
index 5062ff389e83bb3b2865deaabb0a4d5338256e09..3841a081beb3967d9f2f08d0c6d1b4abf5390ff0 100644 (file)
@@ -95,38 +95,51 @@ static unsigned int get_user_insn(unsigned long tpc)
        pte_t *ptep, pte;
        unsigned long pa;
        u32 insn = 0;
-       unsigned long pstate;
 
-       if (pgd_none(*pgdp))
-               goto outret;
+       if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
+               goto out;
        pudp = pud_offset(pgdp, tpc);
-       if (pud_none(*pudp))
-               goto outret;
-       pmdp = pmd_offset(pudp, tpc);
-       if (pmd_none(*pmdp))
-               goto outret;
-
-       /* This disables preemption for us as well. */
-       __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
-       __asm__ __volatile__("wrpr %0, %1, %%pstate"
-                               : : "r" (pstate), "i" (PSTATE_IE));
-       ptep = pte_offset_map(pmdp, tpc);
-       pte = *ptep;
-       if (!pte_present(pte))
+       if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
                goto out;
 
-       pa  = (pte_pfn(pte) << PAGE_SHIFT);
-       pa += (tpc & ~PAGE_MASK);
-
-       /* Use phys bypass so we don't pollute dtlb/dcache. */
-       __asm__ __volatile__("lduwa [%1] %2, %0"
-                            : "=r" (insn)
-                            : "r" (pa), "i" (ASI_PHYS_USE_EC));
+       /* This disables preemption for us as well. */
+       local_irq_disable();
 
+       pmdp = pmd_offset(pudp, tpc);
+       if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
+               goto out_irq_enable;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       if (pmd_trans_huge(*pmdp)) {
+               if (pmd_trans_splitting(*pmdp))
+                       goto out_irq_enable;
+
+               pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
+               pa += tpc & ~HPAGE_MASK;
+
+               /* Use phys bypass so we don't pollute dtlb/dcache. */
+               __asm__ __volatile__("lduwa [%1] %2, %0"
+                                    : "=r" (insn)
+                                    : "r" (pa), "i" (ASI_PHYS_USE_EC));
+       } else
+#endif
+       {
+               ptep = pte_offset_map(pmdp, tpc);
+               pte = *ptep;
+               if (pte_present(pte)) {
+                       pa  = (pte_pfn(pte) << PAGE_SHIFT);
+                       pa += (tpc & ~PAGE_MASK);
+
+                       /* Use phys bypass so we don't pollute dtlb/dcache. */
+                       __asm__ __volatile__("lduwa [%1] %2, %0"
+                                            : "=r" (insn)
+                                            : "r" (pa), "i" (ASI_PHYS_USE_EC));
+               }
+               pte_unmap(ptep);
+       }
+out_irq_enable:
+       local_irq_enable();
 out:
-       pte_unmap(ptep);
-       __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
-outret:
        return insn;
 }
 
@@ -152,7 +165,8 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
 }
 
 static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
-                            unsigned int insn, int fault_code)
+                            unsigned long fault_addr, unsigned int insn,
+                            int fault_code)
 {
        unsigned long addr;
        siginfo_t info;
@@ -160,10 +174,18 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
        info.si_code = code;
        info.si_signo = sig;
        info.si_errno = 0;
-       if (fault_code & FAULT_CODE_ITLB)
+       if (fault_code & FAULT_CODE_ITLB) {
                addr = regs->tpc;
-       else
-               addr = compute_effective_address(regs, insn, 0);
+       } else {
+               /* If we were able to probe the faulting instruction, use it
+                * to compute a precise fault address.  Otherwise use the fault
+                * time provided address which may only have page granularity.
+                */
+               if (insn)
+                       addr = compute_effective_address(regs, insn, 0);
+               else
+                       addr = fault_addr;
+       }
        info.si_addr = (void __user *) addr;
        info.si_trapno = 0;
 
@@ -238,7 +260,7 @@ static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
                /* The si_code was set to make clear whether
                 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
                 */
-               do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
+               do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
                return;
        }
 
@@ -258,18 +280,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
        show_regs(regs);
 }
 
-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
-                                                        unsigned long addr)
-{
-       static int times;
-
-       if (times++ < 10)
-               printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
-                      "reports 64-bit fault address [%lx]\n",
-                      current->comm, current->pid, addr);
-       show_regs(regs);
-}
-
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
        struct mm_struct *mm = current->mm;
@@ -298,10 +308,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
                                goto intr_or_no_mm;
                        }
                }
-               if (unlikely((address >> 32) != 0)) {
-                       bogus_32bit_fault_address(regs, address);
+               if (unlikely((address >> 32) != 0))
                        goto intr_or_no_mm;
-               }
        }
 
        if (regs->tstate & TSTATE_PRIV) {
@@ -315,7 +323,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
                        bad_kernel_pc(regs, address);
                        return;
                }
-       }
+       } else
+               flags |= FAULT_FLAG_USER;
 
        /*
         * If we're in an interrupt or have no user
@@ -418,13 +427,14 @@ good_area:
                    vma->vm_file != NULL)
                        set_thread_fault_code(fault_code |
                                              FAULT_CODE_BLKCOMMIT);
+
+               flags |= FAULT_FLAG_WRITE;
        } else {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
 
-       flags |= ((fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
        fault = handle_mm_fault(mm, vma, address, flags);
 
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
@@ -519,7 +529,7 @@ do_sigbus:
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
-       do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
+       do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
 
        /* Kernel mode? Handle exceptions or die */
        if (regs->tstate & TSTATE_PRIV)
index 04fd55a6e4613ae009d6f53abce4fbf3f043bc8e..a751023dbdcd99fb528a10a64e8cddf40df3efa4 100644 (file)
@@ -350,6 +350,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
        mm = vma->vm_mm;
 
+       /* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
+       if (!pte_accessible(mm, pte))
+               return;
+
        spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -2764,3 +2768,26 @@ void hugetlb_setup(struct pt_regs *regs)
        }
 }
 #endif
+
+#ifdef CONFIG_SMP
+#define do_flush_tlb_kernel_range      smp_flush_tlb_kernel_range
+#else
+#define do_flush_tlb_kernel_range      __flush_tlb_kernel_range
+#endif
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
+               if (start < LOW_OBP_ADDRESS) {
+                       flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
+                       do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
+               }
+               if (end > HI_OBP_ADDRESS) {
+                       flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
+                       do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
+               }
+       } else {
+               flush_tsb_kernel_range(start, end);
+               do_flush_tlb_kernel_range(start, end);
+       }
+}
index 2cc3bce5ee914a158a16960c4ece028cc959b83a..71d99a6c75a75cd4c2bbea0dea763686668410d3 100644 (file)
@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
        mm->context.tsb_block[tsb_idx].tsb_nentries =
                tsb_bytes / sizeof(struct tsb);
 
-       base = TSBMAP_BASE;
+       switch (tsb_idx) {
+       case MM_TSB_BASE:
+               base = TSBMAP_8K_BASE;
+               break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+       case MM_TSB_HUGE:
+               base = TSBMAP_4M_BASE;
+               break;
+#endif
+       default:
+               BUG();
+       }
+
        tte = pgprot_val(PAGE_KERNEL_LOCKED);
        tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
        BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
index 3d2b81c163a6aac89e0e32d862bd603021eec917..3ff289f422e635cdf74280f86488733c0f88c55f 100644 (file)
@@ -280,8 +280,7 @@ static int handle_page_fault(struct pt_regs *regs,
        if (!is_page_fault)
                write = 1;
 
-       flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                (write ? FAULT_FLAG_WRITE : 0));
+       flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);
 
@@ -365,6 +364,9 @@ static int handle_page_fault(struct pt_regs *regs,
                goto bad_area_nosemaphore;
        }
 
+       if (!is_kernel_mode)
+               flags |= FAULT_FLAG_USER;
+
        /*
         * When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
@@ -425,12 +427,12 @@ good_area:
 #endif
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        } else {
                if (!is_page_fault || !(vma->vm_flags & VM_READ))
                        goto bad_area;
        }
 
- survive:
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
@@ -568,15 +570,10 @@ no_context:
  */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       if (is_global_init(tsk)) {
-               yield();
-               down_read(&mm->mmap_sem);
-               goto survive;
-       }
-       pr_alert("VM: killing process %s\n", tsk->comm);
-       if (!is_kernel_mode)
-               do_group_exit(SIGKILL);
-       goto no_context;
+       if (is_kernel_mode)
+               goto no_context;
+       pagefault_out_of_memory();
+       return 0;
 
 do_sigbus:
        up_read(&mm->mmap_sem);
index 089f3987e273a2c3f9576ef923dbd8ba8648cca5..5c3aef74237ffda72a0b252d4ee4b118d14a969a 100644 (file)
@@ -30,8 +30,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
        pmd_t *pmd;
        pte_t *pte;
        int err = -EFAULT;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                                (is_write ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        *code_out = SEGV_MAPERR;
 
@@ -42,6 +41,8 @@ int handle_page_fault(unsigned long address, unsigned long ip,
        if (in_atomic())
                goto out_nosemaphore;
 
+       if (is_user)
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
@@ -58,12 +59,15 @@ retry:
 
 good_area:
        *code_out = SEGV_ACCERR;
-       if (is_write && !(vma->vm_flags & VM_WRITE))
-               goto out;
-
-       /* Don't require VM_READ|VM_EXEC for write faults! */
-       if (!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
-               goto out;
+       if (is_write) {
+               if (!(vma->vm_flags & VM_WRITE))
+                       goto out;
+               flags |= FAULT_FLAG_WRITE;
+       } else {
+               /* Don't require VM_READ|VM_EXEC for write faults! */
+               if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+                       goto out;
+       }
 
        do {
                int fault;
@@ -124,6 +128,8 @@ out_of_memory:
         * (which will retry the fault, or kill us if we got oom-killed).
         */
        up_read(&mm->mmap_sem);
+       if (!is_user)
+               goto out_nosemaphore;
        pagefault_out_of_memory();
        return 0;
 }
index f9b5c10bccee96e8838484aaf6effc39b3c89bd1..0dc922dba9154d7cfcfe5352ec8ec77169e8082f 100644 (file)
@@ -209,8 +209,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, sig, code;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                                ((!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        tsk = current;
        mm = tsk->mm;
@@ -222,6 +221,11 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+       if (!(fsr ^ 0x12))
+               flags |= FAULT_FLAG_WRITE;
+
        /*
         * As per x86, we may deadlock here.  However, since the kernel only
         * validly references user space from well defined areas of the code,
@@ -278,6 +282,13 @@ retry:
               (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
                return 0;
 
+       /*
+        * If we are in kernel mode at this point, we
+        * have no context to handle this fault with.
+        */
+       if (!user_mode(regs))
+               goto no_context;
+
        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, call the OOM killer, and return to
@@ -288,13 +299,6 @@ retry:
                return 0;
        }
 
-       /*
-        * If we are in kernel mode at this point, we
-        * have no context to handle this fault with.
-        */
-       if (!user_mode(regs))
-               goto no_context;
-
        if (fault & VM_FAULT_SIGBUS) {
                /*
                 * We had some memory, but were unable to
index 787072769a803a3196f21d3a06326ccea016bdd4..3115eae96ad8a2963d2d78fbf18a8189ac8dde28 100644 (file)
@@ -121,6 +121,7 @@ config X86
        select OLD_SIGACTION if X86_32
        select COMPAT_OLD_SIGACTION if IA32_EMULATION
        select RTC_LIB
+       select ARCH_SUPPORTS_ATOMIC_RMW
 
 config INSTRUCTION_DECODER
        def_bool y
@@ -957,10 +958,27 @@ config VM86
        default y
        depends on X86_32
        ---help---
-         This option is required by programs like DOSEMU to run 16-bit legacy
-         code on X86 processors. It also may be needed by software like
-         XFree86 to initialize some video cards via BIOS. Disabling this
-         option saves about 6k.
+         This option is required by programs like DOSEMU to run
+         16-bit real mode legacy code on x86 processors. It also may
+         be needed by software like XFree86 to initialize some video
+         cards via BIOS. Disabling this option saves about 6K.
+
+config X86_16BIT
+       bool "Enable support for 16-bit segments" if EXPERT
+       default y
+       ---help---
+         This option is required by programs like Wine to run 16-bit
+         protected mode legacy code on x86 processors.  Disabling
+         this option saves about 300 bytes on i386, or around 6K text
+         plus 16K runtime memory on x86-64,
+
+config X86_ESPFIX32
+       def_bool y
+       depends on X86_16BIT && X86_32
+
+config X86_ESPFIX64
+       def_bool y
+       depends on X86_16BIT && X86_64
 
 config TOSHIBA
        tristate "Toshiba Laptop support"
@@ -1566,6 +1584,7 @@ config EFI
 config EFI_STUB
        bool "EFI stub support"
        depends on EFI
+       select RELOCATABLE
        ---help---
           This kernel feature allows a bzImage to be loaded directly
          by EFI firmware without the use of a bootloader.
index 9ec06a1f6d61b2d64f6520bb68d1b0e047af1276..425712462178072e0b6f3ca51cd785c5933e042a 100644 (file)
@@ -91,10 +91,9 @@ bs_die:
 
        .section ".bsdata", "a"
 bugger_off_msg:
-       .ascii  "Direct floppy boot is not supported. "
-       .ascii  "Use a boot loader program instead.\r\n"
+       .ascii  "Use a boot loader.\r\n"
        .ascii  "\n"
-       .ascii  "Remove disk and press any key to reboot ...\r\n"
+       .ascii  "Remove disk and press any key to reboot...\r\n"
        .byte   0
 
 #ifdef CONFIG_EFI_STUB
@@ -108,7 +107,7 @@ coff_header:
 #else
        .word   0x8664                          # x86-64
 #endif
-       .word   3                               # nr_sections
+       .word   4                               # nr_sections
        .long   0                               # TimeDateStamp
        .long   0                               # PointerToSymbolTable
        .long   1                               # NumberOfSymbols
@@ -250,6 +249,25 @@ section_table:
        .word   0                               # NumberOfLineNumbers
        .long   0x60500020                      # Characteristics (section flags)
 
+       #
+       # The offset & size fields are filled in by build.c.
+       #
+       .ascii  ".bss"
+       .byte   0
+       .byte   0
+       .byte   0
+       .byte   0
+       .long   0
+       .long   0x0
+       .long   0                               # Size of initialized data
+                                               # on disk
+       .long   0x0
+       .long   0                               # PointerToRelocations
+       .long   0                               # PointerToLineNumbers
+       .word   0                               # NumberOfRelocations
+       .word   0                               # NumberOfLineNumbers
+       .long   0xc8000080                      # Characteristics (section flags)
+
 #endif /* CONFIG_EFI_STUB */
 
        # Kernel attributes; used by setup.  This is part 1 of the
index 94c54465002003e34af8f6fb3d14a1be839fdeba..971a0ce062aad13b1f4b8384953af8cb1114c91a 100644 (file)
@@ -141,7 +141,7 @@ static void usage(void)
 
 #ifdef CONFIG_EFI_STUB
 
-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
 {
        unsigned int pe_header;
        unsigned short num_sections;
@@ -162,10 +162,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
                        put_unaligned_le32(size, section + 0x8);
 
                        /* section header vma field */
-                       put_unaligned_le32(offset, section + 0xc);
+                       put_unaligned_le32(vma, section + 0xc);
 
                        /* section header 'size of initialised data' field */
-                       put_unaligned_le32(size, section + 0x10);
+                       put_unaligned_le32(datasz, section + 0x10);
 
                        /* section header 'file offset' field */
                        put_unaligned_le32(offset, section + 0x14);
@@ -177,6 +177,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
        }
 }
 
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+{
+       update_pecoff_section_header_fields(section_name, offset, size, size, offset);
+}
+
 static void update_pecoff_setup_and_reloc(unsigned int size)
 {
        u32 setup_offset = 0x200;
@@ -201,9 +206,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
 
        pe_header = get_unaligned_le32(&buf[0x3c]);
 
-       /* Size of image */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
-
        /*
         * Size of code: Subtract the size of the first sector (512 bytes)
         * which includes the header.
@@ -218,6 +220,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
        update_pecoff_section_header(".text", text_start, text_sz);
 }
 
+static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
+{
+       unsigned int pe_header;
+       unsigned int bss_sz = init_sz - file_sz;
+
+       pe_header = get_unaligned_le32(&buf[0x3c]);
+
+       /* Size of uninitialized data */
+       put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
+
+       /* Size of image */
+       put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
+
+       update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
+}
+
 #endif /* CONFIG_EFI_STUB */
 
 
@@ -268,6 +286,9 @@ int main(int argc, char ** argv)
        int fd;
        void *kernel;
        u32 crc = 0xffffffffUL;
+#ifdef CONFIG_EFI_STUB
+       unsigned int init_sz;
+#endif
 
        /* Defaults for old kernel */
 #ifdef CONFIG_X86_32
@@ -338,7 +359,9 @@ int main(int argc, char ** argv)
        put_unaligned_le32(sys_size, &buf[0x1f4]);
 
 #ifdef CONFIG_EFI_STUB
-       update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+       update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
+       init_sz = get_unaligned_le32(&buf[0x260]);
+       update_pecoff_bss(i + (sys_size * 16), init_sz);
 
 #ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */
        efi_stub_entry -= 0x200;
index 474dc1b59f726b92de0a6c9fe3cfcc76ae5e005e..c9305ef1d41136e38066402dd4af5801b135ec90 100644 (file)
@@ -151,6 +151,16 @@ ENTRY(ia32_sysenter_target)
 1:     movl    (%rbp),%ebp
        _ASM_EXTABLE(1b,ia32_badarg)
        ASM_CLAC
+
+       /*
+        * Sysenter doesn't filter flags, so we need to clear NT
+        * ourselves.  To save a few cycles, we can check whether
+        * NT was set instead of doing an unconditional popfq.
+        */
+       testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp)
+       jnz sysenter_fix_flags
+sysenter_flags_fixed:
+
        orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        CFI_REMEMBER_STATE
@@ -184,6 +194,8 @@ sysexit_from_sys_call:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS_SYSEXIT32
 
+       CFI_RESTORE_STATE
+
 #ifdef CONFIG_AUDITSYSCALL
        .macro auditsys_entry_common
        movl %esi,%r9d                  /* 6th arg: 4th syscall arg */
@@ -226,7 +238,6 @@ sysexit_from_sys_call:
        .endm
 
 sysenter_auditsys:
-       CFI_RESTORE_STATE
        auditsys_entry_common
        movl %ebp,%r9d                  /* reload 6th syscall arg */
        jmp sysenter_dispatch
@@ -235,6 +246,11 @@ sysexit_audit:
        auditsys_exit sysexit_from_sys_call
 #endif
 
+sysenter_fix_flags:
+       pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
+       popfq_cfi
+       jmp sysenter_flags_fixed
+
 sysenter_tracesys:
 #ifdef CONFIG_AUDITSYSCALL
        testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
index c6cd358a1eec806a779371a631a97a8de2d28912..04a48903b2eb31973080d60d36cfd93b3fc68a5f 100644 (file)
 #endif
 #define smp_read_barrier_depends()     read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
+#else /* !SMP */
 #define smp_mb()       barrier()
 #define smp_rmb()      barrier()
 #define smp_wmb()      barrier()
 #define smp_read_barrier_depends()     do { } while (0)
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif /* SMP */
+
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+
+/*
+ * For either of these options x86 doesn't have a strong TSO memory
+ * model and we should fall back to full barriers.
+ */
+
+#define smp_store_release(p, v)                                                \
+do {                                                                   \
+       compiletime_assert_atomic_type(*p);                             \
+       smp_mb();                                                       \
+       ACCESS_ONCE(*p) = (v);                                          \
+} while (0)
+
+#define smp_load_acquire(p)                                            \
+({                                                                     \
+       typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
+       compiletime_assert_atomic_type(*p);                             \
+       smp_mb();                                                       \
+       ___p1;                                                          \
+})
+
+#else /* regular x86 TSO memory ordering */
+
+#define smp_store_release(p, v)                                                \
+do {                                                                   \
+       compiletime_assert_atomic_type(*p);                             \
+       barrier();                                                      \
+       ACCESS_ONCE(*p) = (v);                                          \
+} while (0)
+
+#define smp_load_acquire(p)                                            \
+({                                                                     \
+       typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
+       compiletime_assert_atomic_type(*p);                             \
+       barrier();                                                      \
+       ___p1;                                                          \
+})
+
 #endif
 
 /*
index 9c999c1674facf727001df749e896bdbd3295dc1..01f15b227d7ed49ec2c8a9ce68c4c6a2e3fca6b1 100644 (file)
@@ -155,8 +155,9 @@ do {                                                \
 #define elf_check_arch(x)                      \
        ((x)->e_machine == EM_X86_64)
 
-#define compat_elf_check_arch(x)               \
-       (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
+#define compat_elf_check_arch(x)                                       \
+       (elf_check_arch_ia32(x) ||                                      \
+        (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
 
 #if __USER32_DS != __USER_DS
 # error "The following code assumes __USER32_DS == __USER_DS"
diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h
new file mode 100644 (file)
index 0000000..99efebb
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _ASM_X86_ESPFIX_H
+#define _ASM_X86_ESPFIX_H
+
+#ifdef CONFIG_X86_64
+
+#include <asm/percpu.h>
+
+DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
+DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
+
+extern void init_espfix_bsp(void);
+extern void init_espfix_ap(void);
+
+#endif /* CONFIG_X86_64 */
+
+#endif /* _ASM_X86_ESPFIX_H */
index 0dc7d9e21c34f2d8e8284c7f9a76b78c19eef2e9..9d7d36c82fc21aa7d640e613ffa87d7a48a3034e 100644 (file)
@@ -123,14 +123,14 @@ enum fixed_addresses {
        __end_of_permanent_fixed_addresses,
 
        /*
-        * 256 temporary boot-time mappings, used by early_ioremap(),
+        * 512 temporary boot-time mappings, used by early_ioremap(),
         * before ioremap() is functional.
         *
-        * If necessary we round it up to the next 256 pages boundary so
+        * If necessary we round it up to the next 512 pages boundary so
         * that we can have a single pgd entry and a single pte table:
         */
 #define NR_FIX_BTMAPS          64
-#define FIX_BTMAPS_SLOTS       4
+#define FIX_BTMAPS_SLOTS       8
 #define TOTAL_FIX_BTMAPS       (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
        FIX_BTMAP_END =
         (__end_of_permanent_fixed_addresses ^
index bba3cf88e6249187a00fba403ed533648a728673..0a8b519226b8feb37368ffbc4ca81011bc031fde 100644 (file)
@@ -129,7 +129,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
 
 #define PARAVIRT_ADJUST_EXCEPTION_FRAME        /*  */
 
-#define INTERRUPT_RETURN       iretq
+#define INTERRUPT_RETURN       jmp native_iret
 #define USERGS_SYSRET64                                \
        swapgs;                                 \
        sysretq;
index 8b320722de7a9963f8e8adff4ba9204dd355fccd..b15a6b5fa3417b4b6b9b48e8a7f8f066b0ff1f73 100644 (file)
 #define KVM_HPAGE_MASK(x)      (~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
+static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
+{
+       /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
+       return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
+               (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
+}
+
 #define SELECTOR_TI_MASK (1 << 2)
 #define SELECTOR_RPL_MASK 0x03
 
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 80
 #define KVM_NR_FIXED_MTRR_REGION 88
-#define KVM_NR_VAR_MTRR 10
+#define KVM_NR_VAR_MTRR 8
 
 #define ASYNC_PF_PER_VCPU 64
 
-struct kvm_vcpu;
-struct kvm;
-struct kvm_async_pf;
-
 enum kvm_reg {
        VCPU_REGS_RAX = 0,
        VCPU_REGS_RCX = 1,
@@ -463,6 +466,7 @@ struct kvm_vcpu_arch {
        u64 mmio_gva;
        unsigned access;
        gfn_t mmio_gfn;
+       u64 mmio_gen;
 
        struct kvm_pmu pmu;
 
@@ -631,8 +635,8 @@ struct msr_data {
 struct kvm_x86_ops {
        int (*cpu_has_kvm_support)(void);          /* __init */
        int (*disabled_by_bios)(void);             /* __init */
-       int (*hardware_enable)(void *dummy);
-       void (*hardware_disable)(void *dummy);
+       int (*hardware_enable)(void);
+       void (*hardware_disable)(void);
        void (*check_processor_compatibility)(void *rtn);
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */
@@ -952,6 +956,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
        kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
 }
 
+static inline u64 get_canonical(u64 la)
+{
+       return ((int64_t)la << 16) >> 16;
+}
+
+static inline bool is_noncanonical_address(u64 la)
+{
+#ifdef CONFIG_X86_64
+       return get_canonical(la) != la;
+#else
+       return false;
+#endif
+}
+
 #define TSS_IOPB_BASE_OFFSET 0x66
 #define TSS_BASE_SIZE 0x68
 #define TSS_IOPB_SIZE (65536 / 8)
@@ -1010,7 +1028,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
 
 void kvm_define_shared_msr(unsigned index, u32 msr);
-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
index 2d883440cb9a282f948d7062684c52789e95d90f..b1609f2c524cb29b9d81493a5b35c2793c4c07d7 100644 (file)
@@ -61,6 +61,8 @@ typedef struct { pteval_t pte; } pte_t;
 #define MODULES_VADDR    _AC(0xffffffffa0000000, UL)
 #define MODULES_END      _AC(0xffffffffff000000, UL)
 #define MODULES_LEN   (MODULES_END - MODULES_VADDR)
+#define ESPFIX_PGD_ENTRY _AC(-2, UL)
+#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
 
 #define EARLY_DYNAMIC_PAGE_TABLES      64
 
index b7bf3505e1ec0ba4d1c42c51aeae60babf150a55..2e327f114a1bf210d457f2a780bdc30f4d481581 100644 (file)
@@ -62,6 +62,8 @@ static inline void x86_ce4100_early_setup(void) { }
 
 #ifndef _SETUP
 
+#include <asm/espfix.h>
+
 /*
  * This is set up by the setup-routine at boot-time
  */
index 2e188d68397c82930bdba80c52a87260546fcbd3..f106908a12ec491a6b7d810d24e08ba0ec5c759a 100644 (file)
@@ -90,8 +90,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
        memcpy(&regs->bx + i, args, n * sizeof(args[0]));
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-                                  struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
        return AUDIT_ARCH_I386;
 }
@@ -220,8 +219,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
                }
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-                                  struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
 #ifdef CONFIG_IA32_EMULATION
        /*
@@ -233,7 +231,7 @@ static inline int syscall_get_arch(struct task_struct *task,
         *
         * x32 tasks should be considered AUDIT_ARCH_X86_64.
         */
-       if (task_thread_info(task)->status & TS_COMPAT)
+       if (task_thread_info(current)->status & TS_COMPAT)
                return AUDIT_ARCH_I386;
 #endif
        /* Both x32 and x86_64 are considered "64-bit". */
index 5d9a3033b3d76dcf9d0d0566c11cfc0853d90c6f..d3a87780c70bdf017cc7ae92563a8f1af5e467a2 100644 (file)
@@ -211,9 +211,9 @@ struct kvm_cpuid_entry2 {
        __u32 padding[3];
 };
 
-#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1
-#define KVM_CPUID_FLAG_STATEFUL_FUNC    2
-#define KVM_CPUID_FLAG_STATE_READ_NEXT  4
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX                BIT(0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC           BIT(1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT         BIT(2)
 
 /* for KVM_SET_CPUID2 */
 struct kvm_cpuid2 {
index 54991a746043853b187e062cf3c53a2f4f411054..b16e6d28f1497cde3fe4ccdb07e28a8483ac325e 100644 (file)
@@ -6,7 +6,7 @@
  * EFLAGS bits
  */
 #define X86_EFLAGS_CF  0x00000001 /* Carry Flag */
-#define X86_EFLAGS_BIT1        0x00000002 /* Bit 1 - always on */
+#define X86_EFLAGS_FIXED 0x00000002 /* Bit 1 - always on */
 #define X86_EFLAGS_PF  0x00000004 /* Parity Flag */
 #define X86_EFLAGS_AF  0x00000010 /* Auxiliary carry Flag */
 #define X86_EFLAGS_ZF  0x00000040 /* Zero Flag */
index 7bd3bd31010623367c540755d1dcf3ba0186bdf9..111eb356dbeae77d8b4bf23a9e2cf7c47ff4ee7b 100644 (file)
@@ -27,6 +27,7 @@ obj-$(CONFIG_X86_64)  += sys_x86_64.o x8664_ksyms_64.o
 obj-y                  += syscall_$(BITS).o
 obj-$(CONFIG_X86_64)   += vsyscall_64.o
 obj-$(CONFIG_X86_64)   += vsyscall_emu_64.o
+obj-$(CONFIG_X86_ESPFIX64)     += espfix_64.o
 obj-y                  += bootflag.o e820.o
 obj-y                  += pci-dma.o quirks.o topology.o kdebugfs.o
 obj-y                  += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
index 904611bf0e5a3edf7e7069c42795104275cb0a3b..033eb44dc661a9a904bef22d806b610688586d6e 100644 (file)
@@ -1263,7 +1263,7 @@ void __cpuinit setup_local_APIC(void)
        unsigned int value, queued;
        int i, j, acked = 0;
        unsigned long long tsc = 0, ntsc;
-       long long max_loops = cpu_khz;
+       long long max_loops = cpu_khz ? cpu_khz : 1000000;
 
        if (cpu_has_tsc)
                rdtscll(tsc);
@@ -1360,7 +1360,7 @@ void __cpuinit setup_local_APIC(void)
                        break;
                }
                if (queued) {
-                       if (cpu_has_tsc) {
+                       if (cpu_has_tsc && cpu_khz) {
                                rdtscll(ntsc);
                                max_loops = (cpu_khz << 10) - (ntsc - tsc);
                        } else
index deeb48d9459bd2b7559f5a6a9c0dee5f3cdd0349..6a7e3e9cffc3951e40ee7967130d0f5c3c30d5a6 100644 (file)
@@ -1134,7 +1134,7 @@ void syscall_init(void)
        /* Flags to clear on syscall */
        wrmsrl(MSR_SYSCALL_MASK,
               X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
-              X86_EFLAGS_IOPL|X86_EFLAGS_AC);
+              X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
 }
 
 /*
index f187806dfc187284928079161aec163bac23746e..8533e69d2b89f4c632b6ba2444812dd92892007f 100644 (file)
@@ -154,6 +154,21 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
                        setup_clear_cpu_cap(X86_FEATURE_ERMS);
                }
        }
+
+       /*
+        * Intel Quark Core DevMan_001.pdf section 6.4.11
+        * "The operating system also is required to invalidate (i.e., flush)
+        *  the TLB when any changes are made to any of the page table entries.
+        *  The operating system must reload CR3 to cause the TLB to be flushed"
+        *
+        * As a result cpu_has_pge() in arch/x86/include/asm/tlbflush.h should
+        * be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
+        * to be modified
+        */
+       if (c->x86 == 5 && c->x86_model == 9) {
+               pr_info("Disabling PGE capability bit\n");
+               setup_clear_cpu_cap(X86_FEATURE_PGE);
+       }
 }
 
 #ifdef CONFIG_X86_32
index a9e22073bd56a755ea1952dea202eba63e350f7f..6d6bb6f4fd439275a9ea83a7bc5fe776f72b4753 100644 (file)
@@ -1198,6 +1198,15 @@ again:
 
        intel_pmu_lbr_read();
 
+       /*
+        * CondChgd bit 63 doesn't mean any overflow status. Ignore
+        * and clear the bit.
+        */
+       if (__test_and_clear_bit(63, (unsigned long *)&status)) {
+               if (!status)
+                       goto done;
+       }
+
        /*
         * PEBS overflow sets bit 62 in the global status register
         */
@@ -2163,6 +2172,9 @@ __init int intel_pmu_init(void)
        case 62: /* IvyBridge EP */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
+               /* dTLB-load-misses on IVB is different than SNB */
+               hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
+
                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
                       sizeof(hw_cache_extra_regs));
 
index ac632817609747468c5c30da5a18cbbe352ebe59..5c38e2b298cd7e69b5afcdae0764d230b36686da 100644 (file)
@@ -436,8 +436,8 @@ sysenter_do_call:
        cmpl $(NR_syscalls), %eax
        jae sysenter_badsys
        call *sys_call_table(,%eax,4)
-       movl %eax,PT_EAX(%esp)
 sysenter_after_call:
+       movl %eax,PT_EAX(%esp)
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
@@ -517,6 +517,7 @@ ENTRY(system_call)
        jae syscall_badsys
 syscall_call:
        call *sys_call_table(,%eax,4)
+syscall_after_call:
        movl %eax,PT_EAX(%esp)          # store the return value
 syscall_exit:
        LOCKDEP_SYS_EXIT
@@ -531,6 +532,7 @@ syscall_exit:
 restore_all:
        TRACE_IRQS_IRET
 restore_all_notrace:
+#ifdef CONFIG_X86_ESPFIX32
        movl PT_EFLAGS(%esp), %eax      # mix EFLAGS, SS and CS
        # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
        # are returning to the kernel.
@@ -541,6 +543,7 @@ restore_all_notrace:
        cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
        CFI_REMEMBER_STATE
        je ldt_ss                       # returning to user-space with LDT SS
+#endif
 restore_nocheck:
        RESTORE_REGS 4                  # skip orig_eax/error_code
 irq_return:
@@ -553,6 +556,7 @@ ENTRY(iret_exc)
 .previous
        _ASM_EXTABLE(irq_return,iret_exc)
 
+#ifdef CONFIG_X86_ESPFIX32
        CFI_RESTORE_STATE
 ldt_ss:
 #ifdef CONFIG_PARAVIRT
@@ -596,6 +600,7 @@ ldt_ss:
        lss (%esp), %esp                /* switch to espfix segment */
        CFI_ADJUST_CFA_OFFSET -8
        jmp restore_nocheck
+#endif
        CFI_ENDPROC
 ENDPROC(system_call)
 
@@ -686,12 +691,12 @@ syscall_fault:
 END(syscall_fault)
 
 syscall_badsys:
-       movl $-ENOSYS,PT_EAX(%esp)
-       jmp syscall_exit
+       movl $-ENOSYS,%eax
+       jmp syscall_after_call
 END(syscall_badsys)
 
 sysenter_badsys:
-       movl $-ENOSYS,PT_EAX(%esp)
+       movl $-ENOSYS,%eax
        jmp sysenter_after_call
 END(syscall_badsys)
        CFI_ENDPROC
@@ -708,6 +713,7 @@ END(syscall_badsys)
  * the high word of the segment base from the GDT and swiches to the
  * normal stack and adjusts ESP with the matching offset.
  */
+#ifdef CONFIG_X86_ESPFIX32
        /* fixup the stack */
        mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
        mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
@@ -717,8 +723,10 @@ END(syscall_badsys)
        pushl_cfi %eax
        lss (%esp), %esp                /* switch to the normal stack segment */
        CFI_ADJUST_CFA_OFFSET -8
+#endif
 .endm
 .macro UNWIND_ESPFIX_STACK
+#ifdef CONFIG_X86_ESPFIX32
        movl %ss, %eax
        /* see if on espfix stack */
        cmpw $__ESPFIX_SS, %ax
@@ -729,6 +737,7 @@ END(syscall_badsys)
        /* switch to normal stack */
        FIXUP_ESPFIX_STACK
 27:
+#endif
 .endm
 
 /*
@@ -1336,11 +1345,13 @@ END(debug)
 ENTRY(nmi)
        RING0_INT_FRAME
        ASM_CLAC
+#ifdef CONFIG_X86_ESPFIX32
        pushl_cfi %eax
        movl %ss, %eax
        cmpw $__ESPFIX_SS, %ax
        popl_cfi %eax
        je nmi_espfix_stack
+#endif
        cmpl $ia32_sysenter_target,(%esp)
        je nmi_stack_fixup
        pushl_cfi %eax
@@ -1380,6 +1391,7 @@ nmi_debug_stack_check:
        FIX_STACK 24, nmi_stack_correct, 1
        jmp nmi_stack_correct
 
+#ifdef CONFIG_X86_ESPFIX32
 nmi_espfix_stack:
        /* We have a RING0_INT_FRAME here.
         *
@@ -1401,6 +1413,7 @@ nmi_espfix_stack:
        lss 12+4(%esp), %esp            # back to espfix stack
        CFI_ADJUST_CFA_OFFSET -24
        jmp irq_return
+#endif
        CFI_ENDPROC
 END(nmi)
 
index 7ac938a4bfabfee9f0fb1f70ea996679785cb819..8c6b5c2284c72bc3e76f660dd20d063d754489ff 100644 (file)
@@ -58,6 +58,7 @@
 #include <asm/asm.h>
 #include <asm/context_tracking.h>
 #include <asm/smap.h>
+#include <asm/pgtable_types.h>
 #include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
@@ -365,7 +366,7 @@ ENDPROC(native_usergs_sysret64)
        /*CFI_REL_OFFSET        ss,0*/
        pushq_cfi %rax /* rsp */
        CFI_REL_OFFSET  rsp,0
-       pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_BIT1) /* eflags - interrupts on */
+       pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
        /*CFI_REL_OFFSET        rflags,0*/
        pushq_cfi $__KERNEL_CS /* cs */
        /*CFI_REL_OFFSET        cs,0*/
@@ -1056,12 +1057,45 @@ restore_args:
 
 irq_return:
        INTERRUPT_RETURN
-       _ASM_EXTABLE(irq_return, bad_iret)
 
-#ifdef CONFIG_PARAVIRT
 ENTRY(native_iret)
+       /*
+        * Are we returning to a stack segment from the LDT?  Note: in
+        * 64-bit mode SS:RSP on the exception stack is always valid.
+        */
+#ifdef CONFIG_X86_ESPFIX64
+       testb $4,(SS-RIP)(%rsp)
+       jnz native_irq_return_ldt
+#endif
+
+native_irq_return_iret:
        iretq
-       _ASM_EXTABLE(native_iret, bad_iret)
+       _ASM_EXTABLE(native_irq_return_iret, bad_iret)
+
+#ifdef CONFIG_X86_ESPFIX64
+native_irq_return_ldt:
+       pushq_cfi %rax
+       pushq_cfi %rdi
+       SWAPGS
+       movq PER_CPU_VAR(espfix_waddr),%rdi
+       movq %rax,(0*8)(%rdi)   /* RAX */
+       movq (2*8)(%rsp),%rax   /* RIP */
+       movq %rax,(1*8)(%rdi)
+       movq (3*8)(%rsp),%rax   /* CS */
+       movq %rax,(2*8)(%rdi)
+       movq (4*8)(%rsp),%rax   /* RFLAGS */
+       movq %rax,(3*8)(%rdi)
+       movq (6*8)(%rsp),%rax   /* SS */
+       movq %rax,(5*8)(%rdi)
+       movq (5*8)(%rsp),%rax   /* RSP */
+       movq %rax,(4*8)(%rdi)
+       andl $0xffff0000,%eax
+       popq_cfi %rdi
+       orq PER_CPU_VAR(espfix_stack),%rax
+       SWAPGS
+       movq %rax,%rsp
+       popq_cfi %rax
+       jmp native_irq_return_iret
 #endif
 
        .section .fixup,"ax"
@@ -1127,9 +1161,40 @@ ENTRY(retint_kernel)
        call preempt_schedule_irq
        jmp exit_intr
 #endif
-
        CFI_ENDPROC
 END(common_interrupt)
+
+       /*
+        * If IRET takes a fault on the espfix stack, then we
+        * end up promoting it to a doublefault.  In that case,
+        * modify the stack to make it look like we just entered
+        * the #GP handler from user space, similar to bad_iret.
+        */
+#ifdef CONFIG_X86_ESPFIX64
+       ALIGN
+__do_double_fault:
+       XCPT_FRAME 1 RDI+8
+       movq RSP(%rdi),%rax             /* Trap on the espfix stack? */
+       sarq $PGDIR_SHIFT,%rax
+       cmpl $ESPFIX_PGD_ENTRY,%eax
+       jne do_double_fault             /* No, just deliver the fault */
+       cmpl $__KERNEL_CS,CS(%rdi)
+       jne do_double_fault
+       movq RIP(%rdi),%rax
+       cmpq $native_irq_return_iret,%rax
+       jne do_double_fault             /* This shouldn't happen... */
+       movq PER_CPU_VAR(kernel_stack),%rax
+       subq $(6*8-KERNEL_STACK_OFFSET),%rax    /* Reset to original stack */
+       movq %rax,RSP(%rdi)
+       movq $0,(%rax)                  /* Missing (lost) #GP error code */
+       movq $general_protection,RIP(%rdi)
+       retq
+       CFI_ENDPROC
+END(__do_double_fault)
+#else
+# define __do_double_fault do_double_fault
+#endif
+
 /*
  * End of kprobes section
  */
@@ -1298,7 +1363,7 @@ zeroentry overflow do_overflow
 zeroentry bounds do_bounds
 zeroentry invalid_op do_invalid_op
 zeroentry device_not_available do_device_not_available
-paranoiderrorentry double_fault do_double_fault
+paranoiderrorentry double_fault __do_double_fault
 zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
 errorentry invalid_TSS do_invalid_TSS
 errorentry segment_not_present do_segment_not_present
@@ -1585,7 +1650,7 @@ error_sti:
  */
 error_kernelspace:
        incl %ebx
-       leaq irq_return(%rip),%rcx
+       leaq native_irq_return_iret(%rip),%rcx
        cmpq %rcx,RIP+8(%rsp)
        je error_swapgs
        movl %ecx,%eax  /* zero extend */
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
new file mode 100644 (file)
index 0000000..94d857f
--- /dev/null
@@ -0,0 +1,208 @@
+/* ----------------------------------------------------------------------- *
+ *
+ *   Copyright 2014 Intel Corporation; author: H. Peter Anvin
+ *
+ *   This program is free software; you can redistribute it and/or modify it
+ *   under the terms and conditions of the GNU General Public License,
+ *   version 2, as published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope it will be useful, but WITHOUT
+ *   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *   more details.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * The IRET instruction, when returning to a 16-bit segment, only
+ * restores the bottom 16 bits of the user space stack pointer.  This
+ * causes some 16-bit software to break, but it also leaks kernel state
+ * to user space.
+ *
+ * This works around this by creating percpu "ministacks", each of which
+ * is mapped 2^16 times 64K apart.  When we detect that the return SS is
+ * on the LDT, we copy the IRET frame to the ministack and use the
+ * relevant alias to return to userspace.  The ministacks are mapped
+ * readonly, so if the IRET fault we promote #GP to #DF which is an IST
+ * vector and thus has its own stack; we then do the fixup in the #DF
+ * handler.
+ *
+ * This file sets up the ministacks and the related page tables.  The
+ * actual ministack invocation is in entry_64.S.
+ */
+
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/setup.h>
+#include <asm/espfix.h>
+
+/*
+ * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
+ * it up to a cache line to avoid unnecessary sharing.
+ */
+#define ESPFIX_STACK_SIZE      (8*8UL)
+#define ESPFIX_STACKS_PER_PAGE (PAGE_SIZE/ESPFIX_STACK_SIZE)
+
+/* There is address space for how many espfix pages? */
+#define ESPFIX_PAGE_SPACE      (1UL << (PGDIR_SHIFT-PAGE_SHIFT-16))
+
+#define ESPFIX_MAX_CPUS                (ESPFIX_STACKS_PER_PAGE * ESPFIX_PAGE_SPACE)
+#if CONFIG_NR_CPUS > ESPFIX_MAX_CPUS
+# error "Need more than one PGD for the ESPFIX hack"
+#endif
+
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+
+/* This contains the *bottom* address of the espfix stack */
+DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
+DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
+
+/* Initialization mutex - should this be a spinlock? */
+static DEFINE_MUTEX(espfix_init_mutex);
+
+/* Page allocation bitmap - each page serves ESPFIX_STACKS_PER_PAGE CPUs */
+#define ESPFIX_MAX_PAGES  DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
+static void *espfix_pages[ESPFIX_MAX_PAGES];
+
+static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
+       __aligned(PAGE_SIZE);
+
+static unsigned int page_random, slot_random;
+
+/*
+ * This returns the bottom address of the espfix stack for a specific CPU.
+ * The math allows for a non-power-of-two ESPFIX_STACK_SIZE, in which case
+ * we have to account for some amount of padding at the end of each page.
+ */
+static inline unsigned long espfix_base_addr(unsigned int cpu)
+{
+       unsigned long page, slot;
+       unsigned long addr;
+
+       page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
+       slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;
+       addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE);
+       addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16);
+       addr += ESPFIX_BASE_ADDR;
+       return addr;
+}
+
+#define PTE_STRIDE        (65536/PAGE_SIZE)
+#define ESPFIX_PTE_CLONES (PTRS_PER_PTE/PTE_STRIDE)
+#define ESPFIX_PMD_CLONES PTRS_PER_PMD
+#define ESPFIX_PUD_CLONES (65536/(ESPFIX_PTE_CLONES*ESPFIX_PMD_CLONES))
+
+#define PGTABLE_PROT     ((_KERNPG_TABLE & ~_PAGE_RW) | _PAGE_NX)
+
+static void init_espfix_random(void)
+{
+       unsigned long rand;
+
+       /*
+        * This is run before the entropy pools are initialized,
+        * but this is hopefully better than nothing.
+        */
+       if (!arch_get_random_long(&rand)) {
+               /* The constant is an arbitrary large prime */
+               rdtscll(rand);
+               rand *= 0xc345c6b72fd16123UL;
+       }
+
+       slot_random = rand % ESPFIX_STACKS_PER_PAGE;
+       page_random = (rand / ESPFIX_STACKS_PER_PAGE)
+               & (ESPFIX_PAGE_SPACE - 1);
+}
+
+void __init init_espfix_bsp(void)
+{
+       pgd_t *pgd_p;
+       pteval_t ptemask;
+
+       ptemask = __supported_pte_mask;
+
+       /* Install the espfix pud into the kernel page directory */
+       pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
+       pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
+
+       /* Randomize the locations */
+       init_espfix_random();
+
+       /* The rest is the same as for any other processor */
+       init_espfix_ap();
+}
+
+void init_espfix_ap(void)
+{
+       unsigned int cpu, page;
+       unsigned long addr;
+       pud_t pud, *pud_p;
+       pmd_t pmd, *pmd_p;
+       pte_t pte, *pte_p;
+       int n;
+       void *stack_page;
+       pteval_t ptemask;
+
+       /* We only have to do this once... */
+       if (likely(this_cpu_read(espfix_stack)))
+               return;         /* Already initialized */
+
+       cpu = smp_processor_id();
+       addr = espfix_base_addr(cpu);
+       page = cpu/ESPFIX_STACKS_PER_PAGE;
+
+       /* Did another CPU already set this up? */
+       stack_page = ACCESS_ONCE(espfix_pages[page]);
+       if (likely(stack_page))
+               goto done;
+
+       mutex_lock(&espfix_init_mutex);
+
+       /* Did we race on the lock? */
+       stack_page = ACCESS_ONCE(espfix_pages[page]);
+       if (stack_page)
+               goto unlock_done;
+
+       ptemask = __supported_pte_mask;
+
+       pud_p = &espfix_pud_page[pud_index(addr)];
+       pud = *pud_p;
+       if (!pud_present(pud)) {
+               pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
+               pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
+               paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+               for (n = 0; n < ESPFIX_PUD_CLONES; n++)
+                       set_pud(&pud_p[n], pud);
+       }
+
+       pmd_p = pmd_offset(&pud, addr);
+       pmd = *pmd_p;
+       if (!pmd_present(pmd)) {
+               pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
+               pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
+               paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+               for (n = 0; n < ESPFIX_PMD_CLONES; n++)
+                       set_pmd(&pmd_p[n], pmd);
+       }
+
+       pte_p = pte_offset_kernel(&pmd, addr);
+       stack_page = (void *)__get_free_page(GFP_KERNEL);
+       pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
+       for (n = 0; n < ESPFIX_PTE_CLONES; n++)
+               set_pte(&pte_p[n*PTE_STRIDE], pte);
+
+       /* Job is done for this CPU and any CPU which shares this page */
+       ACCESS_ONCE(espfix_pages[page]) = stack_page;
+
+unlock_done:
+       mutex_unlock(&espfix_init_mutex);
+done:
+       this_cpu_write(espfix_stack, addr);
+       this_cpu_write(espfix_waddr, (unsigned long)stack_page
+                      + (addr & ~PAGE_MASK));
+}
index dcbbaa165bdeed61dd2b504a13ca05ced99737c7..c37886d759ccac2736c36b357cc0f399786f2fb3 100644 (file)
@@ -20,8 +20,6 @@
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
 
-int sysctl_ldt16 = 0;
-
 #ifdef CONFIG_SMP
 static void flush_ldt(void *current_mm)
 {
@@ -231,16 +229,10 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
                }
        }
 
-       /*
-        * On x86-64 we do not support 16-bit segments due to
-        * IRET leaking the high bits of the kernel stack address.
-        */
-#ifdef CONFIG_X86_64
-       if (!ldt_info.seg_32bit && !sysctl_ldt16) {
+       if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
                error = -EINVAL;
                goto out_unlock;
        }
-#endif
 
        fill_ldt(&ldt, &ldt_info);
        if (oldmode)
index 3f08f34f93ebc480041d0b448e3b12869450a91d..a1da6737ba5b80c4ee636204d49d4813348ef903 100644 (file)
@@ -6,7 +6,6 @@ DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
 DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(pv_cpu_ops, iret, "iretq");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
@@ -50,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_irq_ops, save_fl);
                PATCH_SITE(pv_irq_ops, irq_enable);
                PATCH_SITE(pv_irq_ops, irq_disable);
-               PATCH_SITE(pv_cpu_ops, iret);
                PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
                PATCH_SITE(pv_cpu_ops, usergs_sysret32);
                PATCH_SITE(pv_cpu_ops, usergs_sysret64);
index 7305f7dfc7abe7ef46875a9028556de99f73e79e..0339f5c14bf9e26012988d1996bbbcb2827c9d88 100644 (file)
@@ -147,7 +147,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                childregs->bp = arg;
                childregs->orig_ax = -1;
                childregs->cs = __KERNEL_CS | get_kernel_rpl();
-               childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
+               childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
                p->fpu_counter = 0;
                p->thread.io_bitmap_ptr = NULL;
                memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
index 355ae06dbf94f4fb6de52a126a73f2570a8dad72..f99a242730e95ce185900990305e360444d36b44 100644 (file)
@@ -176,7 +176,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                childregs->bp = arg;
                childregs->orig_ax = -1;
                childregs->cs = __KERNEL_CS | get_kernel_rpl();
-               childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
+               childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
                return 0;
        }
        *childregs = *current_pt_regs();
index 29a8120e6fe88a88012490a51130be8b89ea2193..baa61e7370b751d6a42025db73f6650e06ce4f76 100644 (file)
@@ -1475,15 +1475,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
        force_sig_info(SIGTRAP, &info, tsk);
 }
 
-
-#ifdef CONFIG_X86_32
-# define IS_IA32       1
-#elif defined CONFIG_IA32_EMULATION
-# define IS_IA32       is_compat_task()
-#else
-# define IS_IA32       0
-#endif
-
 /*
  * We must return the syscall number to actually look up in the table.
  * This can be -1L to skip running any syscall at all.
@@ -1521,7 +1512,7 @@ long syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->orig_ax);
 
-       if (IS_IA32)
+       if (is_ia32_task())
                audit_syscall_entry(AUDIT_ARCH_I386,
                                    regs->orig_ax,
                                    regs->bx, regs->cx,
index 2a26819bb6a8dc82be7526e6380d28a390ce0155..80eab01c1a68a735538bd0b6c479520f10f853aa 100644 (file)
@@ -37,10 +37,12 @@ static void remove_e820_regions(struct resource *avail)
 
 void arch_remove_reservations(struct resource *avail)
 {
-       /* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */
+       /*
+        * Trim out BIOS area (high 2MB) and E820 regions. We do not remove
+        * the low 1MB unconditionally, as this area is needed for some ISA
+        * cards requiring a memory range, e.g. the i82365 PCMCIA controller.
+        */
        if (avail->flags & IORESOURCE_MEM) {
-               if (avail->start < BIOS_END)
-                       avail->start = BIOS_END;
                resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);
 
                remove_e820_regions(avail);
index 087ab2af381abb99089748672f5eb7f122a7f5c3..66deef41512f44618475de3f2b9b07b3c566fb4e 100644 (file)
@@ -677,6 +677,11 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
                 * handler too.
                 */
                regs->flags &= ~X86_EFLAGS_TF;
+               /*
+                * Ensure the signal handler starts with the new fpu state.
+                */
+               if (used_math())
+                       drop_init_fpu(current);
        }
        signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
 }
index bfd348e9936926f56e13ffee8b67cc77ec4a4338..87084ab90d190c4753436922b592f54f82ae4f4c 100644 (file)
@@ -264,6 +264,13 @@ notrace static void __cpuinit start_secondary(void *unused)
         */
        check_tsc_sync_target();
 
+       /*
+        * Enable the espfix hack for this CPU
+        */
+#ifdef CONFIG_X86_ESPFIX64
+       init_espfix_ap();
+#endif
+
        /*
         * We need to hold vector_lock so there the set of online cpus
         * does not change while we are assigning vectors to cpus.  Holding
@@ -1277,6 +1284,9 @@ static void remove_siblinginfo(int cpu)
 
        for_each_cpu(sibling, cpu_sibling_mask(cpu))
                cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
+       for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
+               cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
+       cpumask_clear(cpu_llc_shared_mask(cpu));
        cpumask_clear(cpu_sibling_mask(cpu));
        cpumask_clear(cpu_core_mask(cpu));
        c->phys_proc_id = 0;
index 098b3cfda72ee152ab3ce5505ad5843b3778b699..4e27ba53c40cde3f8d1c7256d32f2358dd4d8b65 100644 (file)
@@ -968,14 +968,17 @@ void __init tsc_init(void)
 
        x86_init.timers.tsc_pre_init();
 
-       if (!cpu_has_tsc)
+       if (!cpu_has_tsc) {
+               setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
                return;
+       }
 
        tsc_khz = x86_platform.calibrate_tsc();
        cpu_khz = tsc_khz;
 
        if (!tsc_khz) {
                mark_tsc_unstable("could not calculate TSC khz");
+               setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
                return;
        }
 
index 9a907a67be8f48abc0399017865a4697968ae2c3..c52c07efe970ac8ed6d6b3fd014dbea2304415ed 100644 (file)
@@ -125,10 +125,10 @@ static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
        if (!show_unhandled_signals)
                return;
 
-       pr_notice_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
-                             level, current->comm, task_pid_nr(current),
-                             message, regs->ip, regs->cs,
-                             regs->sp, regs->ax, regs->si, regs->di);
+       printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
+                          level, current->comm, task_pid_nr(current),
+                          message, regs->ip, regs->cs,
+                          regs->sp, regs->ax, regs->si, regs->di);
 }
 
 static int addr_to_vsyscall_nr(unsigned long addr)
index ada87a329edcde71763601e19b1fc4a8c9062f3d..1ee723298e90694ee417c1ea2549a0ef15359573 100644 (file)
@@ -268,8 +268,6 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
        if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
                return -1;
 
-       drop_init_fpu(tsk);     /* trigger finit */
-
        return 0;
 }
 
@@ -400,8 +398,11 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
                        set_used_math();
                }
 
-               if (use_eager_fpu())
+               if (use_eager_fpu()) {
+                       preempt_disable();
                        math_state_restore();
+                       preempt_enable();
+               }
 
                return err;
        } else {
index a47a3e54b964b5bd486e2d199816fb12acdc9170..bdccfb62aa0d1324dc62090981c485dbe6d50a6a 100644 (file)
@@ -27,6 +27,7 @@ config KVM
        select MMU_NOTIFIER
        select ANON_INODES
        select HAVE_KVM_IRQCHIP
+       select HAVE_KVM_IRQFD
        select HAVE_KVM_IRQ_ROUTING
        select HAVE_KVM_EVENTFD
        select KVM_APIC_ARCHITECTURE
@@ -38,6 +39,7 @@ config KVM
        select PERF_EVENTS
        select HAVE_KVM_MSI
        select HAVE_KVM_CPU_RELAX_INTERCEPT
+       select KVM_VFIO
        ---help---
          Support hosting fully virtualized guest machines using hardware
          virtualization extensions.  You will need a fairly recent
index d609e1d8404852d02b9fdfeed0aa2103580b7cda..25d22b2d6509e3e0924c39641214085de2aa2b52 100644 (file)
@@ -5,12 +5,13 @@ CFLAGS_x86.o := -I.
 CFLAGS_svm.o := -I.
 CFLAGS_vmx.o := -I.
 
-kvm-y                  += $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
-                               coalesced_mmio.o irq_comm.o eventfd.o \
-                               irqchip.o)
-kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT)    += $(addprefix ../../../virt/kvm/, \
-                               assigned-dev.o iommu.o)
-kvm-$(CONFIG_KVM_ASYNC_PF)     += $(addprefix ../../../virt/kvm/, async_pf.o)
+KVM := ../../../virt/kvm
+
+kvm-y                  += $(KVM)/kvm_main.o $(KVM)/ioapic.o \
+                               $(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o \
+                               $(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o
+kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT)    += $(KVM)/assigned-dev.o $(KVM)/iommu.o
+kvm-$(CONFIG_KVM_ASYNC_PF)     += $(KVM)/async_pf.o
 
 kvm-y                  += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
                           i8254.o cpuid.o pmu.o
index a20ecb5b6cbf3543490ab6a74a969aa45a1862c5..89d288237b9c91b1b32077236c90a7eca5690428 100644 (file)
@@ -187,8 +187,14 @@ static bool supported_xcr0_bit(unsigned bit)
 
 #define F(x) bit(X86_FEATURE_##x)
 
-static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
-                        u32 index, int *nent, int maxnent)
+static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
+                                  u32 func, u32 index, int *nent, int maxnent)
+{
+       return 0;
+}
+
+static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+                                u32 index, int *nent, int maxnent)
 {
        int r;
        unsigned f_nx = is_efer_nx() ? F(NX) : 0;
@@ -480,6 +486,15 @@ out:
        return r;
 }
 
+static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
+                       u32 idx, int *nent, int maxnent, unsigned int type)
+{
+       if (type == KVM_GET_EMULATED_CPUID)
+               return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);
+
+       return __do_cpuid_ent(entry, func, idx, nent, maxnent);
+}
+
 #undef F
 
 struct kvm_cpuid_param {
@@ -494,8 +509,34 @@ static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
        return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
 }
 
-int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
-                                     struct kvm_cpuid_entry2 __user *entries)
+static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
+                                __u32 num_entries, unsigned int ioctl_type)
+{
+       int i;
+
+       if (ioctl_type != KVM_GET_EMULATED_CPUID)
+               return false;
+
+       /*
+        * We want to make sure that ->padding is being passed clean from
+        * userspace in case we want to use it for something in the future.
+        *
+        * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
+        * have to give ourselves satisfied only with the emulated side. /me
+        * sheds a tear.
+        */
+       for (i = 0; i < num_entries; i++) {
+               if (entries[i].padding[0] ||
+                   entries[i].padding[1] ||
+                   entries[i].padding[2])
+                       return true;
+       }
+       return false;
+}
+
+int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
+                           struct kvm_cpuid_entry2 __user *entries,
+                           unsigned int type)
 {
        struct kvm_cpuid_entry2 *cpuid_entries;
        int limit, nent = 0, r = -E2BIG, i;
@@ -512,6 +553,10 @@ int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                goto out;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                cpuid->nent = KVM_MAX_CPUID_ENTRIES;
+
+       if (sanity_check_entries(entries, cpuid->nent, type))
+               return -EINVAL;
+
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
        if (!cpuid_entries)
@@ -525,7 +570,7 @@ int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                        continue;
 
                r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
-                               &nent, cpuid->nent);
+                               &nent, cpuid->nent, type);
 
                if (r)
                        goto out_free;
@@ -536,7 +581,7 @@ int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                limit = cpuid_entries[nent - 1].eax;
                for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
                        r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
-                                    &nent, cpuid->nent);
+                                    &nent, cpuid->nent, type);
 
                if (r)
                        goto out_free;
index b7fd07984888e9f348c04df9043c6596928c1cd3..f1e4895174b2472da123b8b74cbee2460b239ccf 100644 (file)
@@ -6,8 +6,9 @@
 void kvm_update_cpuid(struct kvm_vcpu *vcpu);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index);
-int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
-                                     struct kvm_cpuid_entry2 __user *entries);
+int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
+                           struct kvm_cpuid_entry2 __user *entries,
+                           unsigned int type);
 int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries);
index 5484d54582ca75e9eee093fa3f69996ac9bb9889..4c01f022c6ac724ecd232ee64018027a19775430 100644 (file)
@@ -663,11 +663,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
        masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
 }
 
-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
-{
-       register_address_increment(ctxt, &ctxt->_eip, rel);
-}
-
 static u32 desc_limit_scaled(struct desc_struct *desc)
 {
        u32 limit = get_desc_limit(desc);
@@ -741,6 +736,38 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
        return emulate_exception(ctxt, NM_VECTOR, 0, false);
 }
 
+static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
+                              int cs_l)
+{
+       switch (ctxt->op_bytes) {
+       case 2:
+               ctxt->_eip = (u16)dst;
+               break;
+       case 4:
+               ctxt->_eip = (u32)dst;
+               break;
+       case 8:
+               if ((cs_l && is_noncanonical_address(dst)) ||
+                   (!cs_l && (dst & ~(u32)-1)))
+                       return emulate_gp(ctxt, 0);
+               ctxt->_eip = dst;
+               break;
+       default:
+               WARN(1, "unsupported eip assignment size\n");
+       }
+       return X86EMUL_CONTINUE;
+}
+
+static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
+{
+       return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
+}
+
+static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+{
+       return assign_eip_near(ctxt, ctxt->_eip + rel);
+}
+
 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
 {
        u16 selector;
@@ -2161,13 +2188,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
        case 2: /* call near abs */ {
                long int old_eip;
                old_eip = ctxt->_eip;
-               ctxt->_eip = ctxt->src.val;
+               rc = assign_eip_near(ctxt, ctxt->src.val);
+               if (rc != X86EMUL_CONTINUE)
+                       break;
                ctxt->src.val = old_eip;
                rc = em_push(ctxt);
                break;
        }
        case 4: /* jmp abs */
-               ctxt->_eip = ctxt->src.val;
+               rc = assign_eip_near(ctxt, ctxt->src.val);
                break;
        case 5: /* jmp far */
                rc = em_jmp_far(ctxt);
@@ -2199,16 +2228,21 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
 
 static int em_ret(struct x86_emulate_ctxt *ctxt)
 {
-       ctxt->dst.type = OP_REG;
-       ctxt->dst.addr.reg = &ctxt->_eip;
-       ctxt->dst.bytes = ctxt->op_bytes;
-       return em_pop(ctxt);
+       int rc;
+       unsigned long eip;
+
+       rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+
+       return assign_eip_near(ctxt, eip);
 }
 
 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
 {
        int rc;
        unsigned long cs;
+       int cpl = ctxt->ops->cpl(ctxt);
 
        rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
@@ -2218,6 +2252,9 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
        rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;
+       /* Outer-privilege level return is not implemented */
+       if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
+               return X86EMUL_UNHANDLEABLE;
        rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
        return rc;
 }
@@ -2465,7 +2502,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
 {
        const struct x86_emulate_ops *ops = ctxt->ops;
        struct desc_struct cs, ss;
-       u64 msr_data;
+       u64 msr_data, rcx, rdx;
        int usermode;
        u16 cs_sel = 0, ss_sel = 0;
 
@@ -2481,6 +2518,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
        else
                usermode = X86EMUL_MODE_PROT32;
 
+       rcx = reg_read(ctxt, VCPU_REGS_RCX);
+       rdx = reg_read(ctxt, VCPU_REGS_RDX);
+
        cs.dpl = 3;
        ss.dpl = 3;
        ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
@@ -2498,6 +2538,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
                ss_sel = cs_sel + 8;
                cs.d = 0;
                cs.l = 1;
+               if (is_noncanonical_address(rcx) ||
+                   is_noncanonical_address(rdx))
+                       return emulate_gp(ctxt, 0);
                break;
        }
        cs_sel |= SELECTOR_RPL_MASK;
@@ -2506,8 +2549,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
        ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
        ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
 
-       ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
-       *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
+       ctxt->_eip = rdx;
+       *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
 
        return X86EMUL_CONTINUE;
 }
@@ -3046,10 +3089,13 @@ static int em_aad(struct x86_emulate_ctxt *ctxt)
 
 static int em_call(struct x86_emulate_ctxt *ctxt)
 {
+       int rc;
        long rel = ctxt->src.val;
 
        ctxt->src.val = (unsigned long)ctxt->_eip;
-       jmp_rel(ctxt, rel);
+       rc = jmp_rel(ctxt, rel);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
        return em_push(ctxt);
 }
 
@@ -3081,11 +3127,12 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
 {
        int rc;
+       unsigned long eip;
 
-       ctxt->dst.type = OP_REG;
-       ctxt->dst.addr.reg = &ctxt->_eip;
-       ctxt->dst.bytes = ctxt->op_bytes;
-       rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
+       rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+       rc = assign_eip_near(ctxt, eip);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        rsp_increment(ctxt, ctxt->src.val);
@@ -3375,20 +3422,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
 
 static int em_loop(struct x86_emulate_ctxt *ctxt)
 {
+       int rc = X86EMUL_CONTINUE;
+
        register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
        if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
            (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
-               jmp_rel(ctxt, ctxt->src.val);
+               rc = jmp_rel(ctxt, ctxt->src.val);
 
-       return X86EMUL_CONTINUE;
+       return rc;
 }
 
 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
 {
+       int rc = X86EMUL_CONTINUE;
+
        if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
-               jmp_rel(ctxt, ctxt->src.val);
+               rc = jmp_rel(ctxt, ctxt->src.val);
 
-       return X86EMUL_CONTINUE;
+       return rc;
 }
 
 static int em_in(struct x86_emulate_ctxt *ctxt)
@@ -4717,7 +4768,7 @@ special_insn:
                break;
        case 0x70 ... 0x7f: /* jcc (short) */
                if (test_cc(ctxt->b, ctxt->eflags))
-                       jmp_rel(ctxt, ctxt->src.val);
+                       rc = jmp_rel(ctxt, ctxt->src.val);
                break;
        case 0x8d: /* lea r16/r32, m */
                ctxt->dst.val = ctxt->src.addr.mem.ea;
@@ -4746,7 +4797,7 @@ special_insn:
                break;
        case 0xe9: /* jmp rel */
        case 0xeb: /* jmp rel short */
-               jmp_rel(ctxt, ctxt->src.val);
+               rc = jmp_rel(ctxt, ctxt->src.val);
                ctxt->dst.type = OP_NONE; /* Disable writeback. */
                break;
        case 0xf4:              /* hlt */
@@ -4858,7 +4909,7 @@ twobyte_insn:
                break;
        case 0x80 ... 0x8f: /* jnz rel, etc*/
                if (test_cc(ctxt->b, ctxt->eflags))
-                       jmp_rel(ctxt, ctxt->src.val);
+                       rc = jmp_rel(ctxt, ctxt->src.val);
                break;
        case 0x90 ... 0x9f:     /* setcc r/m8 */
                ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
index 518d86471b76f0be7460c6e038755a18083b26b4..298781d4cfb44b7c6d6536d6d2779ada2eeb150a 100644 (file)
@@ -262,8 +262,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
                return;
 
        timer = &pit->pit_state.timer;
+       mutex_lock(&pit->pit_state.lock);
        if (hrtimer_cancel(timer))
                hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+       mutex_unlock(&pit->pit_state.lock);
 }
 
 static void destroy_pit_timer(struct kvm_pit *pit)
index 484bc874688b4ba2bfb8199702671779ffd69d2f..3ec38cb56bd5666880e6959233fbfd174ca731ed 100644 (file)
@@ -108,7 +108,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
 
        vector = kvm_cpu_get_extint(v);
 
-       if (kvm_apic_vid_enabled(v->kvm) || vector != -1)
+       if (vector != -1)
                return vector;                  /* PIC */
 
        return kvm_get_apic_interrupt(v);       /* APIC */
index 279d093524b415f36196dd69211ebedd3299176e..681e4e251f0003dcf89d25b41d892a6807121303 100644 (file)
@@ -362,25 +362,46 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 
 static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 {
-       apic->irr_pending = false;
+       struct kvm_vcpu *vcpu;
+
+       vcpu = apic->vcpu;
+
        apic_clear_vector(vec, apic->regs + APIC_IRR);
-       if (apic_search_irr(apic) != -1)
-               apic->irr_pending = true;
+       if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
+               /* try to update RVI */
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
+       else {
+               vec = apic_search_irr(apic);
+               apic->irr_pending = (vec != -1);
+       }
 }
 
 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 {
-       /* Note that we never get here with APIC virtualization enabled.  */
+       struct kvm_vcpu *vcpu;
+
+       if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
+               return;
+
+       vcpu = apic->vcpu;
 
-       if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
-               ++apic->isr_count;
-       BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
        /*
-        * ISR (in service register) bit is set when injecting an interrupt.
-        * The highest vector is injected. Thus the latest bit set matches
-        * the highest bit in ISR.
+        * With APIC virtualization enabled, all caching is disabled
+        * because the processor can modify ISR under the hood.  Instead
+        * just set SVI.
         */
-       apic->highest_isr_cache = vec;
+       if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
+               kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
+       else {
+               ++apic->isr_count;
+               BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
+               /*
+                * ISR (in service register) bit is set when injecting an interrupt.
+                * The highest vector is injected. Thus the latest bit set matches
+                * the highest bit in ISR.
+                */
+               apic->highest_isr_cache = vec;
+       }
 }
 
 static inline int apic_find_highest_isr(struct kvm_lapic *apic)
@@ -1641,11 +1662,16 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
        int vector = kvm_apic_has_interrupt(vcpu);
        struct kvm_lapic *apic = vcpu->arch.apic;
 
-       /* Note that we never get here with APIC virtualization enabled.  */
-
        if (vector == -1)
                return -1;
 
+       /*
+        * We get here even with APIC virtualization enabled, if doing
+        * nested virtualization and L1 runs with the "acknowledge interrupt
+        * on exit" mode.  Then we cannot inject the interrupt via RVI,
+        * because the process would deliver it through the IDT.
+        */
+
        apic_set_isr(vector, apic);
        apic_update_ppr(apic);
        apic_clear_irr(vector, apic);
index 711c649f80b7eef961b910f20d98b6a5a2f34736..0cc34f594c2ad208f3c9a284f8c7eded1162f08e 100644 (file)
@@ -3072,7 +3072,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;
 
-       vcpu_clear_mmio_info(vcpu, ~0ul);
+       vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
        kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;
@@ -3241,7 +3241,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
        arch.direct_map = vcpu->arch.mmu.direct_map;
        arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
 
-       return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
+       return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch);
 }
 
 static bool can_do_async_pf(struct kvm_vcpu *vcpu)
@@ -4229,7 +4229,7 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
        if (nr_to_scan == 0)
                goto out;
 
-       raw_spin_lock(&kvm_lock);
+       spin_lock(&kvm_lock);
 
        list_for_each_entry(kvm, &vm_list, vm_list) {
                int idx;
@@ -4265,7 +4265,7 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
                break;
        }
 
-       raw_spin_unlock(&kvm_lock);
+       spin_unlock(&kvm_lock);
 
 out:
        return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
index 7e6090e13237326513c07e45e7f038bd4a53bf6f..a6a6370c20107a7fbcc0abc2702e4cfd5382e650 100644 (file)
@@ -69,6 +69,7 @@ struct guest_walker {
        pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
        pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
+       bool pte_writable[PT_MAX_FULL_LEVELS];
        unsigned pt_access;
        unsigned pte_access;
        gfn_t gfn;
@@ -130,6 +131,22 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                if (pte == orig_pte)
                        continue;
 
+               /*
+                * If the slot is read-only, simply do not process the accessed
+                * and dirty bits.  This is the correct thing to do if the slot
+                * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
+                * are only supported if the accessed and dirty bits are already
+                * set in the ROM (so that MMIO writes are never needed).
+                *
+                * Note that NPT does not allow this at all and faults, since
+                * it always wants nested page table entries for the guest
+                * page tables to be writable.  And EPT works but will simply
+                * overwrite the read-only memory to set the accessed and dirty
+                * bits.
+                */
+               if (unlikely(!walker->pte_writable[level - 1]))
+                       continue;
+
                ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
                if (ret)
                        return ret;
@@ -204,7 +221,8 @@ retry_walk:
                        goto error;
                real_gfn = gpa_to_gfn(real_gfn);
 
-               host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
+               host_addr = gfn_to_hva_prot(vcpu->kvm, real_gfn,
+                                           &walker->pte_writable[walker->level - 1]);
                if (unlikely(kvm_is_error_hva(host_addr)))
                        goto error;
 
index 765210d4d925f6724d8037790487ca48a56482fa..03f7d03c92a28f8356c81f4b9a364e0b6034e544 100644 (file)
@@ -606,7 +606,7 @@ static int has_svm(void)
        return 1;
 }
 
-static void svm_hardware_disable(void *garbage)
+static void svm_hardware_disable(void)
 {
        /* Make sure we clean up behind us */
        if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
@@ -617,7 +617,7 @@ static void svm_hardware_disable(void *garbage)
        amd_pmu_disable_virt();
 }
 
-static int svm_hardware_enable(void *garbage)
+static int svm_hardware_enable(void)
 {
 
        struct svm_cpu_data *sd;
@@ -3196,7 +3196,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
        msr.host_initiated = false;
 
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
-       if (svm_set_msr(&svm->vcpu, &msr)) {
+       if (kvm_set_msr(&svm->vcpu, &msr)) {
                trace_kvm_msr_write_ex(ecx, data);
                kvm_inject_gp(&svm->vcpu, 0);
        } else {
@@ -3478,9 +3478,9 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 
        if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
            || !svm_exit_handlers[exit_code]) {
-               kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-               kvm_run->hw.hardware_exit_reason = exit_code;
-               return 0;
+               WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
+               kvm_queue_exception(vcpu, UD_VECTOR);
+               return 1;
        }
 
        return svm_exit_handlers[exit_code](svm);
index 7cdafb6dc705c43bea4896b70a9e281e85428c98..a3476bedd2017a0cec2df63ce4befd5c7af82f3c 100644 (file)
@@ -2493,12 +2493,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        break;
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
+                       u64 old_msr_data = msr->data;
                        msr->data = data;
                        if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
                                preempt_disable();
-                               kvm_set_shared_msr(msr->index, msr->data,
-                                                  msr->mask);
+                               ret = kvm_set_shared_msr(msr->index, msr->data,
+                                                        msr->mask);
                                preempt_enable();
+                               if (ret)
+                                       msr->data = old_msr_data;
                        }
                        break;
                }
@@ -2566,7 +2569,7 @@ static void kvm_cpu_vmxon(u64 addr)
                        : "memory", "cc");
 }
 
-static int hardware_enable(void *garbage)
+static int hardware_enable(void)
 {
        int cpu = raw_smp_processor_id();
        u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
@@ -2630,7 +2633,7 @@ static void kvm_cpu_vmxoff(void)
        asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
 }
 
-static void hardware_disable(void *garbage)
+static void hardware_disable(void)
 {
        if (vmm_exclusive) {
                vmclear_local_loaded_vmcss();
@@ -5062,7 +5065,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
        msr.data = data;
        msr.index = ecx;
        msr.host_initiated = false;
-       if (vmx_set_msr(vcpu, &msr) != 0) {
+       if (kvm_set_msr(vcpu, &msr) != 0) {
                trace_kvm_msr_write_ex(ecx, data);
                kvm_inject_gp(vcpu, 0);
                return 1;
@@ -6651,10 +6654,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
            && kvm_vmx_exit_handlers[exit_reason])
                return kvm_vmx_exit_handlers[exit_reason](vcpu);
        else {
-               vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
-               vcpu->run->hw.hardware_exit_reason = exit_reason;
+               WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+               kvm_queue_exception(vcpu, UD_VECTOR);
+               return 1;
        }
-       return 0;
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
@@ -7949,7 +7952,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 
        kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
        kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
-       vmx_set_rflags(vcpu, X86_EFLAGS_BIT1);
+       vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
        /*
         * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
         * actually changed, because it depends on the current state of
index 1be0a9e75d1fe7eb2931f45d70081724219d2f5f..ce20cb65de58611708ac032b4730aead27e2b254 100644 (file)
@@ -225,24 +225,29 @@ static void kvm_shared_msr_cpu_online(void)
                shared_msr_update(i, shared_msrs_global.msrs[i]);
 }
 
-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
+int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
        unsigned int cpu = smp_processor_id();
        struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
+       int err;
 
        if (((value ^ smsr->values[slot].curr) & mask) == 0)
-               return;
+               return 0;
        smsr->values[slot].curr = value;
-       wrmsrl(shared_msrs_global.msrs[slot], value);
+       err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
+       if (err)
+               return 1;
+
        if (!smsr->registered) {
                smsr->urn.on_user_return = kvm_on_user_return;
                user_return_notifier_register(&smsr->urn);
                smsr->registered = true;
        }
+       return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
 
-static void drop_user_return_notifiers(void *ignore)
+static void drop_user_return_notifiers(void)
 {
        unsigned int cpu = smp_processor_id();
        struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
@@ -920,7 +925,6 @@ void kvm_enable_efer_bits(u64 mask)
 }
 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
 
-
 /*
  * Writes msr value into into the appropriate "register".
  * Returns 0 on success, non-0 otherwise.
@@ -928,8 +932,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
  */
 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
+       switch (msr->index) {
+       case MSR_FS_BASE:
+       case MSR_GS_BASE:
+       case MSR_KERNEL_GS_BASE:
+       case MSR_CSTAR:
+       case MSR_LSTAR:
+               if (is_noncanonical_address(msr->data))
+                       return 1;
+               break;
+       case MSR_IA32_SYSENTER_EIP:
+       case MSR_IA32_SYSENTER_ESP:
+               /*
+                * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
+                * non-canonical address is written on Intel but not on
+                * AMD (which ignores the top 32-bits, because it does
+                * not implement 64-bit SYSENTER).
+                *
+                * 64-bit code should hence be able to write a non-canonical
+                * value on AMD.  Making the address canonical ensures that
+                * vmentry does not fail on Intel after writing a non-canonical
+                * value, and that something deterministic happens if the guest
+                * invokes 64-bit SYSENTER.
+                */
+               msr->data = get_canonical(msr->data);
+       }
        return kvm_x86_ops->set_msr(vcpu, msr);
 }
+EXPORT_SYMBOL_GPL(kvm_set_msr);
 
 /*
  * Adapt set_msr() to msr_io()'s calling convention
@@ -1196,20 +1226,37 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
        elapsed = ns - kvm->arch.last_tsc_nsec;
 
        if (vcpu->arch.virtual_tsc_khz) {
+               int faulted = 0;
+
                /* n.b - signed multiplication and division required */
                usdiff = data - kvm->arch.last_tsc_write;
 #ifdef CONFIG_X86_64
                usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
 #else
                /* do_div() only does unsigned */
-               asm("idivl %2; xor %%edx, %%edx"
-               : "=A"(usdiff)
-               : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
+               asm("1: idivl %[divisor]\n"
+                   "2: xor %%edx, %%edx\n"
+                   "   movl $0, %[faulted]\n"
+                   "3:\n"
+                   ".section .fixup,\"ax\"\n"
+                   "4: movl $1, %[faulted]\n"
+                   "   jmp  3b\n"
+                   ".previous\n"
+
+               _ASM_EXTABLE(1b, 4b)
+
+               : "=A"(usdiff), [faulted] "=r" (faulted)
+               : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
+
 #endif
                do_div(elapsed, 1000);
                usdiff -= elapsed;
                if (usdiff < 0)
                        usdiff = -usdiff;
+
+               /* idivl overflow => difference is larger than USEC_PER_SEC */
+               if (faulted)
+                       usdiff = USEC_PER_SEC;
        } else
                usdiff = USEC_PER_SEC; /* disable TSC match window below */
 
@@ -2503,7 +2550,7 @@ out:
        return r;
 }
 
-int kvm_dev_ioctl_check_extension(long ext)
+int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 {
        int r;
 
@@ -2513,6 +2560,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
        case KVM_CAP_SET_TSS_ADDR:
        case KVM_CAP_EXT_CPUID:
+       case KVM_CAP_EXT_EMUL_CPUID:
        case KVM_CAP_CLOCKSOURCE:
        case KVM_CAP_PIT:
        case KVM_CAP_NOP_IO_DELAY:
@@ -2622,15 +2670,17 @@ long kvm_arch_dev_ioctl(struct file *filp,
                r = 0;
                break;
        }
-       case KVM_GET_SUPPORTED_CPUID: {
+       case KVM_GET_SUPPORTED_CPUID:
+       case KVM_GET_EMULATED_CPUID: {
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;
 
                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
-               r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
-                                                     cpuid_arg->entries);
+
+               r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
+                                           ioctl);
                if (r)
                        goto out;
 
@@ -4787,7 +4837,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 
        ++vcpu->stat.insn_emulation_fail;
        trace_kvm_emulate_insn_failed(vcpu);
-       if (!is_guest_mode(vcpu)) {
+       if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
                vcpu->run->internal.ndata = 0;
@@ -5106,7 +5156,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 
        smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
 
-       raw_spin_lock(&kvm_lock);
+       spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (vcpu->cpu != freq->cpu)
@@ -5116,7 +5166,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
                                send_ipi = 1;
                }
        }
-       raw_spin_unlock(&kvm_lock);
+       spin_unlock(&kvm_lock);
 
        if (freq->old < freq->new && send_ipi) {
                /*
@@ -5263,12 +5313,12 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
        struct kvm_vcpu *vcpu;
        int i;
 
-       raw_spin_lock(&kvm_lock);
+       spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                kvm_for_each_vcpu(i, vcpu, kvm)
                        set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests);
        atomic_set(&kvm_guest_has_master_clock, 0);
-       raw_spin_unlock(&kvm_lock);
+       spin_unlock(&kvm_lock);
 }
 
 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
@@ -5911,7 +5961,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                }
                if (need_resched()) {
                        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-                       kvm_resched(vcpu);
+                       cond_resched();
                        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
                }
        }
@@ -6565,7 +6615,7 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector)
        kvm_rip_write(vcpu, 0);
 }
 
-int kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void)
 {
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
@@ -6576,7 +6626,7 @@ int kvm_arch_hardware_enable(void *garbage)
        bool stable, backwards_tsc = false;
 
        kvm_shared_msr_cpu_online();
-       ret = kvm_x86_ops->hardware_enable(garbage);
+       ret = kvm_x86_ops->hardware_enable();
        if (ret != 0)
                return ret;
 
@@ -6656,10 +6706,10 @@ int kvm_arch_hardware_enable(void *garbage)
        return 0;
 }
 
-void kvm_arch_hardware_disable(void *garbage)
+void kvm_arch_hardware_disable(void)
 {
-       kvm_x86_ops->hardware_disable(garbage);
-       drop_user_return_notifiers(garbage);
+       kvm_x86_ops->hardware_disable();
+       drop_user_return_notifiers();
 }
 
 int kvm_arch_hardware_setup(void)
@@ -6771,6 +6821,10 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
                static_key_slow_dec(&kvm_no_apic_vcpu);
 }
 
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
        if (type)
@@ -6862,7 +6916,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
        kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
 {
        int i;
@@ -6883,7 +6937,8 @@ void kvm_arch_free_memslot(struct kvm_memory_slot *free,
        }
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+                           unsigned long npages)
 {
        int i;
 
@@ -6941,6 +6996,10 @@ out_free:
        return -ENOMEM;
 }
 
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_userspace_memory_region *mem,
@@ -7079,7 +7138,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
        int r;
 
        if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
-             is_error_page(work->page))
+             work->wakeup_all)
                return;
 
        r = kvm_mmu_reload(vcpu);
@@ -7189,7 +7248,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
        struct x86_exception fault;
 
        trace_kvm_async_pf_ready(work->arch.token, work->gva);
-       if (is_error_page(work->page))
+       if (work->wakeup_all)
                work->arch.token = ~0; /* broadcast wakeup */
        else
                kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
index 3186542f2fa3bb5069e0729f49450b1dabd1a1bc..7626d3efa064f5dc8df5826d373d2d69b321c9a7 100644 (file)
@@ -78,15 +78,23 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
        vcpu->arch.mmio_gva = gva & PAGE_MASK;
        vcpu->arch.access = access;
        vcpu->arch.mmio_gfn = gfn;
+       vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
+}
+
+static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
 }
 
 /*
- * Clear the mmio cache info for the given gva,
- * specially, if gva is ~0ul, we clear all mmio cache info.
+ * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
+ * clear all mmio cache info.
  */
+#define MMIO_GVA_ANY (~(gva_t)0)
+
 static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
+       if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
                return;
 
        vcpu->arch.mmio_gva = 0;
@@ -94,7 +102,8 @@ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
 
 static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
 {
-       if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
+       if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
+             vcpu->arch.mmio_gva == (gva & PAGE_MASK))
                return true;
 
        return false;
@@ -102,7 +111,8 @@ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
 
 static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
-       if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
+       if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
+             vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
                return true;
 
        return false;
index 0002a3a33081c77134569c1872e5a70684dbd646..e04e67753238e65c75cab2c8b6484af15ba8883b 100644 (file)
@@ -30,11 +30,13 @@ struct pg_state {
        unsigned long start_address;
        unsigned long current_address;
        const struct addr_marker *marker;
+       unsigned long lines;
 };
 
 struct addr_marker {
        unsigned long start_address;
        const char *name;
+       unsigned long max_lines;
 };
 
 /* indices for address_markers; keep sync'd w/ address_markers below */
@@ -45,6 +47,7 @@ enum address_markers_idx {
        LOW_KERNEL_NR,
        VMALLOC_START_NR,
        VMEMMAP_START_NR,
+       ESPFIX_START_NR,
        HIGH_KERNEL_NR,
        MODULES_VADDR_NR,
        MODULES_END_NR,
@@ -67,6 +70,7 @@ static struct addr_marker address_markers[] = {
        { PAGE_OFFSET,          "Low Kernel Mapping" },
        { VMALLOC_START,        "vmalloc() Area" },
        { VMEMMAP_START,        "Vmemmap" },
+       { ESPFIX_BASE_ADDR,     "ESPfix Area", 16 },
        { __START_KERNEL_map,   "High Kernel Mapping" },
        { MODULES_VADDR,        "Modules" },
        { MODULES_END,          "End Modules" },
@@ -163,7 +167,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
                      pgprot_t new_prot, int level)
 {
        pgprotval_t prot, cur;
-       static const char units[] = "KMGTPE";
+       static const char units[] = "BKMGTPE";
 
        /*
         * If we have a "break" in the series, we need to flush the state that
@@ -178,6 +182,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
                st->current_prot = new_prot;
                st->level = level;
                st->marker = address_markers;
+               st->lines = 0;
                seq_printf(m, "---[ %s ]---\n", st->marker->name);
        } else if (prot != cur || level != st->level ||
                   st->current_address >= st->marker[1].start_address) {
@@ -188,17 +193,21 @@ static void note_page(struct seq_file *m, struct pg_state *st,
                /*
                 * Now print the actual finished series
                 */
-               seq_printf(m, "0x%0*lx-0x%0*lx   ",
-                          width, st->start_address,
-                          width, st->current_address);
-
-               delta = (st->current_address - st->start_address) >> 10;
-               while (!(delta & 1023) && unit[1]) {
-                       delta >>= 10;
-                       unit++;
+               if (!st->marker->max_lines ||
+                   st->lines < st->marker->max_lines) {
+                       seq_printf(m, "0x%0*lx-0x%0*lx   ",
+                                  width, st->start_address,
+                                  width, st->current_address);
+
+                       delta = (st->current_address - st->start_address);
+                       while (!(delta & 1023) && unit[1]) {
+                               delta >>= 10;
+                               unit++;
+                       }
+                       seq_printf(m, "%9lu%c ", delta, *unit);
+                       printk_prot(m, st->current_prot, st->level);
                }
-               seq_printf(m, "%9lu%c ", delta, *unit);
-               printk_prot(m, st->current_prot, st->level);
+               st->lines++;
 
                /*
                 * We print markers for special areas of address space,
@@ -206,7 +215,15 @@ static void note_page(struct seq_file *m, struct pg_state *st,
                 * This helps in the interpretation.
                 */
                if (st->current_address >= st->marker[1].start_address) {
+                       if (st->marker->max_lines &&
+                           st->lines > st->marker->max_lines) {
+                               unsigned long nskip =
+                                       st->lines - st->marker->max_lines;
+                               seq_printf(m, "... %lu entr%s skipped ... \n",
+                                          nskip, nskip == 1 ? "y" : "ies");
+                       }
                        st->marker++;
+                       st->lines = 0;
                        seq_printf(m, "---[ %s ]---\n", st->marker->name);
                }
 
index c1e9e4cbbd76d4aa1835e1e96715e098a003b6d0..d8b1ff68dbb9366692ba2502286172692fb36ef5 100644 (file)
@@ -842,23 +842,15 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
        force_sig_info_fault(SIGBUS, code, address, tsk, fault);
 }
 
-static noinline int
+static noinline void
 mm_fault_error(struct pt_regs *regs, unsigned long error_code,
               unsigned long address, unsigned int fault)
 {
-       /*
-        * Pagefault was interrupted by SIGKILL. We have no reason to
-        * continue pagefault.
-        */
-       if (fatal_signal_pending(current)) {
-               if (!(fault & VM_FAULT_RETRY))
-                       up_read(&current->mm->mmap_sem);
-               if (!(error_code & PF_USER))
-                       no_context(regs, error_code, address, 0, 0);
-               return 1;
+       if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+               up_read(&current->mm->mmap_sem);
+               no_context(regs, error_code, address, 0, 0);
+               return;
        }
-       if (!(fault & VM_FAULT_ERROR))
-               return 0;
 
        if (fault & VM_FAULT_OOM) {
                /* Kernel mode? Handle exceptions or die: */
@@ -866,7 +858,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
                        up_read(&current->mm->mmap_sem);
                        no_context(regs, error_code, address,
                                   SIGSEGV, SEGV_MAPERR);
-                       return 1;
+                       return;
                }
 
                up_read(&current->mm->mmap_sem);
@@ -884,7 +876,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
                else
                        BUG();
        }
-       return 1;
 }
 
 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
@@ -1017,9 +1008,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
        unsigned long address;
        struct mm_struct *mm;
        int fault;
-       int write = error_code & PF_WRITE;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                                       (write ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        tsk = current;
        mm = tsk->mm;
@@ -1089,6 +1078,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
        if (user_mode_vm(regs)) {
                local_irq_enable();
                error_code |= PF_USER;
+               flags |= FAULT_FLAG_USER;
        } else {
                if (regs->flags & X86_EFLAGS_IF)
                        local_irq_enable();
@@ -1113,6 +1103,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
                return;
        }
 
+       if (error_code & PF_WRITE)
+               flags |= FAULT_FLAG_WRITE;
+
        /*
         * When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in
@@ -1191,9 +1184,17 @@ good_area:
         */
        fault = handle_mm_fault(mm, vma, address, flags);
 
-       if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
-               if (mm_fault_error(regs, error_code, address, fault))
-                       return;
+       /*
+        * If we need to retry but a fatal signal is pending, handle the
+        * signal first. We do not need to release the mmap_sem because it
+        * would already be released in __lock_page_or_retry in mm/filemap.c.
+        */
+       if (unlikely((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)))
+               return;
+
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               mm_fault_error(regs, error_code, address, fault);
+               return;
        }
 
        /*
index bb32480c2d713d57a52747f2f6e119a70286dc07..aabdf762f5921e95063edef2272947fc872482a9 100644 (file)
@@ -389,7 +389,7 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
        psize = page_level_size(level);
        pmask = page_level_mask(level);
        offset = virt_addr & ~pmask;
-       phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+       phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
        return (phys_addr | offset);
 }
 EXPORT_SYMBOL_GPL(slow_virt_to_phys);
index 94919e307f8e97c52d4cd69cee64cf7b0eb68584..2883f08402016994b0b93cadc57db31e2210fbde 100644 (file)
@@ -162,6 +162,10 @@ pcibios_align_resource(void *data, const struct resource *res,
                        return start;
                if (start & 0x300)
                        start = (start + 0x3ff) & ~0x3ff;
+       } else if (res->flags & IORESOURCE_MEM) {
+               /* The low 1MB range is reserved for ISA cards */
+               if (start < BIOS_END)
+                       start = BIOS_END;
        }
        return start;
 }
index aabfb8380a1c6cf91e5af8aaed9dd30bd088d9de..01ed502554731cce6850f24f9da16c2030eabf19 100644 (file)
 348    i386    process_vm_writev       sys_process_vm_writev           compat_sys_process_vm_writev
 349    i386    kcmp                    sys_kcmp
 350    i386    finit_module            sys_finit_module
+# 351  i386    sched_setattr           sys_sched_setattr
+# 352  i386    sched_getattr           sys_sched_getattr
+# 353  i386    renameat2               sys_renameat2
+354    i386    seccomp                 sys_seccomp
index 63a899304d27cb5e976d74279993416a496d4818..c7b4ac76cd3744c34cdf92cbae0bb3f8457ce3a0 100644 (file)
 311    64      process_vm_writev       sys_process_vm_writev
 312    common  kcmp                    sys_kcmp
 313    common  finit_module            sys_finit_module
+# 314  common  sched_setattr           sys_sched_setattr
+# 315  common  sched_getattr           sys_sched_getattr
+# 316  common  renameat2               sys_renameat2
+317    common  seccomp                 sys_seccomp
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
index 0f134c7cfc246374273f2bc87fc8a12d4172c5ae..0faad646f5fda8eb40c64a95e685c2c919daadf6 100644 (file)
@@ -41,7 +41,6 @@ enum {
 #ifdef CONFIG_X86_64
 #define vdso_enabled                   sysctl_vsyscall32
 #define arch_setup_additional_pages    syscall32_setup_pages
-extern int sysctl_ldt16;
 #endif
 
 /*
@@ -381,13 +380,6 @@ static ctl_table abi_table2[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "ldt16",
-               .data           = &sysctl_ldt16,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {}
 };
 
index d7546c94da520625d7ae02c65c8e05e025535816..385efb23ddce6fde151ee321484908e0d6c5bced 100644 (file)
 #define VMALLOC_START          0xC0000000
 #define VMALLOC_END            0xC7FEFFFF
 #define TLBTEMP_BASE_1         0xC7FF0000
-#define TLBTEMP_BASE_2         0xC7FF8000
+#define TLBTEMP_BASE_2         (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
+#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
+#define TLBTEMP_SIZE           (2 * DCACHE_WAY_SIZE)
+#else
+#define TLBTEMP_SIZE           ICACHE_WAY_SIZE
+#endif
 
 /*
  * Xtensa Linux config PTE layout (when present):
index fd686dc45d1a95b5016de15341c6d3fe173837fe..c7211e7e182d56cd85e3ec923ee89f81748b26af 100644 (file)
  */
        .macro  get_fs  ad, sp
        GET_CURRENT(\ad,\sp)
+#if THREAD_CURRENT_DS > 1020
+       addi    \ad, \ad, TASK_THREAD
+       l32i    \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
+#else
        l32i    \ad, \ad, THREAD_CURRENT_DS
+#endif
        .endm
 
 /*
index b4cb1100c0fb01f6ca178ef5c3f0e4adc02b8f9a..a47909f0c34b4892848d06b9f55ebe4a37563d90 100644 (file)
 #define TCSETSW                0x5403
 #define TCSETSF                0x5404
 
-#define TCGETA         _IOR('t', 23, struct termio)
-#define TCSETA         _IOW('t', 24, struct termio)
-#define TCSETAW                _IOW('t', 25, struct termio)
-#define TCSETAF                _IOW('t', 28, struct termio)
+#define TCGETA         0x80127417      /* _IOR('t', 23, struct termio) */
+#define TCSETA         0x40127418      /* _IOW('t', 24, struct termio) */
+#define TCSETAW                0x40127419      /* _IOW('t', 25, struct termio) */
+#define TCSETAF                0x4012741C      /* _IOW('t', 28, struct termio) */
 
 #define TCSBRK         _IO('t', 29)
 #define TCXONC         _IO('t', 30)
 #define TCFLSH         _IO('t', 31)
 
-#define TIOCSWINSZ     _IOW('t', 103, struct winsize)
-#define TIOCGWINSZ     _IOR('t', 104, struct winsize)
+#define TIOCSWINSZ     0x40087467      /* _IOW('t', 103, struct winsize) */
+#define TIOCGWINSZ     0x80087468      /* _IOR('t', 104, struct winsize) */
 #define        TIOCSTART       _IO('t', 110)           /* start output, like ^Q */
 #define        TIOCSTOP        _IO('t', 111)           /* stop output, like ^S */
 #define TIOCOUTQ        _IOR('t', 115, int)     /* output queue size */
@@ -88,7 +88,6 @@
 #define TIOCSETD       _IOW('T', 35, int)
 #define TIOCGETD       _IOR('T', 36, int)
 #define TCSBRKP                _IOW('T', 37, int)   /* Needed for POSIX tcsendbreak()*/
-#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/
 #define TIOCSBRK       _IO('T', 39)         /* BSD compatibility */
 #define TIOCCBRK       _IO('T', 40)         /* BSD compatibility */
 #define TIOCGSID       _IOR('T', 41, pid_t) /* Return the session ID of FD*/
 #define TIOCSERGETLSR   _IOR('T', 89, unsigned int) /* Get line status reg. */
   /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
 # define TIOCSER_TEMT    0x01               /* Transmitter physically empty */
-#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config  */
-#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */
+#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config  */
+                       /* _IOR('T', 90, struct serial_multiport_struct) */
+#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */
+                       /* _IOW('T', 91, struct serial_multiport_struct) */
 
 #define TIOCMIWAIT     _IO('T', 92) /* wait for a change on serial input line(s) */
 #define TIOCGICOUNT    0x545D  /* read serial port inline interrupt counts */
index 51940fec6990f2b9a1c86fa5c43a199c3eade2f0..513effd48060743a3b6228b71b4c6703d601fca3 100644 (file)
@@ -384,7 +384,8 @@ __SYSCALL(174, sys_chroot, 1)
 #define __NR_pivot_root                        175
 __SYSCALL(175, sys_pivot_root, 2)
 #define __NR_umount                            176
-__SYSCALL(176, sys_umount, 2)
+__SYSCALL(176, sys_oldumount, 1)
+#define __ARCH_WANT_SYS_OLDUMOUNT
 #define __NR_swapoff                           177
 __SYSCALL(177, sys_swapoff, 1)
 #define __NR_sync                              178
index aa7f9add7d773375fb2e94c075e9acf91094dda1..6e53174f85565ccad7899d4315f5de1f76248d80 100644 (file)
@@ -1121,9 +1121,8 @@ ENTRY(fast_syscall_xtensa)
        movi    a7, 4                   # sizeof(unsigned int)
        access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
 
-       addi    a6, a6, -1              # assuming SYS_XTENSA_ATOMIC_SET = 1
-       _bgeui  a6, SYS_XTENSA_COUNT - 1, .Lill
-       _bnei   a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
+       _bgeui  a6, SYS_XTENSA_COUNT, .Lill
+       _bnei   a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp
 
        /* Fall through for ATOMIC_CMP_SWP. */
 
@@ -1135,27 +1134,26 @@ TRY     s32i    a5, a3, 0               # different, modify value
        l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, 1                   # and return 1
-       addi    a6, a6, 1               # restore a6 (really necessary?)
        rfe
 
 1:     l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, 0                   # return 0 (note that we cannot set
-       addi    a6, a6, 1               # restore a6 (really necessary?)
        rfe
 
 .Lnswp:        /* Atomic set, add, and exg_add. */
 
 TRY    l32i    a7, a3, 0               # orig
+       addi    a6, a6, -SYS_XTENSA_ATOMIC_SET
        add     a0, a4, a7              # + arg
        moveqz  a0, a4, a6              # set
+       addi    a6, a6, SYS_XTENSA_ATOMIC_SET
 TRY    s32i    a0, a3, 0               # write new value
 
        mov     a0, a2
        mov     a2, a7
        l32i    a7, a0, PT_AREG7        # restore a7
        l32i    a0, a0, PT_AREG0        # restore a0
-       addi    a6, a6, 1               # restore a6 (really necessary?)
        rfe
 
 CATCH
@@ -1164,7 +1162,7 @@ CATCH
        movi    a2, -EFAULT
        rfe
 
-.Lill: l32i    a7, a2, PT_AREG0        # restore a7
+.Lill: l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, -EINVAL
        rfe
@@ -1703,7 +1701,7 @@ ENTRY(fast_second_level_miss)
        rsr     a0, excvaddr
        bltu    a0, a3, 2f
 
-       addi    a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
+       addi    a1, a0, -TLBTEMP_SIZE
        bgeu    a1, a3, 2f
 
        /* Check if we have to restore an ITLB mapping. */
@@ -1961,7 +1959,6 @@ ENTRY(_switch_to)
 
        entry   a1, 16
 
-       mov     a10, a2                 # preserve 'prev' (a2)
        mov     a11, a3                 # and 'next' (a3)
 
        l32i    a4, a2, TASK_THREAD_INFO
@@ -1969,8 +1966,14 @@ ENTRY(_switch_to)
 
        save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
 
-       s32i    a0, a10, THREAD_RA      # save return address
-       s32i    a1, a10, THREAD_SP      # save stack pointer
+#if THREAD_RA > 1020 || THREAD_SP > 1020
+       addi    a10, a2, TASK_THREAD
+       s32i    a0, a10, THREAD_RA - TASK_THREAD        # save return address
+       s32i    a1, a10, THREAD_SP - TASK_THREAD        # save stack pointer
+#else
+       s32i    a0, a2, THREAD_RA       # save return address
+       s32i    a1, a2, THREAD_SP       # save stack pointer
+#endif
 
        /* Disable ints while we manipulate the stack pointer. */
 
@@ -2011,7 +2014,6 @@ ENTRY(_switch_to)
        load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
 
        wsr     a14, ps
-       mov     a2, a10                 # return 'prev'
        rsync
 
        retw
index 2d9cc6dbfd78acacd5bc63dbc140e02dbe0a4dc3..e8b76b8e4b2910a17435fd971da65138b97cbfde 100644 (file)
@@ -49,9 +49,8 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
 
        /* We currently don't support coherent memory outside KSEG */
 
-       if (ret < XCHAL_KSEG_CACHED_VADDR
-           || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE)
-               BUG();
+       BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
+              ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
 
 
        if (ret != 0) {
@@ -68,10 +67,11 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_coherent(struct device *hwdev, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
 {
-       long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR;
+       unsigned long addr = (unsigned long)vaddr +
+               XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
 
-       if (addr < 0 || addr >= XCHAL_KSEG_SIZE)
-               BUG();
+       BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
+              addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
 
        free_pages(addr, get_order(size));
 }
index 4b7bc8db170ff8ac6befadb8246ac15938b74cae..70fa7bc42b4a0853012af6f6daaeb254f9c5a086 100644 (file)
@@ -72,6 +72,8 @@ void do_page_fault(struct pt_regs *regs)
               address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
 #endif
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
index e8918ffaf96d4a0a2dacf75838b5d8a89e5e8ca3..1ff8e97f853ad719ee13994f0d5fc3fafb76a775 100644 (file)
@@ -876,6 +876,13 @@ void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);
 
+       /*
+        * @q could be exiting and already have destroyed all blkgs as
+        * indicated by NULL root_blkg.  If so, don't confuse policies.
+        */
+       if (!q->root_blkg)
+               return;
+
        blk_throtl_drain(q);
 }
 
index 53309333c2f015fff6b6a7ae8a9a01700105b3e8..ec00a0f7521206a9912c29407c2a64869a91b081 100644 (file)
@@ -553,7 +553,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                bottom = max(b->physical_block_size, b->io_min) + alignment;
 
                /* Verify that top and bottom intervals line up */
-               if (max(top, bottom) & (min(top, bottom) - 1)) {
+               if (max(top, bottom) % min(top, bottom)) {
                        t->misaligned = 1;
                        ret = -1;
                }
@@ -594,7 +594,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm(t->alignment_offset, alignment)
-               & (max(t->physical_block_size, t->io_min) - 1);
+               % max(t->physical_block_size, t->io_min);
 
        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
index cc345e1d8d4ea0088832833ef985d6b1e764fa44..0c51b4b34f478b0d47460f954542c8ab643ba9eb 100644 (file)
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
- * __blk_free_tags - release a given set of tag maintenance info
+ * blk_free_tags - release a given set of tag maintenance info
  * @bqt:       the tag map to free
  *
- * Tries to free the specified @bqt.  Returns true if it was
- * actually freed and false if there are still references using it
+ * Drop the reference count on @bqt and frees it when the last reference
+ * is dropped.
  */
-static int __blk_free_tags(struct blk_queue_tag *bqt)
+void blk_free_tags(struct blk_queue_tag *bqt)
 {
-       int retval;
-
-       retval = atomic_dec_and_test(&bqt->refcnt);
-       if (retval) {
+       if (atomic_dec_and_test(&bqt->refcnt)) {
                BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
                                                        bqt->max_depth);
 
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 
                kfree(bqt);
        }
-
-       return retval;
 }
+EXPORT_SYMBOL(blk_free_tags);
 
 /**
  * __blk_queue_free_tags - release tag maintenance info
@@ -69,27 +65,12 @@ void __blk_queue_free_tags(struct request_queue *q)
        if (!bqt)
                return;
 
-       __blk_free_tags(bqt);
+       blk_free_tags(bqt);
 
        q->queue_tags = NULL;
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
-/**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt:       the tag map to free
- *
- * For externally managed @bqt frees the map.  Callers of this
- * function must guarantee to have released all the queues that
- * might have been using this tag map.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
-       if (unlikely(!__blk_free_tags(bqt)))
-               BUG();
-}
-EXPORT_SYMBOL(blk_free_tags);
-
 /**
  * blk_queue_free_tags - release tag maintenance info
  * @q:  the request queue for the device
index c410752c5c654592761248bc193d49fb38efda45..c981097dd6342e168f0bbded98d7971a19611223 100644 (file)
@@ -1275,12 +1275,16 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 static void
 cfq_update_group_weight(struct cfq_group *cfqg)
 {
-       BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
-
        if (cfqg->new_weight) {
                cfqg->weight = cfqg->new_weight;
                cfqg->new_weight = 0;
        }
+}
+
+static void
+cfq_update_group_leaf_weight(struct cfq_group *cfqg)
+{
+       BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
        if (cfqg->new_leaf_weight) {
                cfqg->leaf_weight = cfqg->new_leaf_weight;
@@ -1299,7 +1303,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
        /* add to the service tree */
        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
-       cfq_update_group_weight(cfqg);
+       cfq_update_group_leaf_weight(cfqg);
        __cfq_group_service_tree_add(st, cfqg);
 
        /*
@@ -1323,6 +1327,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
         */
        while ((parent = cfqg_parent(pos))) {
                if (propagate) {
+                       cfq_update_group_weight(pos);
                        propagate = !parent->nr_active++;
                        parent->children_weight += pos->weight;
                }
index 7c668c8a6f953e1b6c298c9fc8af400924d7368a..21ad6869a5cef82b5920c1c986cad5fb99dce956 100644 (file)
@@ -689,6 +689,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        case BLKROSET:
        case BLKDISCARD:
        case BLKSECDISCARD:
+       case BLKZEROOUT:
        /*
         * the ones below are implemented in blkdev_locked_ioctl,
         * but we call blkdev_ioctl, which gets the lock for us
index 6acda145311ff7bd58a9d831c4568159de7dab8f..6374a29fd783fc3a8fd80ef03093e97666e77db6 100644 (file)
@@ -28,10 +28,10 @@ struct kobject *block_depr;
 /* for extended dynamic devt allocation, currently only one major is used */
 #define NR_EXT_DEVT            (1 << MINORBITS)
 
-/* For extended devt allocation.  ext_devt_mutex prevents look up
+/* For extended devt allocation.  ext_devt_lock prevents look up
  * results from going away underneath its user.
  */
-static DEFINE_MUTEX(ext_devt_mutex);
+static DEFINE_SPINLOCK(ext_devt_lock);
 static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
@@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
        }
 
        /* allocate ext devt */
-       mutex_lock(&ext_devt_mutex);
-       idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
-       mutex_unlock(&ext_devt_mutex);
+       idr_preload(GFP_KERNEL);
+
+       spin_lock(&ext_devt_lock);
+       idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
+       spin_unlock(&ext_devt_lock);
+
+       idr_preload_end();
        if (idx < 0)
                return idx == -ENOSPC ? -EBUSY : idx;
 
@@ -441,15 +445,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
  */
 void blk_free_devt(dev_t devt)
 {
-       might_sleep();
-
        if (devt == MKDEV(0, 0))
                return;
 
        if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-               mutex_lock(&ext_devt_mutex);
+               spin_lock(&ext_devt_lock);
                idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-               mutex_unlock(&ext_devt_mutex);
+               spin_unlock(&ext_devt_lock);
        }
 }
 
@@ -665,7 +667,6 @@ void del_gendisk(struct gendisk *disk)
                sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
        pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
        device_del(disk_to_dev(disk));
-       blk_free_devt(disk_to_dev(disk)->devt);
 }
 EXPORT_SYMBOL(del_gendisk);
 
@@ -690,13 +691,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
        } else {
                struct hd_struct *part;
 
-               mutex_lock(&ext_devt_mutex);
+               spin_lock(&ext_devt_lock);
                part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
                if (part && get_disk(part_to_disk(part))) {
                        *partno = part->partno;
                        disk = part_to_disk(part);
                }
-               mutex_unlock(&ext_devt_mutex);
+               spin_unlock(&ext_devt_lock);
        }
 
        return disk;
@@ -1098,6 +1099,7 @@ static void disk_release(struct device *dev)
 {
        struct gendisk *disk = dev_to_disk(dev);
 
+       blk_free_devt(dev->devt);
        disk_release_events(disk);
        kfree(disk->random);
        disk_replace_part_tbl(disk, NULL);
index c7942acf1379d1b21f664e9b2ad82403432cc0c7..47284e7126503ced01b47adc6bada024bf258e09 100644 (file)
@@ -211,6 +211,7 @@ static const struct attribute_group *part_attr_groups[] = {
 static void part_release(struct device *dev)
 {
        struct hd_struct *p = dev_to_part(dev);
+       blk_free_devt(dev->devt);
        free_part_stats(p);
        free_part_info(p);
        kfree(p);
@@ -264,7 +265,6 @@ void delete_partition(struct gendisk *disk, int partno)
        rcu_assign_pointer(ptbl->last_lookup, NULL);
        kobject_put(part->holder_dir);
        device_del(part_to_dev(part));
-       blk_free_devt(part_devt(part));
 
        hd_struct_put(part);
 }
index 93cb0d6d4b6067a6d18a620deee59807436121a9..0a20ababa44ba2683193df78740bdaf774a02c21 100755 (executable)
@@ -45,7 +45,7 @@ static struct rk_partition *newpart(char *s,
                /* No sense support partition less than 8B */
                if (size < ((PAGE_SIZE) >> 9))
                {
-                       printk(KERN_ERR ERRP "partition size too small (%llx)\n", size);
+                       printk(KERN_ERR ERRP "partition size too small (%llx)\n", (u64)size);
                        return NULL;
                }
        }
@@ -320,9 +320,9 @@ int rkpart_partition(struct parsed_partitions *state)
                strcpy(state->parts[i+1].info.volname, parts[i].name);
                 printk(KERN_INFO "%10s: 0x%09llx -- 0x%09llx (%llu MB)\n", 
                                parts[i].name,
-                               parts[i].from * 512,
-                               (parts[i].from + parts[i].size) * 512,
-                               parts[i].size / 2048);
+                               (u64)parts[i].from * 512,
+                               (u64)(parts[i].from + parts[i].size) * 512,
+                               (u64)parts[i].size / 2048);
        }
 
        rkpart_bootmode_fixup();
index a5ffcc988f0b00441a29f76e63e874892d121350..1b4988b4bc11e5ca5e6b224d628cc2f3e79e6c19 100644 (file)
@@ -506,7 +506,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 
        if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
                err = DRIVER_ERROR << 24;
-               goto out;
+               goto error;
        }
 
        memset(sense, 0, sizeof(sense));
@@ -516,7 +516,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 
        blk_execute_rq(q, disk, rq, 0);
 
-out:
        err = rq->errors & 0xff;        /* only 8 bit SCSI status */
        if (err) {
                if (rq->sense_len && rq->sense) {
index 62568b1fc885fa2f9f4ef61d61f1772339acc6c8..ffe7278d4bd83bd9b8ee5e7392af7bd417e037ae 100644 (file)
@@ -75,7 +75,7 @@ int ablk_encrypt(struct ablkcipher_request *req)
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);
 
-               memcpy(cryptd_req, req, sizeof(*req));
+               *cryptd_req = *req;
                ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
 
                return crypto_ablkcipher_encrypt(cryptd_req);
@@ -94,7 +94,7 @@ int ablk_decrypt(struct ablkcipher_request *req)
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);
 
-               memcpy(cryptd_req, req, sizeof(*req));
+               *cryptd_req = *req;
                ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
 
                return crypto_ablkcipher_decrypt(cryptd_req);
index ac33d5f3077823af1e714039e2bc093763f8c2fb..bf948e134981a6ed47da7ab71d0aff8f8de81f51 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/net.h>
 #include <linux/rwsem.h>
+#include <linux/security.h>
 
 struct alg_type_list {
        const struct af_alg_type *type;
@@ -243,6 +244,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
 
        sock_init_data(newsock, sk2);
        sock_graft(sk2, newsock);
+       security_sk_clone(sk, sk2);
 
        err = type->accept(ask->private, sk2);
        if (err) {
index a19c027b29bde504ac8eae4b4572947a69ed2335..83187f497c7c65dddd2248170a50976e568d82e5 100644 (file)
@@ -49,7 +49,7 @@ struct skcipher_ctx {
        struct ablkcipher_request req;
 };
 
-#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
+#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
                      sizeof(struct scatterlist) - 1)
 
 static inline int skcipher_sndbuf(struct sock *sk)
index d5bfbd331bfdda124d92a195b3418ae34cfbc2af..95896886fc5ab8648bd705c157d0beaae9d32a6e 100644 (file)
@@ -254,6 +254,7 @@ struct acpi_create_field_info {
        u32 field_bit_position;
        u32 field_bit_length;
        u16 resource_length;
+       u16 pin_number_index;
        u8 field_flags;
        u8 attribute;
        u8 field_type;
index cc7ab6dd724e6234a835d0321c8cb4b6106796f4..a47cc78ffd4f0f9337f892b8b063312a1b791a69 100644 (file)
@@ -263,6 +263,7 @@ struct acpi_object_region_field {
        ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
        union acpi_operand_object *region_obj;  /* Containing op_region object */
        u8 *resource_buffer;    /* resource_template for serial regions/fields */
+       u16 pin_number_index;   /* Index relative to previous Connection/Template */
 };
 
 struct acpi_object_bank_field {
index feadeed1012dc2256c1596bb79e7ff6190e6cb41..e651d4ec7c4ccc99a052b171635de74605c3ad70 100644 (file)
@@ -360,6 +360,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
                         */
                        info->resource_buffer = NULL;
                        info->connection_node = NULL;
+                       info->pin_number_index = 0;
 
                        /*
                         * A Connection() is either an actual resource descriptor (buffer)
@@ -437,6 +438,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
                        }
 
                        info->field_bit_position += info->field_bit_length;
+                       info->pin_number_index++;       /* Index relative to previous Connection() */
                        break;
 
                default:
index 6555e350fc1fe21087cea7e219a9c4b170d70e25..8fab9262d98afc68e07c6fdf7794850b2ebd20d6 100644 (file)
@@ -141,6 +141,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
        union acpi_operand_object *region_obj2;
        void *region_context = NULL;
        struct acpi_connection_info *context;
+       acpi_physical_address address;
 
        ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
 
@@ -235,25 +236,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
        /* We have everything we need, we can invoke the address space handler */
 
        handler = handler_desc->address_space.handler;
-
-       ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-                         "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
-                         &region_obj->region.handler->address_space, handler,
-                         ACPI_FORMAT_NATIVE_UINT(region_obj->region.address +
-                                                 region_offset),
-                         acpi_ut_get_region_name(region_obj->region.
-                                                 space_id)));
+       address = (region_obj->region.address + region_offset);
 
        /*
         * Special handling for generic_serial_bus and general_purpose_io:
         * There are three extra parameters that must be passed to the
         * handler via the context:
-        *   1) Connection buffer, a resource template from Connection() op.
-        *   2) Length of the above buffer.
-        *   3) Actual access length from the access_as() op.
+        *   1) Connection buffer, a resource template from Connection() op
+        *   2) Length of the above buffer
+        *   3) Actual access length from the access_as() op
+        *
+        * In addition, for general_purpose_io, the Address and bit_width fields
+        * are defined as follows:
+        *   1) Address is the pin number index of the field (bit offset from
+        *      the previous Connection)
+        *   2) bit_width is the actual bit length of the field (number of pins)
         */
-       if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
-            (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
+       if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
            context && field_obj) {
 
                /* Get the Connection (resource_template) buffer */
@@ -262,6 +261,24 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
                context->length = field_obj->field.resource_length;
                context->access_length = field_obj->field.access_length;
        }
+       if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
+           context && field_obj) {
+
+               /* Get the Connection (resource_template) buffer */
+
+               context->connection = field_obj->field.resource_buffer;
+               context->length = field_obj->field.resource_length;
+               context->access_length = field_obj->field.access_length;
+               address = field_obj->field.pin_number_index;
+               bit_width = field_obj->field.bit_length;
+       }
+
+       ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+                         "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+                         &region_obj->region.handler->address_space, handler,
+                         ACPI_FORMAT_NATIVE_UINT(address),
+                         acpi_ut_get_region_name(region_obj->region.
+                                                 space_id)));
 
        if (!(handler_desc->address_space.handler_flags &
              ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
@@ -275,9 +292,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 
        /* Call the handler */
 
-       status = handler(function,
-                        (region_obj->region.address + region_offset),
-                        bit_width, value, context,
+       status = handler(function, address, bit_width, value, context,
                         region_obj2->extra.region_context);
 
        if (ACPI_FAILURE(status)) {
index 7d4bae71e8c62da6399bc6af7e0f3fd132efa574..0108d59665abddb0edb55090b399675fcf663266 100644 (file)
@@ -178,6 +178,37 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
                buffer = &buffer_desc->integer.value;
        }
 
+       if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
+           (obj_desc->field.region_obj->region.space_id ==
+            ACPI_ADR_SPACE_GPIO)) {
+               /*
+                * For GPIO (general_purpose_io), the Address will be the bit offset
+                * from the previous Connection() operator, making it effectively a
+                * pin number index. The bit_length is the length of the field, which
+                * is thus the number of pins.
+                */
+               ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+                                 "GPIO FieldRead [FROM]:  Pin %u Bits %u\n",
+                                 obj_desc->field.pin_number_index,
+                                 obj_desc->field.bit_length));
+
+               /* Lock entire transaction if requested */
+
+               acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+               /* Perform the write */
+
+               status = acpi_ex_access_region(obj_desc, 0,
+                                              (u64 *)buffer, ACPI_READ);
+               acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+               if (ACPI_FAILURE(status)) {
+                       acpi_ut_remove_reference(buffer_desc);
+               } else {
+                       *ret_buffer_desc = buffer_desc;
+               }
+               return_ACPI_STATUS(status);
+       }
+
        ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
                          "FieldRead [TO]:   Obj %p, Type %X, Buf %p, ByteLen %X\n",
                          obj_desc, obj_desc->common.type, buffer,
@@ -325,6 +356,42 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
 
                *result_desc = buffer_desc;
                return_ACPI_STATUS(status);
+       } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
+                  (obj_desc->field.region_obj->region.space_id ==
+                   ACPI_ADR_SPACE_GPIO)) {
+               /*
+                * For GPIO (general_purpose_io), we will bypass the entire field
+                * mechanism and handoff the bit address and bit width directly to
+                * the handler. The Address will be the bit offset
+                * from the previous Connection() operator, making it effectively a
+                * pin number index. The bit_length is the length of the field, which
+                * is thus the number of pins.
+                */
+               if (source_desc->common.type != ACPI_TYPE_INTEGER) {
+                       return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+               }
+
+               ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+                                 "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X  [TO]:  Pin %u Bits %u\n",
+                                 acpi_ut_get_type_name(source_desc->common.
+                                                       type),
+                                 source_desc->common.type,
+                                 (u32)source_desc->integer.value,
+                                 obj_desc->field.pin_number_index,
+                                 obj_desc->field.bit_length));
+
+               buffer = &source_desc->integer.value;
+
+               /* Lock entire transaction if requested */
+
+               acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+               /* Perform the write */
+
+               status = acpi_ex_access_region(obj_desc, 0,
+                                              (u64 *)buffer, ACPI_WRITE);
+               acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+               return_ACPI_STATUS(status);
        }
 
        /* Get a pointer to the data to be written */
index 6b728aef2dcab54804d11509e23660b06e11d21e..df212fe4cf6c1374fc777073879aa1df3749c166 100644 (file)
@@ -479,6 +479,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
                        obj_desc->field.resource_length = info->resource_length;
                }
 
+               obj_desc->field.pin_number_index = info->pin_number_index;
+
                /* Allow full data read from EC address space */
 
                if ((obj_desc->field.region_obj->region.space_id ==
index e4c9291fc0a3f530bfaea472895f6217a23f8b99..a63a4cdd2ce8389de96aeb2011d9d436f1a23313 100644 (file)
@@ -998,5 +998,11 @@ acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
                status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
        }
 
+       /* Delete the allocated object if copy failed */
+
+       if (ACPI_FAILURE(status)) {
+               acpi_ut_remove_reference(*dest_desc);
+       }
+
        return_ACPI_STATUS(status);
 }
index 4056d3175178d040edd14bccc04bac2d2ce5d5e3..a88894190e419eb697a3546170ca0652bb8f8db3 100644 (file)
@@ -1101,9 +1101,9 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 
        if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
 
-               cpuidle_pause_and_lock();
                /* Protect against cpu-hotplug */
                get_online_cpus();
+               cpuidle_pause_and_lock();
 
                /* Disable all cpuidle devices */
                for_each_online_cpu(cpu) {
@@ -1130,8 +1130,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
                                cpuidle_enable_device(dev);
                        }
                }
-               put_online_cpus();
                cpuidle_resume_and_unlock();
+               put_online_cpus();
        }
 
        return 0;
index cca761e80d898c538c31ec21887c97d72f4795e2..091682fb1617085ced673f304d0a4431e4b9f499 100644 (file)
@@ -769,12 +769,17 @@ static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
        device->driver->ops.notify(device, event);
 }
 
-static acpi_status acpi_device_notify_fixed(void *data)
+static void acpi_device_notify_fixed(void *data)
 {
        struct acpi_device *device = data;
 
        /* Fixed hardware devices have no handles */
        acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
+}
+
+static acpi_status acpi_device_fixed_event(void *data)
+{
+       acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
        return AE_OK;
 }
 
@@ -785,12 +790,12 @@ static int acpi_device_install_notify_handler(struct acpi_device *device)
        if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
                status =
                    acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
-                                                    acpi_device_notify_fixed,
+                                                    acpi_device_fixed_event,
                                                     device);
        else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
                status =
                    acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
-                                                    acpi_device_notify_fixed,
+                                                    acpi_device_fixed_event,
                                                     device);
        else
                status = acpi_install_notify_handler(device->handle,
@@ -807,10 +812,10 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
 {
        if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
                acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
-                                               acpi_device_notify_fixed);
+                                               acpi_device_fixed_event);
        else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
                acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
-                                               acpi_device_notify_fixed);
+                                               acpi_device_fixed_event);
        else
                acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
                                           acpi_device_notify);
index b0d33d9533aaa7f506658654576b2dbb987318e5..64150a9ffff3fb54668ca7f12a00ff12d2a5777f 100644 (file)
@@ -61,6 +61,7 @@ enum board_ids {
        /* board IDs by feature in alphabetical order */
        board_ahci,
        board_ahci_ign_iferr,
+       board_ahci_nomsi,
        board_ahci_noncq,
        board_ahci_nosntf,
        board_ahci_yes_fbs,
@@ -120,6 +121,13 @@ static const struct ata_port_info ahci_port_info[] = {
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
+       [board_ahci_nomsi] = {
+               AHCI_HFLAGS     (AHCI_HFLAG_NO_MSI),
+               .flags          = AHCI_FLAG_COMMON,
+               .pio_mask       = ATA_PIO4,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &ahci_ops,
+       },
        [board_ahci_noncq] = {
                AHCI_HFLAGS     (AHCI_HFLAG_NO_NCQ),
                .flags          = AHCI_FLAG_COMMON,
@@ -304,6 +312,19 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
        { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
        { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
+       { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
+       { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
+       { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
+       { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
+       { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+       { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+       { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -441,6 +462,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
+         .driver_data = board_ahci_yes_fbs },                  /* 88se9182 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 on some Gigabyte */
@@ -455,6 +478,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 
        /* Promise */
        { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },   /* PDC42819 */
+       { PCI_VDEVICE(PROMISE, 0x3781), board_ahci },   /* FastTrak TX8660 ahci-mode */
 
        /* Asmedia */
        { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },   /* ASM1060 */
@@ -463,10 +487,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },   /* ASM1062 */
 
        /*
-        * Samsung SSDs found on some macbooks.  NCQ times out.
-        * https://bugzilla.kernel.org/show_bug.cgi?id=60731
+        * Samsung SSDs found on some macbooks.  NCQ times out if MSI is
+        * enabled.  https://bugzilla.kernel.org/show_bug.cgi?id=60731
         */
-       { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
+       { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
 
        /* Enmotus */
        { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
index b92913a528b61d418c28379b07f46567f3067514..82aa7b550ea5111af21446774f20d12fece22bc1 100644 (file)
@@ -340,6 +340,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
        { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
        /* SATA Controller IDE (Coleto Creek) */
        { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+       /* SATA Controller IDE (9 Series) */
+       { 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+       /* SATA Controller IDE (9 Series) */
+       { 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+       /* SATA Controller IDE (9 Series) */
+       { 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+       /* SATA Controller IDE (9 Series) */
+       { 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
 
        { }     /* terminate list */
 };
index 1a17b45eb683b6c6fa49ec16eb3098dea19831de..4ce78e760ef59bf9ff0f713a571baf297d1ad6d1 100644 (file)
@@ -4758,6 +4758,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
  *     ata_qc_new - Request an available ATA command, for queueing
  *     @ap: target port
  *
+ *     Some ATA host controllers may implement a queue depth which is less
+ *     than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
+ *     the hardware limitation.
+ *
  *     LOCKING:
  *     None.
  */
@@ -4765,14 +4769,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
 {
        struct ata_queued_cmd *qc = NULL;
+       unsigned int max_queue = ap->host->n_tags;
        unsigned int i, tag;
 
        /* no command while frozen */
        if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
                return NULL;
 
-       for (i = 0; i < ATA_MAX_QUEUE; i++) {
-               tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
+       for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
+               tag = tag < max_queue ? tag : 0;
 
                /* the last tag is reserved for internal command. */
                if (tag == ATA_TAG_INTERNAL)
@@ -6073,6 +6078,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
 {
        spin_lock_init(&host->lock);
        mutex_init(&host->eh_mutex);
+       host->n_tags = ATA_MAX_QUEUE - 1;
        host->dev = dev;
        host->ops = ops;
 }
@@ -6154,6 +6160,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 {
        int i, rc;
 
+       host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
+
        /* host must have been started */
        if (!(host->flags & ATA_HOST_STARTED)) {
                dev_err(host->dev, "BUG: trying to register unstarted host\n");
index b603720b877dd0344478ddecf234bb14f77975fa..37acda6fa7e4b4ad192767985692e31118968d24 100644 (file)
@@ -2008,13 +2008,15 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
 
        DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
 
-       /* software reset.  causes dev0 to be selected */
-       iowrite8(ap->ctl, ioaddr->ctl_addr);
-       udelay(20);     /* FIXME: flush */
-       iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
-       udelay(20);     /* FIXME: flush */
-       iowrite8(ap->ctl, ioaddr->ctl_addr);
-       ap->last_ctl = ap->ctl;
+       if (ap->ioaddr.ctl_addr) {
+               /* software reset.  causes dev0 to be selected */
+               iowrite8(ap->ctl, ioaddr->ctl_addr);
+               udelay(20);     /* FIXME: flush */
+               iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+               udelay(20);     /* FIXME: flush */
+               iowrite8(ap->ctl, ioaddr->ctl_addr);
+               ap->last_ctl = ap->ctl;
+       }
 
        /* wait the port to become ready */
        return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
@@ -2215,10 +2217,6 @@ void ata_sff_error_handler(struct ata_port *ap)
 
        spin_unlock_irqrestore(ap->lock, flags);
 
-       /* ignore ata_sff_softreset if ctl isn't accessible */
-       if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
-               softreset = NULL;
-
        /* ignore built-in hardresets if SCR access is not available */
        if ((hardreset == sata_std_hardreset ||
             hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
index f35f15f4d83e30288abf6506e9802c66f969ace7..f7badaa39eb612422a4cc51b69f8c902e62fb696 100644 (file)
@@ -586,7 +586,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
  *     Note: Original code is ata_bus_softreset().
  */
 
-static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
+static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
                                       unsigned long deadline)
 {
        struct ata_ioports *ioaddr = &ap->ioaddr;
@@ -600,9 +600,7 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
        udelay(20);
        out_be32(ioaddr->ctl_addr, ap->ctl);
 
-       scc_wait_after_reset(&ap->link, devmask, deadline);
-
-       return 0;
+       return scc_wait_after_reset(&ap->link, devmask, deadline);
 }
 
 /**
@@ -619,7 +617,8 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
 {
        struct ata_port *ap = link->ap;
        unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
-       unsigned int devmask = 0, err_mask;
+       unsigned int devmask = 0;
+       int rc;
        u8 err;
 
        DPRINTK("ENTER\n");
@@ -635,9 +634,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
 
        /* issue bus reset */
        DPRINTK("about to softreset, devmask=%x\n", devmask);
-       err_mask = scc_bus_softreset(ap, devmask, deadline);
-       if (err_mask) {
-               ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask);
+       rc = scc_bus_softreset(ap, devmask, deadline);
+       if (rc) {
+               ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc);
                return -EIO;
        }
 
index f3febbce6c462d2c7d432533630bcc17a5cd86d4..34c91ac3a814ee814b0a3b6358da713d131650b6 100644 (file)
@@ -252,12 +252,18 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev
        pci_write_config_byte(pdev, 0x54, ultra_cfg);
 }
 
-static struct scsi_host_template serverworks_sht = {
+static struct scsi_host_template serverworks_osb4_sht = {
+       ATA_BMDMA_SHT(DRV_NAME),
+       .sg_tablesize   = LIBATA_DUMB_MAX_PRD,
+};
+
+static struct scsi_host_template serverworks_csb_sht = {
        ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations serverworks_osb4_port_ops = {
        .inherits       = &ata_bmdma_port_ops,
+       .qc_prep        = ata_bmdma_dumb_qc_prep,
        .cable_detect   = serverworks_cable_detect,
        .mode_filter    = serverworks_osb4_filter,
        .set_piomode    = serverworks_set_piomode,
@@ -266,6 +272,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
 
 static struct ata_port_operations serverworks_csb_port_ops = {
        .inherits       = &serverworks_osb4_port_ops,
+       .qc_prep        = ata_bmdma_qc_prep,
        .mode_filter    = serverworks_csb_filter,
 };
 
@@ -405,6 +412,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
                }
        };
        const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
+       struct scsi_host_template *sht = &serverworks_csb_sht;
        int rc;
 
        rc = pcim_enable_device(pdev);
@@ -418,6 +426,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
                /* Select non UDMA capable OSB4 if we can't do fixups */
                if (rc < 0)
                        ppi[0] = &info[1];
+               sht = &serverworks_osb4_sht;
        }
        /* setup CSB5/CSB6 : South Bridge and IDE option RAID */
        else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
@@ -434,7 +443,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
                        ppi[1] = &ata_dummy_port_info;
        }
 
-       return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
+       return ata_pci_bmdma_init_one(pdev, ppi, sht, NULL, 0);
 }
 
 #ifdef CONFIG_PM
index ca4bcb8b3938605322bcdfc8e81163dcf31a0a96..2a19097a7cb19c87ae8a2d0015a41e3edd7b9571 100644 (file)
@@ -765,12 +765,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
        return &dir->kobj;
 }
 
+static DEFINE_MUTEX(gdp_mutex);
 
 static struct kobject *get_device_parent(struct device *dev,
                                         struct device *parent)
 {
        if (dev->class) {
-               static DEFINE_MUTEX(gdp_mutex);
                struct kobject *kobj = NULL;
                struct kobject *parent_kobj;
                struct kobject *k;
@@ -834,7 +834,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
            glue_dir->kset != &dev->class->p->glue_dirs)
                return;
 
+       mutex_lock(&gdp_mutex);
        kobject_put(glue_dir);
+       mutex_unlock(&gdp_mutex);
 }
 
 static void cleanup_device_parent(struct device *dev)
index 607efe6b7dc84609b50d3fc0f3b1694a420dfbdd..ac292eee83d6b738184dd53511e4accff6035d1d 100644 (file)
@@ -319,7 +319,7 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
        cpu->dev.id = num;
        cpu->dev.bus = &cpu_subsys;
        cpu->dev.release = cpu_device_release;
-#ifdef CONFIG_HAVE_CPU_AUTOPROBE
+#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
        cpu->dev.bus->uevent = cpu_uevent;
 #endif
        error = device_register(&cpu->dev);
index 01e21037d8feb5c04e6aedee608e9cdedc0d87bd..9cf8dc7448e257446a54c0b4d904b37ae4321463 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/pm.h>
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
+#include <linux/reboot.h>
 
 #include <generated/utsrelease.h>
 
@@ -130,6 +131,7 @@ struct firmware_buf {
        struct page **pages;
        int nr_pages;
        int page_array_size;
+       struct list_head pending_list;
 #endif
        char fw_id[];
 };
@@ -171,6 +173,9 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
        strcpy(buf->fw_id, fw_name);
        buf->fwc = fwc;
        init_completion(&buf->completion);
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+       INIT_LIST_HEAD(&buf->pending_list);
+#endif
 
        pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
 
@@ -446,10 +451,8 @@ static struct firmware_priv *to_firmware_priv(struct device *dev)
        return container_of(dev, struct firmware_priv, dev);
 }
 
-static void fw_load_abort(struct firmware_priv *fw_priv)
+static void __fw_load_abort(struct firmware_buf *buf)
 {
-       struct firmware_buf *buf = fw_priv->buf;
-
        /*
         * There is a small window in which user can write to 'loading'
         * between loading done and disappearance of 'loading'
@@ -457,8 +460,16 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
        if (test_bit(FW_STATUS_DONE, &buf->status))
                return;
 
+       list_del_init(&buf->pending_list);
        set_bit(FW_STATUS_ABORT, &buf->status);
        complete_all(&buf->completion);
+}
+
+static void fw_load_abort(struct firmware_priv *fw_priv)
+{
+       struct firmware_buf *buf = fw_priv->buf;
+
+       __fw_load_abort(buf);
 
        /* avoid user action after loading abort */
        fw_priv->buf = NULL;
@@ -467,6 +478,25 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 #define is_fw_load_aborted(buf)        \
        test_bit(FW_STATUS_ABORT, &(buf)->status)
 
+static LIST_HEAD(pending_fw_head);
+
+/* reboot notifier for avoid deadlock with usermode_lock */
+static int fw_shutdown_notify(struct notifier_block *unused1,
+                             unsigned long unused2, void *unused3)
+{
+       mutex_lock(&fw_lock);
+       while (!list_empty(&pending_fw_head))
+               __fw_load_abort(list_first_entry(&pending_fw_head,
+                                              struct firmware_buf,
+                                              pending_list));
+       mutex_unlock(&fw_lock);
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block fw_shutdown_nb = {
+       .notifier_call = fw_shutdown_notify,
+};
+
 static ssize_t firmware_timeout_show(struct class *class,
                                     struct class_attribute *attr,
                                     char *buf)
@@ -619,6 +649,7 @@ static ssize_t firmware_loading_store(struct device *dev,
                         * is completed.
                         * */
                        fw_map_pages_buf(fw_buf);
+                       list_del_init(&fw_buf->pending_list);
                        complete_all(&fw_buf->completion);
                        break;
                }
@@ -853,8 +884,15 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
                goto err_del_dev;
        }
 
+       mutex_lock(&fw_lock);
+       list_add(&buf->pending_list, &pending_fw_head);
+       mutex_unlock(&fw_lock);
+
        retval = device_create_file(f_dev, &dev_attr_loading);
        if (retval) {
+               mutex_lock(&fw_lock);
+               list_del_init(&buf->pending_list);
+               mutex_unlock(&fw_lock);
                dev_err(f_dev, "%s: device_create_file failed\n", __func__);
                goto err_del_bin_attr;
        }
@@ -1021,6 +1059,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
        if (!firmware_p)
                return -EINVAL;
 
+       if (!name || name[0] == '\0')
+               return -EINVAL;
+
        ret = _request_firmware_prepare(&fw, name, device);
        if (ret <= 0) /* error or already assigned */
                goto out;
@@ -1526,6 +1567,7 @@ static int __init firmware_class_init(void)
 {
        fw_cache_init();
 #ifdef CONFIG_FW_LOADER_USER_HELPER
+       register_reboot_notifier(&fw_shutdown_nb);
        return class_register(&firmware_class);
 #else
        return 0;
@@ -1539,6 +1581,7 @@ static void __exit firmware_class_exit(void)
        unregister_pm_notifier(&fw_cache.pm_notify);
 #endif
 #ifdef CONFIG_FW_LOADER_USER_HELPER
+       unregister_reboot_notifier(&fw_shutdown_nb);
        class_unregister(&firmware_class);
 #endif
 }
index a42c3548bdd34bcfe4d3e47390c438333c7d35cc..2c41a74d0eda09e2b087db0ecbd0b49b68128fa2 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
 #include <linux/timer.h>
+#include <linux/wakeup_reason.h>
 
 #include "../base.h"
 #include "power.h"
@@ -942,6 +943,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 static int dpm_suspend_noirq(pm_message_t state)
 {
        ktime_t starttime = ktime_get();
+       char suspend_abort[MAX_SUSPEND_ABORT_LEN];
        int error = 0;
 
        cpuidle_pause();
@@ -969,6 +971,9 @@ static int dpm_suspend_noirq(pm_message_t state)
                put_device(dev);
 
                if (pm_wakeup_pending()) {
+                       pm_get_active_wakeup_sources(suspend_abort,
+                               MAX_SUSPEND_ABORT_LEN);
+                       log_suspend_abort_reason(suspend_abort);
                        error = -EBUSY;
                        break;
                }
@@ -1027,6 +1032,7 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
 static int dpm_suspend_late(pm_message_t state)
 {
        ktime_t starttime = ktime_get();
+       char suspend_abort[MAX_SUSPEND_ABORT_LEN];
        int error = 0;
 
        mutex_lock(&dpm_list_mtx);
@@ -1052,6 +1058,9 @@ static int dpm_suspend_late(pm_message_t state)
                put_device(dev);
 
                if (pm_wakeup_pending()) {
+                       pm_get_active_wakeup_sources(suspend_abort,
+                               MAX_SUSPEND_ABORT_LEN);
+                       log_suspend_abort_reason(suspend_abort);
                        error = -EBUSY;
                        break;
                }
@@ -1119,6 +1128,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        char *info = NULL;
        int error = 0;
        struct dpm_watchdog wd;
+       char suspend_abort[MAX_SUSPEND_ABORT_LEN];
 
        dpm_wait_for_children(dev, async);
 
@@ -1135,6 +1145,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
                pm_wakeup_event(dev, 0);
 
        if (pm_wakeup_pending()) {
+               pm_get_active_wakeup_sources(suspend_abort,
+                       MAX_SUSPEND_ABORT_LEN);
+               log_suspend_abort_reason(suspend_abort);
                async_error = -EBUSY;
                goto Complete;
        }
index 79715e7fa43e34c9cbf9a4521a657d7bb952f9c6..bea700736f2476f8de18f943ec1090b82d11bd37 100644 (file)
@@ -659,6 +659,22 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
 }
 EXPORT_SYMBOL_GPL(pm_wakeup_event);
 
+void pm_get_active_wakeup_sources(char *pending_wakeup_source, size_t max)
+{
+       struct wakeup_source *ws;
+       int len = 0;
+       rcu_read_lock();
+       len += snprintf(pending_wakeup_source, max, "Pending Wakeup Sources: ");
+       list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+               if (ws->active) {
+                       len += snprintf(pending_wakeup_source + len, max,
+                               "%s ", ws->name);
+               }
+       }
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources);
+
 static void print_active_wakeup_sources(void)
 {
        struct wakeup_source *ws;
index 975719bc345008a4d396c72b5b1e160c50b8d2a8..b41994fd846034e565ec6c9b384e2938419bda04 100644 (file)
@@ -460,16 +460,20 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
 {
        struct rb_node *next;
        struct regmap_range_node *range_node;
+       const char *devname = "dummy";
 
        INIT_LIST_HEAD(&map->debugfs_off_cache);
        mutex_init(&map->cache_lock);
 
+       if (map->dev)
+               devname = dev_name(map->dev);
+
        if (name) {
                map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
-                                             dev_name(map->dev), name);
+                                             devname, name);
                name = map->debugfs_name;
        } else {
-               name = dev_name(map->dev);
+               name = devname;
        }
 
        map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
index d0c81d1f409c07bf2bb4eac3f34469a7b9626dd9..6a66f0b7d3d46602d75d25b4fcdc5edba27682d5 100644 (file)
@@ -114,7 +114,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
 
 bool regmap_volatile(struct regmap *map, unsigned int reg)
 {
-       if (!regmap_readable(map, reg))
+       if (!map->format.format_write && !regmap_readable(map, reg))
                return false;
 
        if (map->volatile_reg)
@@ -1177,7 +1177,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
        }
 
 #ifdef LOG_DEVICE
-       if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+       if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
                dev_info(map->dev, "%x <= %x\n", reg, val);
 #endif
 
@@ -1437,7 +1437,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
        ret = map->reg_read(context, reg, val);
        if (ret == 0) {
 #ifdef LOG_DEVICE
-               if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+               if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
                        dev_info(map->dev, "%x => %x\n", reg, *val);
 #endif
 
index e8d11b6630eeb6ed7b815ffe2e21588965882587..0ab546558c4e2e125ba5ab3c1a294e2bd3b3e144 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/mutex.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/wakeup_reason.h>
 
 static LIST_HEAD(syscore_ops_list);
 static DEFINE_MUTEX(syscore_ops_lock);
@@ -73,6 +74,8 @@ int syscore_suspend(void)
        return 0;
 
  err_out:
+       log_suspend_abort_reason("System core suspend callback %pF failed",
+               ops->suspend);
        pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend);
 
        list_for_each_entry_continue(ops, &syscore_ops_list, node)
index 89c497c630b4ee60839571aabf59c9ad553fb229..04a14e0f887830440b307be70075b38655b4a245 100644 (file)
@@ -79,6 +79,7 @@ bool
 drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
 {
        struct rb_node **new = &root->rb_node, *parent = NULL;
+       sector_t this_end = this->sector + (this->size >> 9);
 
        BUG_ON(!IS_ALIGNED(this->size, 512));
 
@@ -87,6 +88,8 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
                        rb_entry(*new, struct drbd_interval, rb);
 
                parent = *new;
+               if (here->end < this_end)
+                       here->end = this_end;
                if (this->sector < here->sector)
                        new = &(*new)->rb_left;
                else if (this->sector > here->sector)
@@ -99,6 +102,7 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
                        return false;
        }
 
+       this->end = this_end;
        rb_link_node(&this->rb, parent, new);
        rb_insert_augmented(&this->rb, root, &augment_callbacks);
        return true;
index 9e3f441e7e8441e83d61273ed0f34f1c309c518b..9c37f3d896a24805793542642dd4d4fbdf5ef6ca 100644 (file)
@@ -514,6 +514,12 @@ void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
        struct task_struct *opa;
 
        kref_get(&tconn->kref);
+       /* We may just have force_sig()'ed this thread
+        * to get it out of some blocking network function.
+        * Clear signals; otherwise kthread_run(), which internally uses
+        * wait_on_completion_killable(), will mistake our pending signal
+        * for a new fatal signal and fail. */
+       flush_signals(current);
        opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
        if (IS_ERR(opa)) {
                conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
index 07caf44d57550ca1e195ec98da6d8ab90b33b9b3..9951e66b85028a450e63ba092f9da428d2cb3165 100644 (file)
@@ -3227,7 +3227,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
        page_count = (u32) calc_pages_for(offset, length);
        pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
        if (IS_ERR(pages))
-               ret = PTR_ERR(pages);
+               return PTR_ERR(pages);
 
        ret = -ENOMEM;
        obj_request = rbd_obj_request_create(object_name, offset, length,
index 5814deb6963d52a875708e78a4a3a38eb148145e..0ebadf93b6c5610cb0d7e46edcbdfa040719c199 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
 #include <linux/genhd.h>
+#include <linux/cdrom.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
@@ -22,8 +23,8 @@
 
 #define DRV_MODULE_NAME                "sunvdc"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "1.0"
-#define DRV_MODULE_RELDATE     "June 25, 2007"
+#define DRV_MODULE_VERSION     "1.1"
+#define DRV_MODULE_RELDATE     "February 13, 2013"
 
 static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -32,7 +33,7 @@ MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
-#define VDC_TX_RING_SIZE       256
+#define VDC_TX_RING_SIZE       512
 
 #define WAITING_FOR_LINK_UP    0x01
 #define WAITING_FOR_TX_SPACE   0x02
@@ -65,11 +66,9 @@ struct vdc_port {
        u64                     operations;
        u32                     vdisk_size;
        u8                      vdisk_type;
+       u8                      vdisk_mtype;
 
        char                    disk_name[32];
-
-       struct vio_disk_geom    geom;
-       struct vio_disk_vtoc    label;
 };
 
 static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
@@ -79,9 +78,16 @@ static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
 
 /* Ordered from largest major to lowest */
 static struct vio_version vdc_versions[] = {
+       { .major = 1, .minor = 1 },
        { .major = 1, .minor = 0 },
 };
 
+static inline int vdc_version_supported(struct vdc_port *port,
+                                       u16 major, u16 minor)
+{
+       return port->vio.ver.major == major && port->vio.ver.minor >= minor;
+}
+
 #define VDCBLK_NAME    "vdisk"
 static int vdc_major;
 #define PARTITION_SHIFT        3
@@ -94,18 +100,54 @@ static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
 static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
        struct gendisk *disk = bdev->bd_disk;
-       struct vdc_port *port = disk->private_data;
+       sector_t nsect = get_capacity(disk);
+       sector_t cylinders = nsect;
 
-       geo->heads = (u8) port->geom.num_hd;
-       geo->sectors = (u8) port->geom.num_sec;
-       geo->cylinders = port->geom.num_cyl;
+       geo->heads = 0xff;
+       geo->sectors = 0x3f;
+       sector_div(cylinders, geo->heads * geo->sectors);
+       geo->cylinders = cylinders;
+       if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
+               geo->cylinders = 0xffff;
 
        return 0;
 }
 
+/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev
+ * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.
+ * Needed to be able to install inside an ldom from an iso image.
+ */
+static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
+                    unsigned command, unsigned long argument)
+{
+       int i;
+       struct gendisk *disk;
+
+       switch (command) {
+       case CDROMMULTISESSION:
+               pr_debug(PFX "Multisession CDs not supported\n");
+               for (i = 0; i < sizeof(struct cdrom_multisession); i++)
+                       if (put_user(0, (char __user *)(argument + i)))
+                               return -EFAULT;
+               return 0;
+
+       case CDROM_GET_CAPABILITY:
+               disk = bdev->bd_disk;
+
+               if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))
+                       return 0;
+               return -EINVAL;
+
+       default:
+               pr_debug(PFX "ioctl %08x not supported\n", command);
+               return -EINVAL;
+       }
+}
+
 static const struct block_device_operations vdc_fops = {
        .owner          = THIS_MODULE,
        .getgeo         = vdc_getgeo,
+       .ioctl          = vdc_ioctl,
 };
 
 static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
@@ -165,9 +207,9 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
        struct vio_disk_attr_info *pkt = arg;
 
        viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
-              "xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
+              "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
               pkt->tag.stype, pkt->operations,
-              pkt->vdisk_size, pkt->vdisk_type,
+              pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
               pkt->xfer_mode, pkt->vdisk_block_size,
               pkt->max_xfer_size);
 
@@ -192,8 +234,11 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
                }
 
                port->operations = pkt->operations;
-               port->vdisk_size = pkt->vdisk_size;
                port->vdisk_type = pkt->vdisk_type;
+               if (vdc_version_supported(port, 1, 1)) {
+                       port->vdisk_size = pkt->vdisk_size;
+                       port->vdisk_mtype = pkt->vdisk_mtype;
+               }
                if (pkt->max_xfer_size < port->max_xfer_size)
                        port->max_xfer_size = pkt->max_xfer_size;
                port->vdisk_block_size = pkt->vdisk_block_size;
@@ -236,7 +281,9 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
 
        __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
 
-       if (blk_queue_stopped(port->disk->queue))
+       /* restart blk queue when ring is half emptied */
+       if (blk_queue_stopped(port->disk->queue) &&
+           vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
                blk_start_queue(port->disk->queue);
 }
 
@@ -388,12 +435,6 @@ static int __send_request(struct request *req)
        for (i = 0; i < nsg; i++)
                len += sg[i].length;
 
-       if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
-               blk_stop_queue(port->disk->queue);
-               err = -ENOMEM;
-               goto out;
-       }
-
        desc = vio_dring_cur(dr);
 
        err = ldc_map_sg(port->vio.lp, sg, nsg,
@@ -433,21 +474,32 @@ static int __send_request(struct request *req)
                port->req_id++;
                dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
        }
-out:
 
        return err;
 }
 
-static void do_vdc_request(struct request_queue *q)
+static void do_vdc_request(struct request_queue *rq)
 {
-       while (1) {
-               struct request *req = blk_fetch_request(q);
+       struct request *req;
 
-               if (!req)
-                       break;
+       while ((req = blk_peek_request(rq)) != NULL) {
+               struct vdc_port *port;
+               struct vio_dring_state *dr;
 
-               if (__send_request(req) < 0)
-                       __blk_end_request_all(req, -EIO);
+               port = req->rq_disk->private_data;
+               dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+               if (unlikely(vdc_tx_dring_avail(dr) < 1))
+                       goto wait;
+
+               blk_start_request(req);
+
+               if (__send_request(req) < 0) {
+                       blk_requeue_request(rq, req);
+wait:
+                       /* Avoid pointless unplugs. */
+                       blk_stop_queue(rq);
+                       break;
+               }
        }
 }
 
@@ -656,25 +708,27 @@ static int probe_disk(struct vdc_port *port)
        if (comp.err)
                return comp.err;
 
-       err = generic_request(port, VD_OP_GET_VTOC,
-                             &port->label, sizeof(port->label));
-       if (err < 0) {
-               printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
-               return err;
-       }
-
-       err = generic_request(port, VD_OP_GET_DISKGEOM,
-                             &port->geom, sizeof(port->geom));
-       if (err < 0) {
-               printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
-                      "error %d\n", err);
-               return err;
+       if (vdc_version_supported(port, 1, 1)) {
+               /* vdisk_size should be set during the handshake, if it wasn't
+                * then the underlying disk is reserved by another system
+                */
+               if (port->vdisk_size == -1)
+                       return -ENODEV;
+       } else {
+               struct vio_disk_geom geom;
+
+               err = generic_request(port, VD_OP_GET_DISKGEOM,
+                                     &geom, sizeof(geom));
+               if (err < 0) {
+                       printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
+                              "error %d\n", err);
+                       return err;
+               }
+               port->vdisk_size = ((u64)geom.num_cyl *
+                                   (u64)geom.num_hd *
+                                   (u64)geom.num_sec);
        }
 
-       port->vdisk_size = ((u64)port->geom.num_cyl *
-                           (u64)port->geom.num_hd *
-                           (u64)port->geom.num_sec);
-
        q = blk_init_queue(do_vdc_request, &port->vio.lock);
        if (!q) {
                printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
@@ -691,6 +745,10 @@ static int probe_disk(struct vdc_port *port)
 
        port->disk = g;
 
+       /* Each segment in a request is up to an aligned page in size. */
+       blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+       blk_queue_max_segment_size(q, PAGE_SIZE);
+
        blk_queue_max_segments(q, port->ring_cookies);
        blk_queue_max_hw_sectors(q, port->max_xfer_size);
        g->major = vdc_major;
@@ -704,9 +762,32 @@ static int probe_disk(struct vdc_port *port)
 
        set_capacity(g, port->vdisk_size);
 
-       printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
+       if (vdc_version_supported(port, 1, 1)) {
+               switch (port->vdisk_mtype) {
+               case VD_MEDIA_TYPE_CD:
+                       pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
+                       g->flags |= GENHD_FL_CD;
+                       g->flags |= GENHD_FL_REMOVABLE;
+                       set_disk_ro(g, 1);
+                       break;
+
+               case VD_MEDIA_TYPE_DVD:
+                       pr_info(PFX "Virtual DVD %s\n", port->disk_name);
+                       g->flags |= GENHD_FL_CD;
+                       g->flags |= GENHD_FL_REMOVABLE;
+                       set_disk_ro(g, 1);
+                       break;
+
+               case VD_MEDIA_TYPE_FIXED:
+                       pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
+                       break;
+               }
+       }
+
+       pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
               g->disk_name,
-              port->vdisk_size, (port->vdisk_size >> (20 - 9)));
+              port->vdisk_size, (port->vdisk_size >> (20 - 9)),
+              port->vio.ver.major, port->vio.ver.minor);
 
        add_disk(g);
 
@@ -765,6 +846,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        else
                snprintf(port->disk_name, sizeof(port->disk_name),
                         VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
+       port->vdisk_size = -1;
 
        err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
                              vdc_versions, ARRAY_SIZE(vdc_versions),
index 45aa8e7601247f2b2b7ff8fe29dbeae1cc0bc632..61a8ec4e5f4d7045bd280a86b2989fc0db9b11bc 100644 (file)
@@ -302,6 +302,9 @@ static void btusb_intr_complete(struct urb *urb)
                        BT_ERR("%s corrupted event packet", hdev->name);
                        hdev->stat.err_rx++;
                }
+       } else if (urb->status == -ENOENT) {
+               /* Avoid suspend failed when usb_kill_urb */
+               return;
        }
 
        if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
@@ -390,6 +393,9 @@ static void btusb_bulk_complete(struct urb *urb)
                        BT_ERR("%s corrupted ACL packet", hdev->name);
                        hdev->stat.err_rx++;
                }
+       } else if (urb->status == -ENOENT) {
+               /* Avoid suspend failed when usb_kill_urb */
+               return;
        }
 
        if (!test_bit(BTUSB_BULK_RUNNING, &data->flags))
@@ -484,6 +490,9 @@ static void btusb_isoc_complete(struct urb *urb)
                                hdev->stat.err_rx++;
                        }
                }
+       } else if (urb->status == -ENOENT) {
+               /* Avoid suspend failed when usb_kill_urb */
+               return;
        }
 
        if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags))
index b6154d5a07a51cf954b1e34d869cb6d3feb75f53..db35c542eb20c2bd3433ba83fbcb737d07ae08c9 100644 (file)
@@ -237,7 +237,7 @@ static void h5_pkt_cull(struct h5 *h5)
                        break;
 
                to_remove--;
-               seq = (seq - 1) % 8;
+               seq = (seq - 1) & 0x07;
        }
 
        if (seq != h5->rx_ack)
@@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
            H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
                BT_ERR("Non-link packet received in non-active state");
                h5_reset_rx(h5);
+               return 0;
        }
 
        h5->rx_func = h5_rx_payload;
index 81eefa1c0d3f9c8b7fa664a4fd809a5bb46d174c..aee3464a5bdc84c5809ad5c245bb61241b5333ed 100644 (file)
@@ -933,8 +933,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
         * pool while mixing, and hash one final time.
         */
        sha_transform(hash.w, extract, workspace);
-       memset(extract, 0, sizeof(extract));
-       memset(workspace, 0, sizeof(workspace));
+       memzero_explicit(extract, sizeof(extract));
+       memzero_explicit(workspace, sizeof(workspace));
 
        /*
         * In case the hash function has some recognizable output
@@ -957,7 +957,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
        }
 
        memcpy(out, &hash, EXTRACT_SIZE);
-       memset(&hash, 0, sizeof(hash));
+       memzero_explicit(&hash, sizeof(hash));
 }
 
 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
@@ -1005,7 +1005,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
        }
 
        /* Wipe data just returned from memory */
-       memset(tmp, 0, sizeof(tmp));
+       memzero_explicit(tmp, sizeof(tmp));
 
        return ret;
 }
@@ -1043,7 +1043,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
        }
 
        /* Wipe data just returned from memory */
-       memset(tmp, 0, sizeof(tmp));
+       memzero_explicit(tmp, sizeof(tmp));
 
        return ret;
 }
index 7c3b3dcbfbc8359e3aad78c60281ea2eecfb9a9d..f659a571ad23e3b2d10a03a02449126cebd4190f 100644 (file)
@@ -533,11 +533,10 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
 int tpm_get_timeouts(struct tpm_chip *chip)
 {
        struct tpm_cmd_t tpm_cmd;
-       struct timeout_t *timeout_cap;
+       unsigned long new_timeout[4];
+       unsigned long old_timeout[4];
        struct duration_t *duration_cap;
        ssize_t rc;
-       u32 timeout;
-       unsigned int scale = 1;
 
        tpm_cmd.header.in = tpm_getcap_header;
        tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
@@ -571,25 +570,46 @@ int tpm_get_timeouts(struct tpm_chip *chip)
            != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
                return -EINVAL;
 
-       timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
-       /* Don't overwrite default if value is 0 */
-       timeout = be32_to_cpu(timeout_cap->a);
-       if (timeout && timeout < 1000) {
-               /* timeouts in msec rather usec */
-               scale = 1000;
-               chip->vendor.timeout_adjusted = true;
+       old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
+       old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
+       old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
+       old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
+       memcpy(new_timeout, old_timeout, sizeof(new_timeout));
+
+       /*
+        * Provide ability for vendor overrides of timeout values in case
+        * of misreporting.
+        */
+       if (chip->vendor.update_timeouts != NULL)
+               chip->vendor.timeout_adjusted =
+                       chip->vendor.update_timeouts(chip, new_timeout);
+
+       if (!chip->vendor.timeout_adjusted) {
+               /* Don't overwrite default if value is 0 */
+               if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
+                       int i;
+
+                       /* timeouts in msec rather than usec */
+                       for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
+                               new_timeout[i] *= 1000;
+                       chip->vendor.timeout_adjusted = true;
+               }
        }
-       if (timeout)
-               chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale);
-       timeout = be32_to_cpu(timeout_cap->b);
-       if (timeout)
-               chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale);
-       timeout = be32_to_cpu(timeout_cap->c);
-       if (timeout)
-               chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale);
-       timeout = be32_to_cpu(timeout_cap->d);
-       if (timeout)
-               chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale);
+
+       /* Report adjusted timeouts */
+       if (chip->vendor.timeout_adjusted) {
+               dev_info(chip->dev,
+                        HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
+                        old_timeout[0], new_timeout[0],
+                        old_timeout[1], new_timeout[1],
+                        old_timeout[2], new_timeout[2],
+                        old_timeout[3], new_timeout[3]);
+       }
+
+       chip->vendor.timeout_a = usecs_to_jiffies(new_timeout[0]);
+       chip->vendor.timeout_b = usecs_to_jiffies(new_timeout[1]);
+       chip->vendor.timeout_c = usecs_to_jiffies(new_timeout[2]);
+       chip->vendor.timeout_d = usecs_to_jiffies(new_timeout[3]);
 
 duration:
        tpm_cmd.header.in = tpm_getcap_header;
@@ -1423,13 +1443,13 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
        int err, total = 0, retries = 5;
        u8 *dest = out;
 
+       if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
+               return -EINVAL;
+
        chip = tpm_chip_find_get(chip_num);
        if (chip == NULL)
                return -ENODEV;
 
-       if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
-               return -EINVAL;
-
        do {
                tpm_cmd.header.in = tpm_getrandom_header;
                tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
@@ -1448,6 +1468,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
                num_bytes -= recd;
        } while (retries-- && total < max);
 
+       tpm_chip_put(chip);
        return total ? total : -EIO;
 }
 EXPORT_SYMBOL_GPL(tpm_get_random);
index 0770d1d79366d31d4832dd2186d677a6ad3a565c..deffda7678a0b2ec154d5ed99323f2a76d694731 100644 (file)
@@ -95,6 +95,9 @@ struct tpm_vendor_specific {
        int (*send) (struct tpm_chip *, u8 *, size_t);
        void (*cancel) (struct tpm_chip *);
        u8 (*status) (struct tpm_chip *);
+       bool (*update_timeouts)(struct tpm_chip *chip,
+                               unsigned long *timeout_cap);
+
        void (*release) (struct device *);
        struct miscdevice miscdev;
        struct attribute_group *attr_group;
index 8a41b6be23a057bd5ff033f30d7de52ac7085a38..72f21377fa02f975af9a88e16a9a697dc897bf60 100644 (file)
@@ -373,6 +373,36 @@ out_err:
        return rc;
 }
 
+struct tis_vendor_timeout_override {
+       u32 did_vid;
+       unsigned long timeout_us[4];
+};
+
+static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
+       /* Atmel 3204 */
+       { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
+                       (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
+};
+
+static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
+                                   unsigned long *timeout_cap)
+{
+       int i;
+       u32 did_vid;
+
+       did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
+
+       for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
+               if (vendor_timeout_overrides[i].did_vid != did_vid)
+                       continue;
+               memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
+                      sizeof(vendor_timeout_overrides[i].timeout_us));
+               return true;
+       }
+
+       return false;
+}
+
 /*
  * Early probing for iTPM with STS_DATA_EXPECT flaw.
  * Try sending command without itpm flag set and if that
@@ -475,6 +505,7 @@ static struct tpm_vendor_specific tpm_tis = {
        .recv = tpm_tis_recv,
        .send = tpm_tis_send,
        .cancel = tpm_tis_ready,
+       .update_timeouts = tpm_tis_update_timeouts,
        .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_canceled = tpm_tis_req_canceled,
index 197051dec37edc20afe25f177a9f64d7ed24d632..fa1bbedb7daee4c2dcbfdb3d43325db11d0911ed 100644 (file)
@@ -408,6 +408,7 @@ const struct clk_ops clkops_rate_core_peri = {
        .set_rate       = NULL,
 };
 
+#if 0
 static unsigned long clk_ddr_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
 {
@@ -494,6 +495,25 @@ const struct clk_ops clkops_rate_ddr_div2 = {
        .determine_rate = clk_ddr_determine_rate,
 };
 
+static unsigned long clk_ddr_div4_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       /* Same as clk_core, we should NOT set clk_ddr's parent
+        * (dpll) rate directly as a side effect.
+        */
+       struct clk *parent = __clk_get_parent(hw->clk);
+
+       return clk_divider_recalc_rate(hw, __clk_get_rate(parent))/4;
+}
+
+const struct clk_ops clkops_rate_ddr_div4 = {
+       .recalc_rate    = clk_ddr_div4_recalc_rate,
+       .round_rate     = clk_ddr_round_rate,
+       .set_rate       = clk_ddr_set_rate,
+       .determine_rate = clk_ddr_determine_rate,
+};
+#endif
+
 static unsigned long clk_3288_i2s_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
 {
@@ -696,6 +716,72 @@ const struct clk_ops clkops_rate_3288_dclk_lcdc1 = {
        .recalc_rate    = clk_divider_recalc_rate,
 };
 
+#define CONFIG_RK3368_MUX_NO_USE_NPLL
+
+static long clk_3368_mux_div_determine_rate(struct clk_hw *div_hw,
+                                           unsigned long rate,
+                                           unsigned long *best_parent_rate,
+                                           struct clk **best_parent_p)
+{
+       struct clk *clk = div_hw->clk, *parent = NULL, *best_parent = NULL;
+       int i, num_parents;
+       unsigned long parent_rate = 0, best_prate = 0, best = 0, now = 0;
+
+       parent = __clk_get_parent(clk);
+       if (!parent) {
+               best = __clk_get_rate(clk);
+               goto out;
+       }
+
+       /* if NO_REPARENT flag set, pass through to current parent */
+       if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
+               best_prate = __clk_get_rate(parent);
+               best = clk_divider_ops.round_rate(div_hw, rate, &best_prate);
+               goto out;
+       }
+
+       /* find the parent that can provide the fastest rate <= rate */
+       num_parents = clk->num_parents;
+       for (i = 0; i < num_parents; i++) {
+               parent = clk_get_parent_by_index(clk, i);
+               if (!parent)
+                       continue;
+
+#ifdef CONFIG_RK3368_MUX_NO_USE_NPLL
+               if (!strcmp(__clk_get_name(parent), "clk_npll"))
+                       continue;
+#endif
+               parent_rate = __clk_get_rate(parent);
+               now = clk_divider_ops.round_rate(div_hw, rate, &parent_rate);
+
+               if (now <= rate && now > best) {
+                       best_parent = parent;
+                       best_prate = parent_rate;
+                       best = now;
+               }
+       }
+
+out:
+       if (best_prate)
+               *best_parent_rate = best_prate;
+
+       if (best_parent)
+               *best_parent_p = best_parent;
+
+       clk_debug("clk name = %s, determine rate = %lu, best = %lu\n"
+                 "\tbest_parent name = %s, best_prate = %lu\n",
+                 clk->name, rate, best,
+                 __clk_get_name(*best_parent_p), *best_parent_rate);
+
+       return best;
+}
+
+const struct clk_ops clkops_rate_3368_auto_parent = {
+       .recalc_rate    = clk_divider_recalc_rate,
+       .round_rate     = clk_divider_round_rate,
+       .set_rate       = clk_divider_set_rate,
+       .determine_rate = clk_3368_mux_div_determine_rate,
+};
 
 struct clk_ops_table rk_clkops_rate_table[] = {
        {.index = CLKOPS_RATE_MUX_DIV,          .clk_ops = &clkops_rate_auto_parent},
@@ -705,12 +791,14 @@ struct clk_ops_table rk_clkops_rate_table[] = {
        {.index = CLKOPS_RATE_FRAC,             .clk_ops = &clkops_rate_frac},
        {.index = CLKOPS_RATE_CORE,             .clk_ops = &clkops_rate_core},
        {.index = CLKOPS_RATE_CORE_CHILD,       .clk_ops = &clkops_rate_core_peri},
-       {.index = CLKOPS_RATE_DDR,              .clk_ops = &clkops_rate_ddr},
+       {.index = CLKOPS_RATE_DDR,              .clk_ops = NULL},
        {.index = CLKOPS_RATE_RK3288_I2S,       .clk_ops = &clkops_rate_3288_i2s},
        {.index = CLKOPS_RATE_RK3288_USB480M,   .clk_ops = &clkops_rate_3288_usb480m},
        {.index = CLKOPS_RATE_RK3288_DCLK_LCDC0,.clk_ops = &clkops_rate_3288_dclk_lcdc0},
        {.index = CLKOPS_RATE_RK3288_DCLK_LCDC1,.clk_ops = &clkops_rate_3288_dclk_lcdc1},
-       {.index = CLKOPS_RATE_DDR_DIV2, .clk_ops = &clkops_rate_ddr_div2},
+       {.index = CLKOPS_RATE_DDR_DIV2,         .clk_ops = NULL},
+       {.index = CLKOPS_RATE_DDR_DIV4,         .clk_ops = NULL},
+       {.index = CLKOPS_RATE_RK3368_MUX_DIV_NPLL,   .clk_ops = &clkops_rate_3368_auto_parent},
        {.index = CLKOPS_RATE_I2S,              .clk_ops = NULL},
        {.index = CLKOPS_RATE_CIFOUT,           .clk_ops = NULL},
        {.index = CLKOPS_RATE_UART,             .clk_ops = NULL},
index 933bd029e795aa825c706eac99077ee419abfe59..7d43745de3998d7bf2801575bdbed176de6655e6 100644 (file)
@@ -25,10 +25,9 @@ const struct clk_ops *rk_get_clkops(unsigned int idx);
 
 #define clk_err(fmt, args...) printk(KERN_ERR "rkclk: "fmt, ##args)
 
+u32 cru_readl(u32 offset);
+void cru_writel(u32 val, u32 offset);
 
-#define cru_readl(offset)      readl(RK_CRU_VIRT + (offset))
-#define cru_writel(v, o)       do {writel(v, RK_CRU_VIRT + (o)); dsb();} \
-                               while (0)
-#define grf_readl(offset)      readl_relaxed(RK_GRF_VIRT + (offset))
+u32 grf_readl(u32 offset);
 
 #endif /* __RK_CLKOPS_H */
index cf95ade9c6eb0d044a0ec35ff9a13d8410831c18..14fc1e47e97eff296c0a22eec032591f1bafefc0 100644 (file)
@@ -100,12 +100,12 @@ static int clk_pd_endisable(struct clk_hw *hw, bool enable)
 {
        struct clk_pd *pd = to_clk_pd(hw);
        unsigned long flags = 0;
-       int ret;
+       int ret = 0;
 
        if (pd->lock)
                spin_lock_irqsave(pd->lock, flags);
 
-       ret = rockchip_pmu_ops.set_power_domain(pd->id, enable);
+       /* ret = rockchip_pmu_ops.set_power_domain(pd->id, enable); */
 
        if (pd->lock)
                spin_unlock_irqrestore(pd->lock, flags);
@@ -135,12 +135,14 @@ static void clk_pd_disable(struct clk_hw *hw)
        __clk_pd_notify(hw->clk, RK_CLK_PD_POST_DISABLE);
 }
 
+/*
 static int clk_pd_is_enabled(struct clk_hw *hw)
 {
        struct clk_pd *pd = to_clk_pd(hw);
 
        return rockchip_pmu_ops.power_domain_is_on(pd->id);
 }
+*/
 
 static int clk_pd_prepare(struct clk_hw *hw)
 {
@@ -159,7 +161,7 @@ const struct clk_ops clk_pd_ops = {
        .unprepare = clk_pd_unprepare,
        .enable = clk_pd_enable,
        .disable = clk_pd_disable,
-       .is_enabled = clk_pd_is_enabled,
+       /*.is_enabled = clk_pd_is_enabled,*/
 };
 
 static int clk_pd_virt_enable(struct clk_hw *hw)
index cdba78589c4cc686e25d0e117a78d1644c5b2682..776445b1b232aebed5b3db7220e47e9a4bd49f1a 100755 (executable)
@@ -31,8 +31,10 @@ static const struct pll_clk_set rk3188plus_pll_com_table[] = {
        _RK3188PLUS_PLL_SET_CLKS(891000,        8,      594,    2),
        _RK3188PLUS_PLL_SET_CLKS(768000,        1,      64,     2),
        _RK3188PLUS_PLL_SET_CLKS(594000,        2,      198,    4),
+       _RK3188PLUS_PLL_SET_CLKS(576000,        1,      48,     2),
        _RK3188PLUS_PLL_SET_CLKS(500000,        3,      250,    4),
        _RK3188PLUS_PLL_SET_CLKS(408000,        1,      68,     4),
+       _RK3188PLUS_PLL_SET_CLKS(400000,        3,      200,    4),
        _RK3188PLUS_PLL_SET_CLKS(396000,        1,      66,     4),
        _RK3188PLUS_PLL_SET_CLKS(384000,        2,      128,    4),
        _RK3188PLUS_PLL_SET_CLKS(360000,        1,      60,     4),
@@ -235,12 +237,103 @@ static const struct pll_clk_set rk312xplus_pll_com_table[] = {
        _RK3036_PLL_SET_CLKS(400000, 6, 400, 2, 2, 1, 0),
 };
 
+static const struct apll_clk_set rk3368_apllb_table[] = {
+                       /*(_mhz,        nr,     nf,     no,     aclkm,  atclk,  pclk_dbg)*/
+       _RK3368_APLL_SET_CLKS(1608,     1,      67,     1,      2,      6,      6),
+       _RK3368_APLL_SET_CLKS(1560,     1,      65,     1,      2,      6,      6),
+       _RK3368_APLL_SET_CLKS(1512,     1,      63,     1,      2,      6,      6),
+       _RK3368_APLL_SET_CLKS(1488,     1,      62,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1464,     1,      61,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1440,     1,      60,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1416,     1,      59,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1392,     1,      58,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1368,     1,      57,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1344,     1,      56,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1320,     1,      55,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1296,     1,      54,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1272,     1,      53,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1248,     1,      52,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1224,     1,      51,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1200,     1,      50,     1,      2,      4,      4),
+       _RK3368_APLL_SET_CLKS(1176,     1,      49,     1,      2,      4,      4),
+       _RK3368_APLL_SET_CLKS(1128,     1,      47,     1,      2,      4,      4),
+       _RK3368_APLL_SET_CLKS(1104,     1,      46,     1,      2,      4,      4),
+       _RK3368_APLL_SET_CLKS(1008,     1,      84,     2,      2,      4,      4),
+       _RK3368_APLL_SET_CLKS(912,      1,      76,     2,      2,      4,      4),
+       _RK3368_APLL_SET_CLKS(888,      1,      74,     2,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(816,      1,      68,     2,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(792,      1,      66,     2,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(696,      1,      58,     2,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(672,      1,      56,     2,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(648,      1,      54,     2,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(624,      1,      52,     2,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(600,      1,      50,     2,      2,      2,      2),
+       _RK3368_APLL_SET_CLKS(576,      1,      48,     2,      2,      2,      2),
+       _RK3368_APLL_SET_CLKS(552,      1,      92,     4,      2,      2,      2),
+       _RK3368_APLL_SET_CLKS(528,      1,      88,     4,      2,      2,      2),
+       _RK3368_APLL_SET_CLKS(504,      1,      84,     4,      2,      2,      2),
+       _RK3368_APLL_SET_CLKS(480,      1,      80,     4,      2,      2,      2),
+       _RK3368_APLL_SET_CLKS(456,      1,      76,     4,      2,      2,      2),
+       _RK3368_APLL_SET_CLKS(408,      1,      68,     4,      2,      2,      2),
+       _RK3368_APLL_SET_CLKS(312,      1,      52,     4,      2,      2,      2),
+       _RK3368_APLL_SET_CLKS(252,      1,      84,     8,      2,      1,      1),
+       _RK3368_APLL_SET_CLKS(216,      1,      72,     8,      2,      1,      1),
+       _RK3368_APLL_SET_CLKS(126,      2,      84,     8,      2,      1,      1),
+       _RK3368_APLL_SET_CLKS(48,       2,      32,     8,      2,      1,      1),
+       _RK3368_APLL_SET_CLKS(0,        1,      32,     16,     2,      1,      1),
+};
+
+static const struct apll_clk_set rk3368_aplll_table[] = {
+                       /*(_mhz,        nr,     nf,     no,     aclkm,  atclk,  pclk_dbg)*/
+       _RK3368_APLL_SET_CLKS(1608,     1,      67,     1,      2,      7,      7),
+       _RK3368_APLL_SET_CLKS(1560,     1,      65,     1,      2,      7,      7),
+       _RK3368_APLL_SET_CLKS(1512,     1,      63,     1,      2,      7,      7),
+       _RK3368_APLL_SET_CLKS(1488,     1,      62,     1,      2,      6,      6),
+       _RK3368_APLL_SET_CLKS(1464,     1,      61,     1,      2,      6,      6),
+       _RK3368_APLL_SET_CLKS(1440,     1,      60,     1,      2,      6,      6),
+       _RK3368_APLL_SET_CLKS(1416,     1,      59,     1,      2,      6,      6),
+       _RK3368_APLL_SET_CLKS(1392,     1,      58,     1,      2,      6,      6),
+       _RK3368_APLL_SET_CLKS(1368,     1,      57,     1,      2,      6,      6),
+       _RK3368_APLL_SET_CLKS(1344,     1,      56,     1,      2,      6,      6),
+       _RK3368_APLL_SET_CLKS(1320,     1,      55,     1,      2,      6,      6),
+       _RK3368_APLL_SET_CLKS(1296,     1,      54,     1,      2,      6,      6),
+       _RK3368_APLL_SET_CLKS(1272,     1,      53,     1,      2,      6,      6),
+       _RK3368_APLL_SET_CLKS(1248,     1,      52,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1224,     1,      51,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1200,     1,      50,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1176,     1,      49,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1128,     1,      47,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1104,     1,      46,     1,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(1008,     1,      84,     2,      2,      5,      5),
+       _RK3368_APLL_SET_CLKS(912,      1,      76,     2,      2,      4,      4),
+       _RK3368_APLL_SET_CLKS(888,      1,      74,     2,      2,      4,      4),
+       _RK3368_APLL_SET_CLKS(816,      1,      68,     2,      2,      4,      4),
+       _RK3368_APLL_SET_CLKS(792,      1,      66,     2,      2,      4,      4),
+       _RK3368_APLL_SET_CLKS(696,      1,      58,     2,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(672,      1,      56,     2,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(648,      1,      54,     2,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(624,      1,      52,     2,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(600,      1,      50,     2,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(576,      1,      48,     2,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(552,      1,      92,     4,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(528,      1,      88,     4,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(504,      1,      84,     4,      2,      3,      3),
+       _RK3368_APLL_SET_CLKS(480,      1,      80,     4,      2,      2,      2),
+       _RK3368_APLL_SET_CLKS(456,      1,      76,     4,      2,      2,      2),
+       _RK3368_APLL_SET_CLKS(408,      1,      68,     4,      2,      2,      2),
+       _RK3368_APLL_SET_CLKS(312,      1,      52,     4,      2,      2,      2),
+       _RK3368_APLL_SET_CLKS(252,      1,      84,     8,      2,      2,      2),
+       _RK3368_APLL_SET_CLKS(216,      1,      72,     8,      2,      1,      1),
+       _RK3368_APLL_SET_CLKS(126,      2,      84,     8,      2,      1,      1),
+       _RK3368_APLL_SET_CLKS(48,       2,      32,     8,      2,      1,      1),
+       _RK3368_APLL_SET_CLKS(0,        1,      32,     16,     2,      1,      1),
+};
+
 static void pll_wait_lock(struct clk_hw *hw)
 {
        struct clk_pll *pll = to_clk_pll(hw);
        int delay = 24000000;
 
-
        while (delay > 0) {
                if (grf_readl(pll->status_offset) & (1 << pll->status_shift))
                        break;
@@ -394,12 +487,12 @@ static int _pll_clk_set_rate_3188(struct pll_clk_set *clk_set,
        cru_writel(_RK3188_PLL_MODE_SLOW_SET(pll->mode_shift), pll->mode_offset);
        //pll power down
        cru_writel((0x1 << (16+1)) | (0x1<<1), pll->reg + RK3188_PLL_CON(3));
-       dsb();
-       dsb();
-       dsb();
-       dsb();
-       dsb();
-       dsb();
+       dsb(sy);
+       dsb(sy);
+       dsb(sy);
+       dsb(sy);
+       dsb(sy);
+       dsb(sy);
        cru_writel(clk_set->pllcon0, pll->reg + RK3188_PLL_CON(0));
        cru_writel(clk_set->pllcon1, pll->reg + RK3188_PLL_CON(1));
 
@@ -604,12 +697,12 @@ CHANGE_APLL:
 
        /* PLL power down */
        cru_writel((0x1 << (16+1)) | (0x1<<1), pll->reg + RK3188_PLL_CON(3));
-       dsb();
-       dsb();
-       dsb();
-       dsb();
-       dsb();
-       dsb();
+       dsb(sy);
+       dsb(sy);
+       dsb(sy);
+       dsb(sy);
+       dsb(sy);
+       dsb(sy);
        cru_writel(ps->pllcon0, pll->reg + RK3188_PLL_CON(0));
        cru_writel(ps->pllcon1, pll->reg + RK3188_PLL_CON(1));
 
@@ -1466,7 +1559,7 @@ CHANGE_APLL:
        /* reparent to apll, and set div to 1 */
        if (sel_gpll) {
                if (temp_div == 1) {
-                       /* when rate/2 < (old_rate-arm_gpll_rate),
+                       /* when rate/2 < (rate-arm_gpll_rate),
                           we can set div to make rate change more gently */
                        if (rate > (2*arm_gpll_rate)) {
                                cru_writel(RK3288_CORE_CLK_DIV(2), RK3288_CRU_CLKSELS_CON(0));
@@ -1895,6 +1988,464 @@ static const struct clk_ops clk_pll_ops_312xplus = {
        .set_rate = clk_cpll_set_rate_312xplus,
 };
 
+static long clk_pll_round_rate_3368_apllb(struct clk_hw *hw, unsigned long rate,
+                                         unsigned long *prate)
+{
+       struct clk *parent = __clk_get_parent(hw->clk);
+
+       if (parent && (rate == __clk_get_rate(parent))) {
+               clk_debug("pll %s round rate=%lu equal to parent rate\n",
+                         __clk_get_name(hw->clk), rate);
+               return rate;
+       }
+
+       return (apll_get_best_set(rate, rk3368_apllb_table)->rate);
+}
+
+/* 1: use, 0: no use */
+#define RK3368_APLLB_USE_GPLL  1
+
+/* When defined as 1, we will set div to make the rate change gently, but it
+ will cost more time */
+#define RK3368_APLLB_DIV_MORE  1
+
+static int clk_pll_set_rate_3368_apllb(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long parent_rate)
+{
+       struct clk_pll *pll = to_clk_pll(hw);
+       struct clk *clk = hw->clk;
+       struct clk *arm_gpll = __clk_lookup("clk_gpll");
+       unsigned long arm_gpll_rate, temp_rate, old_rate;
+       const struct apll_clk_set *ps;
+       u32 temp_div;
+       unsigned long flags;
+       int sel_gpll = 0;
+
+       ps = apll_get_best_set(rate, rk3368_apllb_table);
+       clk_debug("apllb will set rate %lu\n", ps->rate);
+       clk_debug("table con:%08x,%08x,%08x, sel:%08x,%08x\n",
+                 ps->pllcon0, ps->pllcon1, ps->pllcon2,
+                 ps->clksel0, ps->clksel1);
+
+#if !RK3368_APLLB_USE_GPLL
+       goto CHANGE_APLL;
+#endif
+
+       /* prepare arm_gpll before reparent clk_core to it */
+       if (!arm_gpll) {
+               clk_err("clk arm_gpll is NULL!\n");
+               goto CHANGE_APLL;
+       }
+
+       arm_gpll_rate = __clk_get_rate(arm_gpll);
+       old_rate = __clk_get_rate(clk);
+
+       temp_rate = (old_rate > rate) ? old_rate : rate;
+       temp_div = DIV_ROUND_UP(arm_gpll_rate, temp_rate);
+
+       if (temp_div > RK3368_CORE_CLK_MAX_DIV) {
+               clk_debug("temp_div %d > max_div %d\n", temp_div,
+                         RK3368_CORE_CLK_MAX_DIV);
+               clk_debug("can't get rate %lu from arm_gpll rate %lu\n",
+                         __clk_get_rate(clk), arm_gpll_rate);
+               goto CHANGE_APLL;
+       }
+
+#if 0
+       if (clk_prepare(arm_gpll)) {
+               clk_err("fail to prepare arm_gpll path\n");
+               clk_unprepare(arm_gpll);
+               goto CHANGE_APLL;
+       }
+
+       if (clk_enable(arm_gpll)) {
+               clk_err("fail to enable arm_gpll path\n");
+               clk_disable(arm_gpll);
+               clk_unprepare(arm_gpll);
+               goto CHANGE_APLL;
+       }
+#endif
+
+       local_irq_save(flags);
+
+       if (rate >= old_rate) {
+               cru_writel(ps->clksel0, RK3368_CRU_CLKSELS_CON(0));
+               cru_writel(ps->clksel1, RK3368_CRU_CLKSELS_CON(1));
+       }
+
+       /* select gpll */
+#if RK3368_APLLB_DIV_MORE
+       if (temp_div == 1) {
+               /* when old_rate/2 < (old_rate-arm_gpll_rate),
+                  we can set div to make rate change more gently */
+               if (old_rate > (2*arm_gpll_rate)) {
+                       cru_writel(RK3368_CORE_CLK_DIV(2), RK3368_CRU_CLKSELS_CON(0));
+                       udelay(10);
+                       cru_writel(RK3368_CORE_CLK_DIV(3), RK3368_CRU_CLKSELS_CON(0));
+                       udelay(10);
+                       cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_GPLL,
+                                  RK3368_CRU_CLKSELS_CON(0));
+                       udelay(10);
+                       cru_writel(RK3368_CORE_CLK_DIV(2), RK3368_CRU_CLKSELS_CON(0));
+                       udelay(10);
+                       cru_writel(RK3368_CORE_CLK_DIV(1), RK3368_CRU_CLKSELS_CON(0));
+               } else {
+                       cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_GPLL,
+                                  RK3368_CRU_CLKSELS_CON(0));
+               }
+       } else {
+               cru_writel(RK3368_CORE_CLK_DIV(temp_div), RK3368_CRU_CLKSELS_CON(0));
+               cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_GPLL,
+                          RK3368_CRU_CLKSELS_CON(0));
+       }
+#else
+       cru_writel(RK3368_CORE_CLK_DIV(temp_div), RK3368_CRU_CLKSELS_CON(0));
+       cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_GPLL,
+                  RK3368_CRU_CLKSELS_CON(0));
+#endif
+
+       sel_gpll = 1;
+
+       smp_wmb();
+
+       local_irq_restore(flags);
+
+       clk_debug("temp select arm_gpll path, get rate %lu\n",
+                 arm_gpll_rate/temp_div);
+       clk_debug("from arm_gpll rate %lu, temp_div %d\n", arm_gpll_rate,
+                 temp_div);
+
+CHANGE_APLL:
+       local_irq_save(flags);
+
+       /* If core src don't select gpll, apll need to enter slow mode
+        * before reset
+        */
+       if (!sel_gpll)
+               cru_writel(_RK3188_PLL_MODE_SLOW_SET(pll->mode_shift),
+                          pll->mode_offset);
+
+       /* PLL enter reset */
+       cru_writel(_RK3188PLUS_PLL_RESET_SET(1), pll->reg + RK3188_PLL_CON(3));
+
+       cru_writel(ps->pllcon0, pll->reg + RK3188_PLL_CON(0));
+       cru_writel(ps->pllcon1, pll->reg + RK3188_PLL_CON(1));
+       cru_writel(ps->pllcon2, pll->reg + RK3188_PLL_CON(2));
+
+       udelay(5);
+
+       /* return from reset */
+       cru_writel(_RK3188PLUS_PLL_RESET_SET(0), pll->reg + RK3188_PLL_CON(3));
+
+       /* wait for lock state */
+       udelay(ps->rst_dly);
+       pll_wait_lock(hw);
+
+       /* PLL return from slow mode */
+       if (!sel_gpll) {
+               if (rate >= old_rate) {
+                       cru_writel(ps->clksel0, RK3368_CRU_CLKSELS_CON(0));
+                       cru_writel(ps->clksel1, RK3368_CRU_CLKSELS_CON(1));
+               }
+               cru_writel(_RK3188_PLL_MODE_NORM_SET(pll->mode_shift),
+                          pll->mode_offset);
+       }
+
+       /* reparent to apll, and set div to 1 */
+       if (sel_gpll) {
+#if RK3368_APLLB_DIV_MORE
+               if (temp_div == 1) {
+                       /* when rate/2 < (rate-arm_gpll_rate),
+                        we can set div to make rate change more gently */
+                       if (rate > (2*arm_gpll_rate)) {
+                               cru_writel(RK3368_CORE_CLK_DIV(2), RK3368_CRU_CLKSELS_CON(0));
+                               udelay(10);
+                               cru_writel(RK3368_CORE_CLK_DIV(3), RK3368_CRU_CLKSELS_CON(0));
+                               udelay(10);
+                               cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_APLL,
+                                          RK3368_CRU_CLKSELS_CON(0));
+                               udelay(10);
+                               cru_writel(RK3368_CORE_CLK_DIV(2), RK3368_CRU_CLKSELS_CON(0));
+                               udelay(10);
+                               cru_writel(RK3368_CORE_CLK_DIV(1), RK3368_CRU_CLKSELS_CON(0));
+                       } else {
+                               cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_APLL,
+                                          RK3368_CRU_CLKSELS_CON(0));
+                       }
+               } else {
+                       cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_APLL,
+                                  RK3368_CRU_CLKSELS_CON(0));
+                       cru_writel(RK3368_CORE_CLK_DIV(1), RK3368_CRU_CLKSELS_CON(0));
+               }
+#else
+               cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_APLL,
+                          RK3368_CRU_CLKSELS_CON(0));
+               cru_writel(RK3368_CORE_CLK_DIV(1), RK3368_CRU_CLKSELS_CON(0));
+#endif
+       }
+
+       if (rate < old_rate) {
+               cru_writel(ps->clksel0, RK3368_CRU_CLKSELS_CON(0));
+               cru_writel(ps->clksel1, RK3368_CRU_CLKSELS_CON(1));
+       }
+
+       smp_wmb();
+
+       local_irq_restore(flags);
+
+       if (sel_gpll) {
+               sel_gpll = 0;
+               /* clk_disable(arm_gpll);
+               clk_unprepare(arm_gpll); */
+       }
+
+       clk_debug("apll set rate %lu, con(%x,%x,%x,%x), sel(%x,%x)\n",
+                 ps->rate,
+                 cru_readl(pll->reg + RK3188_PLL_CON(0)),
+                 cru_readl(pll->reg + RK3188_PLL_CON(1)),
+                 cru_readl(pll->reg + RK3188_PLL_CON(2)),
+                 cru_readl(pll->reg + RK3188_PLL_CON(3)),
+                 cru_readl(RK3368_CRU_CLKSELS_CON(0)),
+                 cru_readl(RK3368_CRU_CLKSELS_CON(1)));
+
+       return 0;
+}
+
+/* clk_ops for the RK3368 APLLB (core PLL driving CLKSELS_CON(0)/(1);
+ * presumably the big CPU cluster — confirm against the TRM).  Rate
+ * readback reuses the generic 3188+ PLL code; round/set use the
+ * APLLB-specific frequency table. */
+static const struct clk_ops clk_pll_ops_3368_apllb = {
+       .recalc_rate = clk_pll_recalc_rate_3188plus,
+       .round_rate = clk_pll_round_rate_3368_apllb,
+       .set_rate = clk_pll_set_rate_3368_apllb,
+};
+
+/* Round a requested APLLL rate.  A request exactly equal to the parent
+ * clock rate is passed through unchanged; any other request snaps to the
+ * best-matching entry of the APLLL frequency table. */
+static long clk_pll_round_rate_3368_aplll(struct clk_hw *hw, unsigned long rate,
+                                         unsigned long *prate)
+{
+       struct clk *p = __clk_get_parent(hw->clk);
+
+       if (!p || rate != __clk_get_rate(p))
+               return apll_get_best_set(rate, rk3368_aplll_table)->rate;
+
+       clk_debug("pll %s round rate=%lu equal to parent rate\n",
+                 __clk_get_name(hw->clk), rate);
+       return rate;
+}
+
+/* 1: temporarily reparent the core clock to GPLL while the APLLL is
+ * reprogrammed; 0: skip the GPLL detour and change the PLL directly. */
+#define RK3368_APLLL_USE_GPLL  1
+
+/* 1: step the core divider through intermediate values so the rate
+ * changes gently (takes longer); 0: switch the divider in one step. */
+#define RK3368_APLLL_DIV_MORE  1
+
+static int clk_pll_set_rate_3368_aplll(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long parent_rate)
+{
+       struct clk_pll *pll = to_clk_pll(hw);
+       struct clk *clk = hw->clk;
+       struct clk *arm_gpll = __clk_lookup("clk_gpll");
+       unsigned long arm_gpll_rate, temp_rate, old_rate;
+       const struct apll_clk_set *ps;
+       u32 temp_div;
+       unsigned long flags;
+       int sel_gpll = 0;
+
+       ps = apll_get_best_set(rate, rk3368_aplll_table);
+       clk_debug("aplll will set rate %lu\n", ps->rate);
+       clk_debug("table con:%08x,%08x,%08x, sel:%08x,%08x\n",
+                 ps->pllcon0, ps->pllcon1, ps->pllcon2,
+                 ps->clksel0, ps->clksel1);
+
+#if !RK3368_APLLL_USE_GPLL
+       goto CHANGE_APLL;
+#endif
+
+       /* prepare arm_gpll before reparent clk_core to it */
+       if (!arm_gpll) {
+               clk_err("clk arm_gpll is NULL!\n");
+               goto CHANGE_APLL;
+       }
+
+       arm_gpll_rate = __clk_get_rate(arm_gpll);
+       old_rate = __clk_get_rate(clk);
+
+       temp_rate = (old_rate > rate) ? old_rate : rate;
+       temp_div = DIV_ROUND_UP(arm_gpll_rate, temp_rate);
+
+       if (temp_div > RK3368_CORE_CLK_MAX_DIV) {
+               clk_debug("temp_div %d > max_div %d\n", temp_div,
+                         RK3368_CORE_CLK_MAX_DIV);
+               clk_debug("can't get rate %lu from arm_gpll rate %lu\n",
+                         __clk_get_rate(clk), arm_gpll_rate);
+               goto CHANGE_APLL;
+       }
+
+#if 0
+       if (clk_prepare(arm_gpll)) {
+               clk_err("fail to prepare arm_gpll path\n");
+               clk_unprepare(arm_gpll);
+               goto CHANGE_APLL;
+       }
+
+       if (clk_enable(arm_gpll)) {
+               clk_err("fail to enable arm_gpll path\n");
+               clk_disable(arm_gpll);
+               clk_unprepare(arm_gpll);
+               goto CHANGE_APLL;
+       }
+#endif
+
+       local_irq_save(flags);
+
+       if (rate >= old_rate) {
+               cru_writel(ps->clksel0, RK3368_CRU_CLKSELS_CON(2));
+               cru_writel(ps->clksel1, RK3368_CRU_CLKSELS_CON(3));
+       }
+
+       /* select gpll */
+#if RK3368_APLLL_DIV_MORE
+       if (temp_div == 1) {
+               /* when old_rate/2 < (old_rate-arm_gpll_rate),
+                  we can set div to make rate change more gently */
+               if (old_rate > (2*arm_gpll_rate)) {
+                       cru_writel(RK3368_CORE_CLK_DIV(2), RK3368_CRU_CLKSELS_CON(2));
+                       udelay(10);
+                       cru_writel(RK3368_CORE_CLK_DIV(3), RK3368_CRU_CLKSELS_CON(2));
+                       udelay(10);
+                       cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_GPLL,
+                                  RK3368_CRU_CLKSELS_CON(2));
+                       udelay(10);
+                       cru_writel(RK3368_CORE_CLK_DIV(2), RK3368_CRU_CLKSELS_CON(2));
+                       udelay(10);
+                       cru_writel(RK3368_CORE_CLK_DIV(1), RK3368_CRU_CLKSELS_CON(2));
+               } else {
+                       cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_GPLL,
+                                  RK3368_CRU_CLKSELS_CON(2));
+               }
+       } else {
+               cru_writel(RK3368_CORE_CLK_DIV(temp_div), RK3368_CRU_CLKSELS_CON(2));
+               cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_GPLL,
+                          RK3368_CRU_CLKSELS_CON(2));
+       }
+#else
+               cru_writel(RK3368_CORE_CLK_DIV(temp_div), RK3368_CRU_CLKSELS_CON(2));
+               cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_GPLL,
+                          RK3368_CRU_CLKSELS_CON(2));
+#endif
+
+       sel_gpll = 1;
+
+       smp_wmb();
+
+       local_irq_restore(flags);
+
+       clk_debug("temp select arm_gpll path, get rate %lu\n",
+                 arm_gpll_rate/temp_div);
+       clk_debug("from arm_gpll rate %lu, temp_div %d\n", arm_gpll_rate,
+                 temp_div);
+
+CHANGE_APLL:
+       local_irq_save(flags);
+
+       /* If core src don't select gpll, apll need to enter slow mode
+        * before reset
+        */
+       if (!sel_gpll)
+               cru_writel(_RK3188_PLL_MODE_SLOW_SET(pll->mode_shift),
+                          pll->mode_offset);
+
+       /* PLL enter reset */
+       cru_writel(_RK3188PLUS_PLL_RESET_SET(1), pll->reg + RK3188_PLL_CON(3));
+
+       cru_writel(ps->pllcon0, pll->reg + RK3188_PLL_CON(0));
+       cru_writel(ps->pllcon1, pll->reg + RK3188_PLL_CON(1));
+       cru_writel(ps->pllcon2, pll->reg + RK3188_PLL_CON(2));
+
+       udelay(5);
+
+       /* return from rest */
+       cru_writel(_RK3188PLUS_PLL_RESET_SET(0), pll->reg + RK3188_PLL_CON(3));
+
+       /* wating lock state */
+       udelay(ps->rst_dly);
+       pll_wait_lock(hw);
+
+       /* PLL return from slow mode */
+       if (!sel_gpll) {
+               if (rate >= old_rate) {
+                       cru_writel(ps->clksel0, RK3368_CRU_CLKSELS_CON(2));
+                       cru_writel(ps->clksel1, RK3368_CRU_CLKSELS_CON(3));
+               }
+               cru_writel(_RK3188_PLL_MODE_NORM_SET(pll->mode_shift),
+                          pll->mode_offset);
+       }
+
+       /* reparent to apll, and set div to 1 */
+       if (sel_gpll) {
+#if RK3368_APLLL_DIV_MORE
+               if (temp_div == 1) {
+                       /* when rate/2 < (rate-arm_gpll_rate),
+                        we can set div to make rate change more gently */
+                       if (rate > (2*arm_gpll_rate)) {
+                               cru_writel(RK3368_CORE_CLK_DIV(2), RK3368_CRU_CLKSELS_CON(2));
+                               udelay(10);
+                               cru_writel(RK3368_CORE_CLK_DIV(3), RK3368_CRU_CLKSELS_CON(2));
+                               udelay(10);
+                               cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_APLL,
+                                          RK3368_CRU_CLKSELS_CON(2));
+                               udelay(10);
+                               cru_writel(RK3368_CORE_CLK_DIV(2), RK3368_CRU_CLKSELS_CON(2));
+                               udelay(10);
+                               cru_writel(RK3368_CORE_CLK_DIV(1), RK3368_CRU_CLKSELS_CON(2));
+                       } else {
+                               cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_APLL,
+                                          RK3368_CRU_CLKSELS_CON(2));
+                       }
+               } else {
+                       cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_APLL,
+                                  RK3368_CRU_CLKSELS_CON(2));
+                       cru_writel(RK3368_CORE_CLK_DIV(1), RK3368_CRU_CLKSELS_CON(2));
+               }
+#else
+               cru_writel(RK3368_CORE_SEL_PLL_W_MSK|RK3368_CORE_SEL_APLL,
+                          RK3368_CRU_CLKSELS_CON(2));
+               cru_writel(RK3368_CORE_CLK_DIV(1), RK3368_CRU_CLKSELS_CON(2));
+#endif
+       }
+
+       if (rate < old_rate) {
+               cru_writel(ps->clksel0, RK3368_CRU_CLKSELS_CON(2));
+               cru_writel(ps->clksel1, RK3368_CRU_CLKSELS_CON(3));
+       }
+
+       smp_wmb();
+
+       local_irq_restore(flags);
+
+       if (sel_gpll) {
+               sel_gpll = 0;
+               /* clk_disable(arm_gpll);
+               clk_unprepare(arm_gpll); */
+       }
+
+       clk_debug("apll set rate %lu, con(%x,%x,%x,%x), sel(%x,%x)\n",
+                 ps->rate,
+                 cru_readl(pll->reg + RK3188_PLL_CON(0)),
+                 cru_readl(pll->reg + RK3188_PLL_CON(1)),
+                 cru_readl(pll->reg + RK3188_PLL_CON(2)),
+                 cru_readl(pll->reg + RK3188_PLL_CON(3)),
+                 cru_readl(RK3368_CRU_CLKSELS_CON(2)),
+                 cru_readl(RK3368_CRU_CLKSELS_CON(3)));
+
+       return 0;
+}
+
+/* clk_ops for the RK3368 APLLL (core PLL driving CLKSELS_CON(2)/(3);
+ * presumably the LITTLE CPU cluster — confirm against the TRM).  Rate
+ * readback reuses the generic 3188+ PLL code; round/set use the
+ * APLLL-specific frequency table. */
+static const struct clk_ops clk_pll_ops_3368_aplll = {
+       .recalc_rate = clk_pll_recalc_rate_3188plus,
+       .round_rate = clk_pll_round_rate_3368_aplll,
+       .set_rate = clk_pll_set_rate_3368_aplll,
+};
+
 const struct clk_ops *rk_get_pll_ops(u32 pll_flags)
 {
        switch (pll_flags) {
@@ -1925,6 +2476,12 @@ const struct clk_ops *rk_get_pll_ops(u32 pll_flags)
                case CLK_PLL_312XPLUS:
                        return &clk_pll_ops_312xplus;
 
+               case CLK_PLL_3368_APLLB:
+                       return &clk_pll_ops_3368_apllb;
+
+               case CLK_PLL_3368_APLLL:
+                       return &clk_pll_ops_3368_aplll;
+
                default:
                        clk_err("%s: unknown pll_flags!\n", __func__);
                        return NULL;
index df41b57644fe36bb33e20fbd007220d7513f8579..3ecc178a5903d993efac14fe363f693bfb977f51 100755 (executable)
        .pllcon2 = RK3036_PLL_SET_FRAC(_frac),  \
 }
 
+/***************************RK3368 PLL**************************************/
+/* Bit-field helpers for the RK3368 core-clock select/divider registers.
+ * CLKSELS_CON(0)/(1) carry the APLLB path, CLKSELS_CON(2)/(3) the APLLL
+ * path; the *_DIV(i) macros presumably encode "divider - 1" via
+ * CLK_DIV_PLUS_ONE_SET together with the write-enable mask — confirm
+ * against CLK_DIV_PLUS_ONE_SET's definition. */
+/*******************CLKSEL0/2 BITS***************************/
+#define RK3368_CORE_SEL_PLL_W_MSK      (1 << 23)
+#define RK3368_CORE_SEL_APLL           (0 << 7)
+#define RK3368_CORE_SEL_GPLL           (1 << 7)
+
+#define RK3368_CORE_CLK_SHIFT          0
+#define RK3368_CORE_CLK_WIDTH          5
+#define RK3368_CORE_CLK_DIV(i) \
+       CLK_DIV_PLUS_ONE_SET(i, RK3368_CORE_CLK_SHIFT, RK3368_CORE_CLK_WIDTH)
+/* NOTE(review): a 5-bit "div - 1" field can encode dividers up to
+ * (1 << RK3368_CORE_CLK_WIDTH) == 32, but this limit evaluates to 64
+ * (2 << 5) — confirm whether the extra factor of two is intentional;
+ * callers use it to reject over-large GPLL detour dividers. */
+#define RK3368_CORE_CLK_MAX_DIV                (2<<RK3368_CORE_CLK_WIDTH)
+
+#define RK3368_ACLKM_CORE_SHIFT                8
+#define RK3368_ACLKM_CORE_WIDTH                5
+#define RK3368_ACLKM_CORE_DIV(i)       \
+       CLK_DIV_PLUS_ONE_SET(i, RK3368_ACLKM_CORE_SHIFT, RK3368_ACLKM_CORE_WIDTH)
+
+/*******************CLKSEL1/3 BITS***************************/
+#define RK3368_ATCLK_CORE_SHIFT                0
+#define RK3368_ATCLK_CORE_WIDTH                5
+#define RK3368_ATCLK_CORE_DIV(i)       \
+       CLK_DIV_PLUS_ONE_SET(i, RK3368_ATCLK_CORE_SHIFT, RK3368_ATCLK_CORE_WIDTH)
+
+#define RK3368_PCLK_DBG_SHIFT          8
+#define RK3368_PCLK_DBG_WIDTH          5
+#define RK3368_PCLK_DBG_DIV(i) \
+       CLK_DIV_PLUS_ONE_SET(i, RK3368_PCLK_DBG_SHIFT, RK3368_PCLK_DBG_WIDTH)
+
+/* Build one apll_clk_set table entry: PLL nr/nf/no settings plus the
+ * matching aclkm/atclk/pclk_dbg divider words for CLKSEL0/1 (or 2/3). */
+#define _RK3368_APLL_SET_CLKS(_mhz, nr, nf, no, aclkm_div, atclk_div, pclk_dbg_div) \
+{ \
+       .rate   = _mhz * MHZ, \
+       .pllcon0 = RK3188PLUS_PLL_CLKR_SET(nr) | RK3188PLUS_PLL_CLKOD_SET(no), \
+       .pllcon1 = RK3188PLUS_PLL_CLKF_SET(nf),\
+       .pllcon2 = RK3188PLUS_PLL_CLK_BWADJ_SET(nf >> 1),\
+       .rst_dly = ((nr*500)/24+1),\
+       .clksel0 = RK3368_ACLKM_CORE_DIV(aclkm_div),\
+       .clksel1 = RK3368_ATCLK_CORE_DIV(atclk_div) | RK3368_PCLK_DBG_DIV(pclk_dbg_div) \
+}
+
 struct pll_clk_set {
        unsigned long   rate;
        u32     pllcon0;
index aaad37e87a7e3b4412b413c59c4d81e99bab8802..6678dfc06e3c3d86d8d6536cf50f6055124d01b3 100755 (executable)
 #include "clk-pll.h"
 #include "clk-pd.h"
 
+/* Virtual base addresses of the CRU and GRF register blocks, mapped from
+ * the device tree during clock-tree init. */
+static void __iomem *rk_cru_base;
+static void __iomem *rk_grf_base;
+
+/* Read the 32-bit CRU register at byte @offset. */
+u32 cru_readl(u32 offset)
+{
+       return readl(rk_cru_base + (offset));
+}
+
+/* Write @val to the CRU register at byte @offset.  The dsb(sy) barrier
+ * orders the store before subsequent operations — callers interleave
+ * PLL/mux writes with udelay()-based settling times. */
+void cru_writel(u32 val, u32 offset)
+{
+       writel(val, rk_cru_base + (offset));
+       dsb(sy);
+}
+
+/* Read the 32-bit GRF register at byte @offset.  NOTE(review):
+ * rk_grf_base is not NULL-checked after of_iomap — confirm the
+ * "rockchip,grf" phandle is always present on supported boards. */
+u32 grf_readl(u32 offset)
+{
+       return readl(rk_grf_base + (offset));
+}
 
 struct rkclk_muxinfo {
        const char              *clk_name;
@@ -1050,6 +1068,31 @@ out:
        return ret;
 }
 
+/*
+ * rkclk_init_special_regs - init muxes under the "special regs" container.
+ *
+ * Walks the available children of @np and, for every child compatible
+ * with "rockchip,rk3188-mux-con", maps its register and registers the
+ * mux via rkclk_init_muxinfo().  Returns 0 on success or the first
+ * error encountered.
+ */
+static int __init rkclk_init_special_regs(struct device_node *np)
+{
+       struct device_node *node;
+       const char *compatible;
+       void __iomem *reg = NULL;
+       int ret = 0;
+
+       for_each_available_child_of_node(np, node) {
+               clk_debug("\n");
+               /* Skip children without a "compatible" string rather than
+                * comparing against an uninitialized pointer. */
+               if (of_property_read_string(node, "compatible", &compatible))
+                       continue;
+               if (strcmp(compatible, "rockchip,rk3188-mux-con") == 0) {
+                       reg = of_iomap(node, 0);
+                       if (!reg) {
+                               clk_err("%s: iomap mux con err\n", __func__);
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+                       ret = rkclk_init_muxinfo(node, reg);
+                       if (ret != 0) {
+                               clk_err("%s: init mux con err\n", __func__);
+                               goto out;
+                       }
+               }
+       }
+
+out:
+       return ret;
+}
+
 static int __init rkclk_init_pd(struct device_node *np)
 {
        struct device_node *node = NULL;
@@ -1477,6 +1520,10 @@ static void rkclk_add_provider(struct device_node *np)
                        for_each_available_child_of_node(node, node_prd) {
                                 _rkclk_add_provider(node_prd);
                        }
+               } else if (strcmp(compatible, "rockchip,rk-clock-special-regs") == 0) {
+                       for_each_available_child_of_node(node, node_prd) {
+                                _rkclk_add_provider(node_prd);
+                       }
                } else {
                        clk_err("%s: unknown\n", __func__);
                }
@@ -1522,7 +1569,8 @@ static void rkclk_cache_parents(struct rkclk *rkclk)
 void rk_dump_cru(void)
 {
        printk(KERN_WARNING "CRU:\n");
-       print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 4, RK_CRU_VIRT, 0x220, false);
+       print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 4, rk_cru_base,
+                      0x220, false);
 }
 EXPORT_SYMBOL_GPL(rk_dump_cru);
 
@@ -1821,15 +1869,23 @@ static void __init rk_clk_tree_init(struct device_node *np)
        struct rkclk *rkclk;
        const char *compatible;
 
-       printk("%s start! cru base = 0x%08x\n", __func__, (u32)RK_CRU_VIRT);
+       printk("%s start!\n", __func__);
 
-       node_init=of_find_node_by_name(NULL,"clocks-init");
+       node_init = of_find_node_by_name(NULL, "clocks-init");
        if (!node_init) {
                clk_err("%s: can not get clocks-init node\n", __func__);
                return;
        }
-        clk_root_node=of_find_node_by_name(NULL,"clock_regs");
 
+       clk_root_node = of_find_node_by_name(NULL, "clock_regs");
+       rk_cru_base = of_iomap(clk_root_node, 0);
+       if (!rk_cru_base) {
+               clk_err("%s: could not map cru region\n", __func__);
+               return;
+       }
+
+       node = of_parse_phandle(np, "rockchip,grf", 0);
+       rk_grf_base = of_iomap(node, 0);
 
        for_each_available_child_of_node(np, node) {
                clk_debug("\n");
@@ -1839,22 +1895,27 @@ static void __init rk_clk_tree_init(struct device_node *np)
                if (strcmp(compatible, "rockchip,rk-fixed-rate-cons") == 0) {
                        if (rkclk_init_fixed_rate(node) != 0) {
                                clk_err("%s: init fixed_rate err\n", __func__);
-                               return ;
+                               return;
                        }
                } else if (strcmp(compatible, "rockchip,rk-fixed-factor-cons") == 0) {
                        if (rkclk_init_fixed_factor(node) != 0) {
                                clk_err("%s: init fixed_factor err\n", __func__);
-                               return ;
+                               return;
                        }
                } else if (strcmp(compatible, "rockchip,rk-clock-regs") == 0) {
                        if (rkclk_init_regcon(node) != 0) {
                                clk_err("%s: init reg cons err\n", __func__);
-                               return ;
+                               return;
                        }
                } else if (strcmp(compatible, "rockchip,rk-pd-cons") == 0) {
                        if (rkclk_init_pd(node) != 0) {
                                clk_err("%s: init pd err\n", __func__);
-                               return ;
+                               return;
+                       }
+               } else if (strcmp(compatible, "rockchip,rk-clock-special-regs") == 0) {
+                       if (rkclk_init_special_regs(node) != 0) {
+                               clk_err("%s: init special reg err\n", __func__);
+                               return;
                        }
                } else {
                        clk_err("%s: unknown\n", __func__);
@@ -2178,7 +2239,7 @@ u32 clk_suspend_clkgt_info_get(u32 *clk_ungt_msk,u32 *clk_ungt_msk_last,u32 buf_
                     {
                         reg_n=of_iomap(node_gt, 0);
 
-                        if(((u32)reg_n-(u32)reg_p)!=4)
+                        if(((long)reg_n-(long)reg_p)!=4)
                         {
                             printk("%s: gt reg is not continue\n",__FUNCTION__);
                             return 0;
@@ -2186,7 +2247,7 @@ u32 clk_suspend_clkgt_info_get(u32 *clk_ungt_msk,u32 *clk_ungt_msk_last,u32 buf_
                         reg_p=reg_n;
                     }
 
-                    clk_debug("%s:gt%d,reg=%x,val=(%x,%x)\n",__FUNCTION__,gt_cnt,(u32)reg_n,
+                    clk_debug("%s:gt%d,reg=%p,val=(%x,%x)\n",__FUNCTION__,gt_cnt, reg_n,
                     clk_ungt_msk[gt_cnt], clk_ungt_msk_last[gt_cnt]);
 
                     gt_cnt++;
index 319ef5049f8bf23cee228569e64fb87e1fcc35a5..a8be6cf3eed81f480f7d9a02e239a73f4729962d 100644 (file)
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 
+#ifdef CONFIG_LOCAL_TIMERS
 #include <asm/localtimer.h>
+#endif
+#ifdef CONFIG_ARM
 #include <asm/sched_clock.h>
+#endif
 
 #define TIMER_NAME "rk_timer"
 
@@ -67,13 +71,13 @@ static struct bc_timer bc_timer;
 static inline void rk_timer_disable(void __iomem *base)
 {
        writel_relaxed(TIMER_DISABLE, base + TIMER_CONTROL_REG);
-       dsb();
+       dsb(sy);
 }
 
 static inline void rk_timer_enable(void __iomem *base, u32 flags)
 {
        writel_relaxed(TIMER_ENABLE | flags, base + TIMER_CONTROL_REG);
-       dsb();
+       dsb(sy);
 }
 
 static inline u32 rk_timer_read_current_value(void __iomem *base)
@@ -98,15 +102,17 @@ static inline int rk_timer_do_set_next_event(unsigned long cycles, void __iomem
        rk_timer_disable(base);
        writel_relaxed(cycles, base + TIMER_LOAD_COUNT0);
        writel_relaxed(0, base + TIMER_LOAD_COUNT1);
-       dsb();
+       dsb(sy);
        rk_timer_enable(base, TIMER_MODE_USER_DEFINED_COUNT | TIMER_INT_UNMASK);
        return 0;
 }
 
+#ifdef CONFIG_LOCAL_TIMERS
 static int rk_timer_set_next_event(unsigned long cycles, struct clock_event_device *ce)
 {
        return rk_timer_do_set_next_event(cycles, __get_cpu_var(ce_timer).base);
 }
+#endif
 
 static int rk_timer_broadcast_set_next_event(unsigned long cycles, struct clock_event_device *ce)
 {
@@ -119,7 +125,7 @@ static inline void rk_timer_do_set_mode(enum clock_event_mode mode, void __iomem
        case CLOCK_EVT_MODE_PERIODIC:
                rk_timer_disable(base);
                writel_relaxed(24000000 / HZ - 1, base + TIMER_LOAD_COUNT0);
-               dsb();
+               dsb(sy);
                rk_timer_enable(base, TIMER_MODE_FREE_RUNNING | TIMER_INT_UNMASK);
        case CLOCK_EVT_MODE_RESUME:
        case CLOCK_EVT_MODE_ONESHOT:
@@ -131,10 +137,12 @@ static inline void rk_timer_do_set_mode(enum clock_event_mode mode, void __iomem
        }
 }
 
+#ifdef CONFIG_LOCAL_TIMERS
 static void rk_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
 {
        rk_timer_do_set_mode(mode, __get_cpu_var(ce_timer).base);
 }
+#endif
 
 static void rk_timer_broadcast_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
 {
@@ -148,7 +156,7 @@ static inline irqreturn_t rk_timer_interrupt(void __iomem *base, struct clock_ev
        if (ce->mode == CLOCK_EVT_MODE_ONESHOT) {
                writel_relaxed(TIMER_DISABLE, base + TIMER_CONTROL_REG);
        }
-       dsb();
+       dsb(sy);
 
        ce->event_handler(ce);
 
@@ -165,6 +173,7 @@ static irqreturn_t rk_timer_broadcast_interrupt(int irq, void *dev_id)
        return rk_timer_interrupt(bc_timer.base, dev_id);
 }
 
+#ifdef CONFIG_LOCAL_TIMERS
 static __cpuinit int rk_timer_init_clockevent(struct clock_event_device *ce, unsigned int cpu)
 {
        struct ce_timer *timer = &per_cpu(ce_timer, cpu);
@@ -192,6 +201,7 @@ static __cpuinit int rk_timer_init_clockevent(struct clock_event_device *ce, uns
 
        return 0;
 }
+#endif
 
 static __init void rk_timer_init_broadcast(struct device_node *np)
 {
@@ -228,6 +238,7 @@ static __init void rk_timer_init_broadcast(struct device_node *np)
        clockevents_config_and_register(ce, 24000000, 0xF, 0xFFFFFFFF);
 }
 
+#ifdef CONFIG_LOCAL_TIMERS
 static int __cpuinit rk_local_timer_setup(struct clock_event_device *ce)
 {
        ce->rating = 450;
@@ -244,6 +255,7 @@ static struct local_timer_ops rk_local_timer_ops __cpuinitdata = {
        .setup  = rk_local_timer_setup,
        .stop   = rk_local_timer_stop,
 };
+#endif
 
 static cycle_t rk_timer_read(struct clocksource *cs)
 {
@@ -274,11 +286,12 @@ static void __init rk_timer_init_clocksource(struct device_node *np)
        rk_timer_disable(base);
        writel_relaxed(0xFFFFFFFF, base + TIMER_LOAD_COUNT0);
        writel_relaxed(0xFFFFFFFF, base + TIMER_LOAD_COUNT1);
-       dsb();
+       dsb(sy);
        rk_timer_enable(base, TIMER_MODE_FREE_RUNNING | TIMER_INT_MASK);
        clocksource_register_hz(cs, 24000000);
 }
 
+#ifdef CONFIG_ARM
 static u32 rockchip_read_sched_clock(void)
 {
        return ~rk_timer_read_current_value(cs_timer.base);
@@ -288,6 +301,7 @@ static u32 rockchip_read_sched_clock_up(void)
 {
        return rk_timer_read_current_value(cs_timer.base);
 }
+#endif
 
 static void __init rk_timer_init_ce_timer(struct device_node *np, unsigned int cpu)
 {
@@ -302,25 +316,32 @@ static void __init rk_timer_init_ce_timer(struct device_node *np, unsigned int c
        irq->handler = rk_timer_clockevent_interrupt;
 }
 
+#ifdef CONFIG_ARM
 static struct delay_timer rk_delay_timer = {
        .read_current_timer = (unsigned long (*)(void))rockchip_read_sched_clock,
        .freq = 24000000,
 };
+#endif
 
 static void __init rk_timer_init(struct device_node *np)
 {
        u32 val = 0;
        if (of_property_read_u32(np, "rockchip,percpu", &val) == 0) {
+#ifdef CONFIG_LOCAL_TIMERS
                local_timer_register(&rk_local_timer_ops);
+#endif
                rk_timer_init_ce_timer(np, val);
        } else if (of_property_read_u32(np, "rockchip,clocksource", &val) == 0 && val) {
                u32 count_up = 0;
                of_property_read_u32(np, "rockchip,count-up", &count_up);
                if (count_up) {
                        rk_timer_clocksource.read = rk_timer_read_up;
+#ifdef CONFIG_ARM
                        rk_delay_timer.read_current_timer = (unsigned long (*)(void))rockchip_read_sched_clock_up;
+#endif
                }
                rk_timer_init_clocksource(np);
+#ifdef CONFIG_ARM
                if (!lpj_fine) {
                        if (count_up)
                                setup_sched_clock(rockchip_read_sched_clock_up, 32, 24000000);
@@ -328,6 +349,7 @@ static void __init rk_timer_init(struct device_node *np)
                                setup_sched_clock(rockchip_read_sched_clock, 32, 24000000);
                        register_current_timer_delay(&rk_delay_timer);
                }
+#endif
        } else if (of_property_read_u32(np, "rockchip,broadcast", &val) == 0 && val) {
                rk_timer_init_broadcast(np);
        }
index 076f25a59e00c1f89184c0b9548e6b0d87128f7c..eaee6e222207a53660b8f9a935dde44cd8ae16d9 100644 (file)
@@ -456,9 +456,12 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
        cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
 
        if (cur_cluster < MAX_CLUSTERS) {
+               int cpu;
+
                cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
 
-               per_cpu(physical_cluster, policy->cpu) = cur_cluster;
+               for_each_cpu(cpu, policy->cpus)
+                       per_cpu(physical_cluster, cpu) = cur_cluster;
        } else {
                /* Assumption: during init, we are always running on A15 */
                per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
index aea1999b56ae4391fd5240f684ed5fcdc677b0d2..79f5149bd4de5671f4ba577613e543cad3c26984 100644 (file)
@@ -1343,7 +1343,6 @@ static struct subsys_interface cpufreq_interface = {
 void cpufreq_suspend(void)
 {
        struct cpufreq_policy *policy;
-       int cpu;
 
        if (!cpufreq_driver)
                return;
@@ -1353,20 +1352,15 @@ void cpufreq_suspend(void)
 
        pr_debug("%s: Suspending Governors\n", __func__);
 
-       for_each_possible_cpu(cpu) {
-               if (!cpu_online(cpu))
-                       continue;
-
-               policy = cpufreq_cpu_get(cpu);
+       policy = cpufreq_cpu_get(0);
 
-               if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
-                       pr_err("%s: Failed to stop governor for policy: %p\n",
-                               __func__, policy);
-               else if (cpufreq_driver->suspend
-                   && cpufreq_driver->suspend(policy))
-                       pr_err("%s: Failed to suspend driver: %p\n", __func__,
-                               policy);
-       }
+       if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+               pr_err("%s: Failed to stop governor for policy: %p\n",
+                       __func__, policy);
+       else if (cpufreq_driver->suspend
+           && cpufreq_driver->suspend(policy))
+               pr_err("%s: Failed to suspend driver: %p\n", __func__,
+                       policy);
 
        cpufreq_suspended = true;
 }
@@ -1380,7 +1374,6 @@ void cpufreq_suspend(void)
 void cpufreq_resume(void)
 {
        struct cpufreq_policy *policy;
-       int cpu;
 
        if (!cpufreq_driver)
                return;
@@ -1392,29 +1385,18 @@ void cpufreq_resume(void)
 
        cpufreq_suspended = false;
 
-       for_each_possible_cpu(cpu) {
-               if (!cpu_online(cpu))
-                       continue;
+       policy = cpufreq_cpu_get(0);
 
-               policy = cpufreq_cpu_get(cpu);
-
-               if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
-                   || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
-                       pr_err("%s: Failed to start governor for policy: %p\n",
-                               __func__, policy);
-               else if (cpufreq_driver->resume
-                   && cpufreq_driver->resume(policy))
-                       pr_err("%s: Failed to resume driver: %p\n", __func__,
-                               policy);
-
-               /*
-                * schedule call cpufreq_update_policy() for boot CPU, i.e. last
-                * policy in list. It will verify that the current freq is in
-                * sync with what we believe it to be.
-                */
-               if (cpu == 0)
-                       schedule_work(&policy->update);
-       }
+       if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+           || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+               pr_err("%s: Failed to start governor for policy: %p\n",
+                       __func__, policy);
+       else if (cpufreq_driver->resume
+           && cpufreq_driver->resume(policy))
+               pr_err("%s: Failed to resume driver: %p\n", __func__,
+                       policy);
+
+       schedule_work(&policy->update);
 }
 
 /**
index f007924197dfe8ee43557a1d4034b056672881d7..ece1df8eac852f0c5da415fef74b2f848554a1cb 100644 (file)
@@ -53,7 +53,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 
        policy = cdbs->cur_policy;
 
-       /* Get Absolute Load (in terms of freq for ondemand gov) */
+       /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_common_info *j_cdbs;
                u64 cur_wall_time, cur_idle_time;
@@ -104,14 +104,6 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 
                load = 100 * (wall_time - idle_time) / wall_time;
 
-               if (dbs_data->cdata->governor == GOV_ONDEMAND) {
-                       int freq_avg = __cpufreq_driver_getavg(policy, j);
-                       if (freq_avg <= 0)
-                               freq_avg = policy->cur;
-
-                       load *= freq_avg;
-               }
-
                if (load > max_load)
                        max_load = load;
        }
index c501ca83d7599006067ba39cb7ca0ef02e23ca92..c8028ce7554e06718a79e53c834ab30e83c4dec9 100644 (file)
@@ -169,7 +169,6 @@ struct od_dbs_tuners {
        unsigned int sampling_rate;
        unsigned int sampling_down_factor;
        unsigned int up_threshold;
-       unsigned int adj_up_threshold;
        unsigned int powersave_bias;
        unsigned int io_is_busy;
 };
index 260b60b27c256c881755a05a95f680fcd5ce2f72..4e3f38c8607b296fcb5850524c09cefcdf3b9358 100644 (file)
@@ -127,7 +127,7 @@ struct cpufreq_interactive_tunables {
 };
 
 /* For cases where we have single governor instance for system */
-struct cpufreq_interactive_tunables *common_tunables;
+static struct cpufreq_interactive_tunables *common_tunables;
 
 static struct attribute_group *get_sysfs_attr(void);
 
index c087347d66884f03a4f1b2a94bf4fcbaceeda7a0..25438bbf96bbb6020316d04f3fa1e64cca0a2b34 100644 (file)
 #include "cpufreq_governor.h"
 
 /* On-demand governor macros */
-#define DEF_FREQUENCY_DOWN_DIFFERENTIAL                (10)
 #define DEF_FREQUENCY_UP_THRESHOLD             (80)
 #define DEF_SAMPLING_DOWN_FACTOR               (1)
 #define MAX_SAMPLING_DOWN_FACTOR               (100000)
-#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL      (3)
 #define MICRO_FREQUENCY_UP_THRESHOLD           (95)
 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE                (10000)
 #define MIN_FREQUENCY_UP_THRESHOLD             (11)
@@ -161,14 +159,10 @@ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
 
 /*
  * Every sampling_rate, we check, if current idle time is less than 20%
- * (default), then we try to increase frequency. Every sampling_rate, we look
- * for the lowest frequency which can sustain the load while keeping idle time
- * over 30%. If such a frequency exist, we try to decrease to this frequency.
- *
- * Any frequency increase takes it to the maximum frequency. Frequency reduction
- * happens at minimum steps of 5% (default) of current frequency
+ * (default), then we try to increase frequency. Else, we adjust the frequency
+ * proportional to load.
  */
-static void od_check_cpu(int cpu, unsigned int load_freq)
+static void od_check_cpu(int cpu, unsigned int load)
 {
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
        struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
@@ -178,29 +172,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
        dbs_info->freq_lo = 0;
 
        /* Check for frequency increase */
-       if (load_freq > od_tuners->up_threshold * policy->cur) {
+       if (load > od_tuners->up_threshold) {
                /* If switching to max speed, apply sampling_down_factor */
                if (policy->cur < policy->max)
                        dbs_info->rate_mult =
                                od_tuners->sampling_down_factor;
                dbs_freq_increase(policy, policy->max);
                return;
-       }
-
-       /* Check for frequency decrease */
-       /* if we cannot reduce the frequency anymore, break out early */
-       if (policy->cur == policy->min)
-               return;
-
-       /*
-        * The optimal frequency is the frequency that is the lowest that can
-        * support the current CPU usage without triggering the up policy. To be
-        * safe, we focus 10 points under the threshold.
-        */
-       if (load_freq < od_tuners->adj_up_threshold
-                       * policy->cur) {
+       } else {
+               /* Calculate the next frequency proportional to load */
                unsigned int freq_next;
-               freq_next = load_freq / od_tuners->adj_up_threshold;
+               freq_next = load * policy->cpuinfo.max_freq / 100;
 
                /* No longer fully busy, reset rate_mult */
                dbs_info->rate_mult = 1;
@@ -374,9 +356,6 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
                        input < MIN_FREQUENCY_UP_THRESHOLD) {
                return -EINVAL;
        }
-       /* Calculate the new adj_up_threshold */
-       od_tuners->adj_up_threshold += input;
-       od_tuners->adj_up_threshold -= od_tuners->up_threshold;
 
        od_tuners->up_threshold = input;
        return count;
@@ -525,8 +504,6 @@ static int od_init(struct dbs_data *dbs_data)
        if (idle_time != -1ULL) {
                /* Idle micro accounting is supported. Use finer thresholds */
                tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
-               tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
-                       MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
                /*
                 * In nohz/micro accounting case we set the minimum frequency
                 * not depending on HZ, but fixed (very low). The deferred
@@ -535,8 +512,6 @@ static int od_init(struct dbs_data *dbs_data)
                dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
        } else {
                tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
-               tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
-                       DEF_FREQUENCY_DOWN_DIFFERENTIAL;
 
                /* For correct statistics, we need 10 ticks for each measure */
                dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
index 553221ec2ad78a1a39ae3b8f7adffbd7a12cc2b0..04570b2d63d15dfc75184d19a7d9c92e16d97228 100644 (file)
@@ -110,7 +110,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
        for (i = 0; i < stat->state_num; i++) {
                len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
                        (unsigned long long)
-                       cputime64_to_clock_t(stat->time_in_state[i]));
+                       jiffies_64_to_clock_t(stat->time_in_state[i]));
        }
        return len;
 }
@@ -285,19 +285,19 @@ put_ref:
 
 static void cpufreq_allstats_free(void)
 {
-       int i;
+       int cpu;
        struct all_cpufreq_stats *all_stat;
 
        sysfs_remove_file(cpufreq_global_kobject,
                                                &_attr_all_time_in_state.attr);
 
-       for (i = 0; i < total_cpus; i++) {
-               all_stat = per_cpu(all_cpufreq_stats, i);
+       for_each_possible_cpu(cpu) {
+               all_stat = per_cpu(all_cpufreq_stats, cpu);
                if (!all_stat)
                        continue;
                kfree(all_stat->time_in_state);
                kfree(all_stat);
-               per_cpu(all_cpufreq_stats, i) = NULL;
+               per_cpu(all_cpufreq_stats, cpu) = NULL;
        }
        if (all_freq_table) {
                kfree(all_freq_table->freq_table);
index 34d19b1984a19b76f92352adbde66e83796730b7..decf84e7194371a420a6aedacc5beb251850ed0d 100644 (file)
@@ -599,6 +599,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits.min_perf_pct = 100;
                limits.min_perf = int_tofp(1);
+               limits.max_policy_pct = 100;
                limits.max_perf_pct = 100;
                limits.max_perf = int_tofp(1);
                limits.no_turbo = 0;
index 3b03329174e78c4c85de7081dc233835b8c7bf6d..42627d357a966ec288ca6a4d46996c9607aba42d 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/rockchip/cpu.h>
 #include <linux/rockchip/dvfs.h>
 #include <asm/smp_plat.h>
-#include <asm/cpu.h>
 #include <asm/unistd.h>
 #include <asm/uaccess.h>
 #include <asm/system_misc.h>
@@ -251,15 +250,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 
        policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC;        // make ondemand default sampling_rate to 40000
 
-       /*
-        * On SMP configuartion, both processors share the voltage
-        * and clock. So both CPUs needs to be scaled together and hence
-        * needs software co-ordination. Use cpufreq affected_cpus
-        * interface to handle this scenario. Additional is_smp() check
-        * is to keep SMP_ON_UP build working.
-        */
-       if (is_smp())
-               cpumask_setall(policy->cpus);
+       cpumask_setall(policy->cpus);
 
        return 0;
 
index 32f480622b9784336e9e281b4bceffccbd847773..3833bd71cc5df4527f44a8ba7a369182060f93a9 100644 (file)
@@ -190,7 +190,7 @@ static void add_session_id(struct cryp_ctx *ctx)
 static irqreturn_t cryp_interrupt_handler(int irq, void *param)
 {
        struct cryp_ctx *ctx;
-       int i;
+       int count;
        struct cryp_device_data *device_data;
 
        if (param == NULL) {
@@ -215,12 +215,11 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
        if (cryp_pending_irq_src(device_data,
                                 CRYP_IRQ_SRC_OUTPUT_FIFO)) {
                if (ctx->outlen / ctx->blocksize > 0) {
-                       for (i = 0; i < ctx->blocksize / 4; i++) {
-                               *(ctx->outdata) = readl_relaxed(
-                                               &device_data->base->dout);
-                               ctx->outdata += 4;
-                               ctx->outlen -= 4;
-                       }
+                       count = ctx->blocksize / 4;
+
+                       readsl(&device_data->base->dout, ctx->outdata, count);
+                       ctx->outdata += count;
+                       ctx->outlen -= count;
 
                        if (ctx->outlen == 0) {
                                cryp_disable_irq_src(device_data,
@@ -230,12 +229,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
        } else if (cryp_pending_irq_src(device_data,
                                        CRYP_IRQ_SRC_INPUT_FIFO)) {
                if (ctx->datalen / ctx->blocksize > 0) {
-                       for (i = 0 ; i < ctx->blocksize / 4; i++) {
-                               writel_relaxed(ctx->indata,
-                                               &device_data->base->din);
-                               ctx->indata += 4;
-                               ctx->datalen -= 4;
-                       }
+                       count = ctx->blocksize / 4;
+
+                       writesl(&device_data->base->din, ctx->indata, count);
+
+                       ctx->indata += count;
+                       ctx->datalen -= count;
 
                        if (ctx->datalen == 0)
                                cryp_disable_irq_src(device_data,
index 7f3c57113ba11c1d8867a3cdd4f65894c3b37f29..1e08ce765f0c5be605309240189e56466632127b 100644 (file)
@@ -562,7 +562,7 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
 
        if (apiexcp & UECC_EXCP_DETECTED) {
                cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
-               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
                                     pfn, offset, 0,
                                     csrow, -1, -1,
                                     mci->ctl_name, "");
index 1c4056a5038396e11c10945d695de060d00ad47c..2697deae3ab76f61e9eb9982bff7f1064f37a1c4 100644 (file)
@@ -226,7 +226,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
 static void process_ce_no_info(struct mem_ctl_info *mci)
 {
        edac_dbg(3, "\n");
-       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
+       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
                             "e7xxx CE log register overflow", "");
 }
 
index aa44c1718f50382eac5a87b1c9bef5e33db020e1..71b26513b93bc312ef4132e3abba51367247ae41 100644 (file)
@@ -242,11 +242,11 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
                                             -1, -1,
                                             "i3000 UE", "");
                } else if (log & I3200_ECCERRLOG_CE) {
-                       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+                       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                                             0, 0, eccerrlog_syndrome(log),
                                             eccerrlog_row(channel, log),
                                             -1, -1,
-                                            "i3000 UE", "");
+                                            "i3000 CE", "");
                }
        }
 }
index 3e3e431c83011313dadc901afe8347501a23e911..b93b0d006ebb0a0bf994d352b99454fac6ee3df3 100644 (file)
@@ -124,7 +124,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
                                     dimm->location[0], dimm->location[1], -1,
                                     "i82860 UE", "");
        else
-               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                                     info->eap, 0, info->derrsyn,
                                     dimm->location[0], dimm->location[1], -1,
                                     "i82860 CE", "");
index ac1b43a0428531273c4fdaefd56a0b83f1545ce2..4f73c727a97acd222dbeb1fcab3812092b349b43 100644 (file)
@@ -1637,8 +1637,7 @@ static int dispatch_ioctl(struct client *client,
            _IOC_SIZE(cmd) > sizeof(buffer))
                return -ENOTTY;
 
-       if (_IOC_DIR(cmd) == _IOC_READ)
-               memset(&buffer, 0, _IOC_SIZE(cmd));
+       memset(&buffer, 0, sizeof(buffer));
 
        if (_IOC_DIR(cmd) & _IOC_WRITE)
                if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
index f0a43646a2f3f4b36ddd64af33d39920c3cb006a..5abe943e34042df45d8d1f643b0334e6ceb19748 100644 (file)
@@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove);
  */
 static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
 {
-       WARN_ON(!spin_is_locked(&__efivars->lock));
+       lockdep_assert_held(&__efivars->lock);
 
        list_del(&entry->list);
        spin_unlock_irq(&__efivars->lock);
@@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry)
        const struct efivar_operations *ops = __efivars->ops;
        efi_status_t status;
 
-       WARN_ON(!spin_is_locked(&__efivars->lock));
+       lockdep_assert_held(&__efivars->lock);
 
        status = ops->set_variable(entry->var.VariableName,
                                   &entry->var.VendorGuid,
@@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
        int strsize1, strsize2;
        bool found = false;
 
-       WARN_ON(!spin_is_locked(&__efivars->lock));
+       lockdep_assert_held(&__efivars->lock);
 
        list_for_each_entry_safe(entry, n, head, list) {
                strsize1 = ucs2_strsize(name, 1024);
@@ -739,7 +739,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
        const struct efivar_operations *ops = __efivars->ops;
        efi_status_t status;
 
-       WARN_ON(!spin_is_locked(&__efivars->lock));
+       lockdep_assert_held(&__efivars->lock);
 
        status = ops->get_variable(entry->var.VariableName,
                                   &entry->var.VendorGuid,
index e46ccb9b8064546102c13832763becf2b0b71273..b2358bbc1293a721c47c37981afa54bb0606f409 100644 (file)
@@ -24,8 +24,8 @@ config GATOR_MALI_4XXMP
        bool "Mali-400MP or Mali-450MP"
        select GATOR_WITH_MALI_SUPPORT
 
-config GATOR_MALI_T6XX
-       bool "Mali-T604 or Mali-T658"
+config GATOR_MALI_MIDGARD
+       bool "Mali-T60x, Mali-T62x, Mali-T72x or Mali-T76x"
        select GATOR_WITH_MALI_SUPPORT
 
 endchoice
index 3dc9d059a4b409c30c193b87fd38cb4774e25953..28d2070b11d54304605612c05b1dd198f3afc783 100644 (file)
@@ -7,23 +7,23 @@ CONFIG_GATOR ?= m
 obj-$(CONFIG_GATOR) := gator.o
 
 gator-y :=     gator_main.o \
-               gator_events_irq.o \
-               gator_events_sched.o \
-               gator_events_net.o \
                gator_events_block.o \
+               gator_events_irq.o \
                gator_events_meminfo.o \
-               gator_events_perf_pmu.o \
                gator_events_mmapped.o \
+               gator_events_net.o \
+               gator_events_perf_pmu.o \
+               gator_events_sched.o \
 
 # Convert the old GATOR_WITH_MALI_SUPPORT to the new kernel flags
 ifneq ($(GATOR_WITH_MALI_SUPPORT),)
   CONFIG_GATOR_WITH_MALI_SUPPORT := y
-  ifeq ($(GATOR_WITH_MALI_SUPPORT),MALI_T6xx)
+  ifeq ($(GATOR_WITH_MALI_SUPPORT),MALI_MIDGARD)
     CONFIG_GATOR_MALI_4XXMP := n
-    CONFIG_GATOR_MALI_T6XX := y
+    CONFIG_GATOR_MALI_MIDGARD := y
   else
     CONFIG_GATOR_MALI_4XXMP := y
-    CONFIG_GATOR_MALI_T6XX := n
+    CONFIG_GATOR_MALI_MIDGARD := n
   endif
   EXTRA_CFLAGS += -DMALI_SUPPORT=$(GATOR_WITH_MALI_SUPPORT)
   ifneq ($(GATOR_MALI_INTERFACE_STYLE),)
@@ -32,10 +32,10 @@ ifneq ($(GATOR_WITH_MALI_SUPPORT),)
 endif
 
 ifeq ($(CONFIG_GATOR_WITH_MALI_SUPPORT),y)
-  ifeq ($(CONFIG_GATOR_MALI_T6XX),y)
-    gator-y += gator_events_mali_t6xx.o \
-               gator_events_mali_t6xx_hw.o
-    include $(src)/mali_t6xx.mk
+  ifeq ($(CONFIG_GATOR_MALI_MIDGARD),y)
+    gator-y += gator_events_mali_midgard.o \
+               gator_events_mali_midgard_hw.o
+    include $(src)/mali_midgard.mk
   else
     gator-y += gator_events_mali_4xx.o
   endif
@@ -45,20 +45,23 @@ ifeq ($(CONFIG_GATOR_WITH_MALI_SUPPORT),y)
     ccflags-y += -I$(CONFIG_GATOR_MALI_PATH)
   endif
   ccflags-$(CONFIG_GATOR_MALI_4XXMP) += -DMALI_SUPPORT=MALI_4xx
-  ccflags-$(CONFIG_GATOR_MALI_T6XX) += -DMALI_SUPPORT=MALI_T6xx
+  ccflags-$(CONFIG_GATOR_MALI_MIDGARD) += -DMALI_SUPPORT=MALI_MIDGARD
 endif
 
-# GATOR_TEST controls whether to include (=1) or exclude (=0) test code. 
+# GATOR_TEST controls whether to include (=1) or exclude (=0) test code.
 GATOR_TEST ?= 0
 EXTRA_CFLAGS +=        -DGATOR_TEST=$(GATOR_TEST)
 
+# Should the original or new block_rq_complete API be used?
+OLD_BLOCK_RQ_COMPLETE := $(shell grep -A3 block_rq_complete $(srctree)/include/trace/events/block.h | grep nr_bytes -q; echo $$?)
+EXTRA_CFLAGS += -DOLD_BLOCK_RQ_COMPLETE=$(OLD_BLOCK_RQ_COMPLETE)
+
 gator-$(CONFIG_ARM) += gator_events_armv6.o \
                        gator_events_armv7.o \
-                       gator_events_ccn-504.o \
                        gator_events_l2c-310.o \
                        gator_events_scorpion.o
 
-gator-$(CONFIG_ARM64) +=       gator_events_ccn-504.o
+gator-$(CONFIG_ARM64) +=
 
 else
 
index 586cd9e742fb14bc2a6d2e8f808fe5539ec95919..5cc73a388c4f71a13fd5baecf4a42e68c36f3922 100644 (file)
 #include <linux/mm.h>
 #include <linux/list.h>
 
-#define GATOR_PERF_SUPPORT             LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
-#define GATOR_PERF_PMU_SUPPORT  GATOR_PERF_SUPPORT && defined(CONFIG_PERF_EVENTS) && (!(defined(__arm__) || defined(__aarch64__)) || defined(CONFIG_HW_PERF_EVENTS))
+#define GATOR_PERF_SUPPORT      (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+#define GATOR_PERF_PMU_SUPPORT  (GATOR_PERF_SUPPORT && defined(CONFIG_PERF_EVENTS) && (!(defined(__arm__) || defined(__aarch64__)) || defined(CONFIG_HW_PERF_EVENTS)))
 #define GATOR_NO_PERF_SUPPORT   (!(GATOR_PERF_SUPPORT))
-#define GATOR_CPU_FREQ_SUPPORT  (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)) && defined(CONFIG_CPU_FREQ)
+#define GATOR_CPU_FREQ_SUPPORT  ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)) && defined(CONFIG_CPU_FREQ))
 #define GATOR_IKS_SUPPORT       defined(CONFIG_BL_SWITCHER)
 
-// cpu ids
+/* cpu ids */
 #define ARM1136     0xb36
 #define ARM1156     0xb56
 #define ARM1176     0xb76
@@ -29,7 +29,6 @@
 #define CORTEX_A7   0xc07
 #define CORTEX_A8   0xc08
 #define CORTEX_A9   0xc09
-#define CORTEX_A12  0xc0d
 #define CORTEX_A15  0xc0f
 #define CORTEX_A17  0xc0e
 #define SCORPION    0x00f
 #define AARCH64     0xd0f
 #define OTHER       0xfff
 
+/* gpu enums */
+#define MALI_4xx     1
+#define MALI_MIDGARD 2
+
 #define MAXSIZE_CORE_NAME 32
 
 struct gator_cpu {
        const int cpuid;
-       // Human readable name
+       /* Human readable name */
        const char core_name[MAXSIZE_CORE_NAME];
-       // gatorfs event and Perf PMU name
-       const char * const pmnc_name;
-       // compatible from Documentation/devicetree/bindings/arm/cpus.txt
-       const char * const dt_name;
+       /* gatorfs event and Perf PMU name */
+       const char *const pmnc_name;
+       /* compatible from Documentation/devicetree/bindings/arm/cpus.txt */
+       const char *const dt_name;
        const int pmnc_counters;
 };
 
@@ -82,28 +85,40 @@ int gatorfs_create_ro_ulong(struct super_block *sb, struct dentry *root,
                register_trace_##probe_name(probe_##probe_name)
 #      define GATOR_UNREGISTER_TRACE(probe_name) \
                unregister_trace_##probe_name(probe_##probe_name)
-#else
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
 #      define GATOR_DEFINE_PROBE(probe_name, proto) \
                static void probe_##probe_name(void *data, PARAMS(proto))
 #      define GATOR_REGISTER_TRACE(probe_name) \
                register_trace_##probe_name(probe_##probe_name, NULL)
 #      define GATOR_UNREGISTER_TRACE(probe_name) \
                unregister_trace_##probe_name(probe_##probe_name, NULL)
+#else
+#      define GATOR_DEFINE_PROBE(probe_name, proto) \
+               extern struct tracepoint *gator_tracepoint_##probe_name; \
+               static void probe_##probe_name(void *data, PARAMS(proto))
+#      define GATOR_REGISTER_TRACE(probe_name) \
+               ((gator_tracepoint_##probe_name == NULL) || tracepoint_probe_register(gator_tracepoint_##probe_name, probe_##probe_name, NULL))
+#      define GATOR_UNREGISTER_TRACE(probe_name) \
+               tracepoint_probe_unregister(gator_tracepoint_##probe_name, probe_##probe_name, NULL)
 #endif
 
 /******************************************************************************
  * Events
  ******************************************************************************/
 struct gator_interface {
-       void (*shutdown)(void); // Complementary function to init
+       /* Complementary function to init */
+       void (*shutdown)(void);
        int (*create_files)(struct super_block *sb, struct dentry *root);
        int (*start)(void);
-       void (*stop)(void);             // Complementary function to start
+       /* Complementary function to start */
+       void (*stop)(void);
        int (*online)(int **buffer, bool migrate);
        int (*offline)(int **buffer, bool migrate);
-       void (*online_dispatch)(int cpu, bool migrate); // called in process context but may not be running on core 'cpu'
-       void (*offline_dispatch)(int cpu, bool migrate);        // called in process context but may not be running on core 'cpu'
-       int (*read)(int **buffer);
+       /* called in process context but may not be running on core 'cpu' */
+       void (*online_dispatch)(int cpu, bool migrate);
+       /* called in process context but may not be running on core 'cpu' */
+       void (*offline_dispatch)(int cpu, bool migrate);
+       int (*read)(int **buffer, bool sched_switch);
        int (*read64)(long long **buffer);
        int (*read_proc)(long long **buffer, struct task_struct *);
        struct list_head list;
@@ -115,6 +130,8 @@ u32 gator_cpuid(void);
 
 void gator_backtrace_handler(struct pt_regs *const regs);
 
+void gator_marshal_activity_switch(int core, int key, int activity, int pid);
+
 #if !GATOR_IKS_SUPPORT
 
 #define get_physical_cpu() smp_processor_id()
@@ -132,4 +149,4 @@ int pcpu_to_lcpu(const int pcpu);
 #define get_logical_cpu() smp_processor_id()
 #define on_primary_core() (get_logical_cpu() == 0)
 
-#endif // GATOR_H_
+#endif /* GATOR_H_ */
index 7e2c6e5d871510033cb92c971a9a53235023b601..ff9a3cef7b2e3b8b3bc51d7dc4d5d632e46cf70f 100644 (file)
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/current.h>
 #include <linux/spinlock.h>
 
 static DEFINE_SPINLOCK(annotate_lock);
-static bool collect_annotations = false;
+static bool collect_annotations;
 
 static int annotate_copy(struct file *file, char const __user *buf, size_t count)
 {
@@ -24,10 +24,10 @@ static int annotate_copy(struct file *file, char const __user *buf, size_t count
        int write = per_cpu(gator_buffer_write, cpu)[ANNOTATE_BUF];
 
        if (file == NULL) {
-               // copy from kernel
+               /* copy from kernel */
                memcpy(&per_cpu(gator_buffer, cpu)[ANNOTATE_BUF][write], buf, count);
        } else {
-               // copy from user space
+               /* copy from user space */
                if (copy_from_user(&per_cpu(gator_buffer, cpu)[ANNOTATE_BUF][write], buf, count) != 0)
                        return -1;
        }
@@ -41,70 +41,70 @@ static ssize_t annotate_write(struct file *file, char const __user *buf, size_t
        int pid, cpu, header_size, available, contiguous, length1, length2, size, count = count_orig & 0x7fffffff;
        bool interrupt_context;
 
-       if (*offset) {
+       if (*offset)
                return -EINVAL;
-       }
 
        interrupt_context = in_interrupt();
-       // Annotations are not supported in interrupt context, but may work if you comment out the the next four lines of code.
-       //   By doing so, annotations in interrupt context can result in deadlocks and lost data.
+       /* Annotations are not supported in interrupt context, but may work
+        * if you comment out the the next four lines of code. By doing so,
+        * annotations in interrupt context can result in deadlocks and lost
+        * data.
+        */
        if (interrupt_context) {
-               printk(KERN_WARNING "gator: Annotations are not supported in interrupt context. Edit gator_annotate.c in the gator driver to enable annotations in interrupt context.\n");
+               pr_warning("gator: Annotations are not supported in interrupt context. Edit gator_annotate.c in the gator driver to enable annotations in interrupt context.\n");
                return -EINVAL;
        }
 
  retry:
-       // synchronize between cores and with collect_annotations
+       /* synchronize between cores and with collect_annotations */
        spin_lock(&annotate_lock);
 
        if (!collect_annotations) {
-               // Not collecting annotations, tell the caller everything was written
+               /* Not collecting annotations, tell the caller everything was written */
                size = count_orig;
                goto annotate_write_out;
        }
 
-       // Annotation only uses a single per-cpu buffer as the data must be in order to the engine
+       /* Annotation only uses a single per-cpu buffer as the data must be in order to the engine */
        cpu = 0;
 
-       if (current == NULL) {
+       if (current == NULL)
                pid = 0;
-       } else {
+       else
                pid = current->pid;
-       }
 
-       // determine total size of the payload
+       /* determine total size of the payload */
        header_size = MAXSIZE_PACK32 * 3 + MAXSIZE_PACK64;
        available = buffer_bytes_available(cpu, ANNOTATE_BUF) - header_size;
        size = count < available ? count : available;
 
        if (size <= 0) {
-               // Buffer is full, wait until space is available
+               /* Buffer is full, wait until space is available */
                spin_unlock(&annotate_lock);
 
-               // Drop the annotation as blocking is not allowed in interrupt context
-               if (interrupt_context) {
+               /* Drop the annotation as blocking is not allowed in interrupt context */
+               if (interrupt_context)
                        return -EINVAL;
-               }
 
                wait_event_interruptible(gator_annotate_wait, buffer_bytes_available(cpu, ANNOTATE_BUF) > header_size || !collect_annotations);
 
-               // Check to see if a signal is pending
-               if (signal_pending(current)) {
+               /* Check to see if a signal is pending */
+               if (signal_pending(current))
                        return -EINTR;
-               }
 
                goto retry;
        }
 
-       // synchronize shared variables annotateBuf and annotatePos
+       /* synchronize shared variables annotateBuf and annotatePos */
        if (per_cpu(gator_buffer, cpu)[ANNOTATE_BUF]) {
                u64 time = gator_get_time();
+
                gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, get_physical_cpu());
                gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, pid);
                gator_buffer_write_packed_int64(cpu, ANNOTATE_BUF, time);
                gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, size);
 
-               // determine the sizes to capture, length1 + length2 will equal size
+               /* determine the sizes to capture, length1 + length2 will equal size */
                contiguous = contiguous_space_available(cpu, ANNOTATE_BUF);
                if (size < contiguous) {
                        length1 = size;
@@ -124,14 +124,14 @@ static ssize_t annotate_write(struct file *file, char const __user *buf, size_t
                        goto annotate_write_out;
                }
 
-               // Check and commit; commit is set to occur once buffer is 3/4 full
+               /* Check and commit; commit is set to occur once buffer is 3/4 full */
                buffer_check(cpu, ANNOTATE_BUF, time);
        }
 
 annotate_write_out:
        spin_unlock(&annotate_lock);
 
-       // return the number of bytes written
+       /* return the number of bytes written */
        return size;
 }
 
@@ -141,18 +141,21 @@ static int annotate_release(struct inode *inode, struct file *file)
 {
        int cpu = 0;
 
-       // synchronize between cores
+       /* synchronize between cores */
        spin_lock(&annotate_lock);
 
        if (per_cpu(gator_buffer, cpu)[ANNOTATE_BUF] && buffer_check_space(cpu, ANNOTATE_BUF, MAXSIZE_PACK64 + 3 * MAXSIZE_PACK32)) {
                uint32_t pid = current->pid;
+
                gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, get_physical_cpu());
                gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, pid);
-               gator_buffer_write_packed_int64(cpu, ANNOTATE_BUF, 0);  // time
-               gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, 0);    // size
+               /* time */
+               gator_buffer_write_packed_int64(cpu, ANNOTATE_BUF, 0);
+               /* size */
+               gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, 0);
        }
 
-       // Check and commit; commit is set to occur once buffer is 3/4 full
+       /* Check and commit; commit is set to occur once buffer is 3/4 full */
        buffer_check(cpu, ANNOTATE_BUF, gator_get_time());
 
        spin_unlock(&annotate_lock);
@@ -178,7 +181,7 @@ static int gator_annotate_start(void)
 
 static void gator_annotate_stop(void)
 {
-       // the spinlock here will ensure that when this function exits, we are not in the middle of an annotation
+       /* the spinlock here will ensure that when this function exits, we are not in the middle of an annotation */
        spin_lock(&annotate_lock);
        collect_annotations = false;
        wake_up(&gator_annotate_wait);
index 0108068255297285859854a15bc632579365de15..69471f99e5fb3769957dc2d341a4792cf89c1302 100644 (file)
@@ -19,10 +19,11 @@ static void kannotate_write(const char *ptr, unsigned int size)
        int retval;
        int pos = 0;
        loff_t offset = 0;
+
        while (pos < size) {
                retval = annotate_write(NULL, &ptr[pos], size - pos, &offset);
                if (retval < 0) {
-                       printk(KERN_WARNING "gator: kannotate_write failed with return value %d\n", retval);
+                       pr_warning("gator: kannotate_write failed with return value %d\n", retval);
                        return;
                }
                pos += retval;
@@ -47,6 +48,7 @@ void gator_annotate_channel(int channel, const char *str)
 {
        const u16 str_size = strlen(str) & 0xffff;
        char header[8];
+
        header[0] = ESCAPE_CODE;
        header[1] = STRING_ANNOTATION;
        marshal_u32(header + 2, channel);
@@ -54,20 +56,19 @@ void gator_annotate_channel(int channel, const char *str)
        kannotate_write(header, sizeof(header));
        kannotate_write(str, str_size);
 }
-
 EXPORT_SYMBOL(gator_annotate_channel);
 
 void gator_annotate(const char *str)
 {
        gator_annotate_channel(0, str);
 }
-
 EXPORT_SYMBOL(gator_annotate);
 
 void gator_annotate_channel_color(int channel, int color, const char *str)
 {
        const u16 str_size = (strlen(str) + 4) & 0xffff;
        char header[12];
+
        header[0] = ESCAPE_CODE;
        header[1] = STRING_ANNOTATION;
        marshal_u32(header + 2, channel);
@@ -76,39 +77,37 @@ void gator_annotate_channel_color(int channel, int color, const char *str)
        kannotate_write(header, sizeof(header));
        kannotate_write(str, str_size - 4);
 }
-
 EXPORT_SYMBOL(gator_annotate_channel_color);
 
 void gator_annotate_color(int color, const char *str)
 {
        gator_annotate_channel_color(0, color, str);
 }
-
 EXPORT_SYMBOL(gator_annotate_color);
 
 void gator_annotate_channel_end(int channel)
 {
        char header[8];
+
        header[0] = ESCAPE_CODE;
        header[1] = STRING_ANNOTATION;
        marshal_u32(header + 2, channel);
        marshal_u16(header + 6, 0);
        kannotate_write(header, sizeof(header));
 }
-
 EXPORT_SYMBOL(gator_annotate_channel_end);
 
 void gator_annotate_end(void)
 {
        gator_annotate_channel_end(0);
 }
-
 EXPORT_SYMBOL(gator_annotate_end);
 
-void gator_annotate_name_channel(int channel, int group, const char* str)
+void gator_annotate_name_channel(int channel, int group, const char *str)
 {
        const u16 str_size = strlen(str) & 0xffff;
        char header[12];
+
        header[0] = ESCAPE_CODE;
        header[1] = NAME_CHANNEL_ANNOTATION;
        marshal_u32(header + 2, channel);
@@ -117,13 +116,13 @@ void gator_annotate_name_channel(int channel, int group, const char* str)
        kannotate_write(header, sizeof(header));
        kannotate_write(str, str_size);
 }
-
 EXPORT_SYMBOL(gator_annotate_name_channel);
 
-void gator_annotate_name_group(int group, const char* str)
+void gator_annotate_name_group(int group, const char *str)
 {
        const u16 str_size = strlen(str) & 0xffff;
        char header[8];
+
        header[0] = ESCAPE_CODE;
        header[1] = NAME_GROUP_ANNOTATION;
        marshal_u32(header + 2, group);
@@ -131,7 +130,6 @@ void gator_annotate_name_group(int group, const char* str)
        kannotate_write(header, sizeof(header));
        kannotate_write(str, str_size);
 }
-
 EXPORT_SYMBOL(gator_annotate_name_group);
 
 void gator_annotate_visual(const char *data, unsigned int length, const char *str)
@@ -139,6 +137,7 @@ void gator_annotate_visual(const char *data, unsigned int length, const char *st
        const u16 str_size = strlen(str) & 0xffff;
        char header[4];
        char header_length[4];
+
        header[0] = ESCAPE_CODE;
        header[1] = VISUAL_ANNOTATION;
        marshal_u16(header + 2, str_size);
@@ -148,49 +147,49 @@ void gator_annotate_visual(const char *data, unsigned int length, const char *st
        kannotate_write(header_length, sizeof(header_length));
        kannotate_write(data, length);
 }
-
 EXPORT_SYMBOL(gator_annotate_visual);
 
 void gator_annotate_marker(void)
 {
        char header[4];
+
        header[0] = ESCAPE_CODE;
        header[1] = MARKER_ANNOTATION;
        marshal_u16(header + 2, 0);
        kannotate_write(header, sizeof(header));
 }
-
 EXPORT_SYMBOL(gator_annotate_marker);
 
 void gator_annotate_marker_str(const char *str)
 {
        const u16 str_size = strlen(str) & 0xffff;
        char header[4];
+
        header[0] = ESCAPE_CODE;
        header[1] = MARKER_ANNOTATION;
        marshal_u16(header + 2, str_size);
        kannotate_write(header, sizeof(header));
        kannotate_write(str, str_size);
 }
-
 EXPORT_SYMBOL(gator_annotate_marker_str);
 
 void gator_annotate_marker_color(int color)
 {
        char header[8];
+
        header[0] = ESCAPE_CODE;
        header[1] = MARKER_ANNOTATION;
        marshal_u16(header + 2, 4);
        marshal_u32(header + 4, color);
        kannotate_write(header, sizeof(header));
 }
-
 EXPORT_SYMBOL(gator_annotate_marker_color);
 
 void gator_annotate_marker_color_str(int color, const char *str)
 {
        const u16 str_size = (strlen(str) + 4) & 0xffff;
        char header[8];
+
        header[0] = ESCAPE_CODE;
        header[1] = MARKER_ANNOTATION;
        marshal_u16(header + 2, str_size);
@@ -198,5 +197,4 @@ void gator_annotate_marker_color_str(int color, const char *str)
        kannotate_write(header, sizeof(header));
        kannotate_write(str, str_size - 4);
 }
-
 EXPORT_SYMBOL(gator_annotate_marker_color_str);
index 9f305cf7242c8c2b3a196a13d7e3ad5e6e1811b7..76c941d009a9afdcaf15fa682b3bd40004b39166 100644 (file)
@@ -14,17 +14,17 @@ struct stack_frame_eabi {
        union {
                struct {
                        unsigned long fp;
-                       // May be the fp in the case of a leaf function or clang
+                       /* May be the fp in the case of a leaf function or clang */
                        unsigned long lr;
-                       // If lr is really the fp, lr2 is the corresponding lr
+                       /* If lr is really the fp, lr2 is the corresponding lr */
                        unsigned long lr2;
                };
-               // Used to read 32 bit fp/lr from a 64 bit kernel
+               /* Used to read 32 bit fp/lr from a 64 bit kernel */
                struct {
                        u32 fp_32;
-                       // same as lr above
+                       /* same as lr above */
                        u32 lr_32;
-                       // same as lr2 above
+                       /* same as lr2 above */
                        u32 lr2_32;
                };
        };
@@ -35,9 +35,8 @@ static void gator_add_trace(int cpu, unsigned long address)
        off_t offset = 0;
        unsigned long cookie = get_address_cookie(cpu, current, address & ~1, &offset);
 
-       if (cookie == NO_COOKIE || cookie == UNRESOLVED_COOKIE) {
+       if (cookie == NO_COOKIE || cookie == UNRESOLVED_COOKIE)
                offset = address;
-       }
 
        marshal_backtrace(offset & ~1, cookie, 0);
 }
@@ -54,36 +53,34 @@ static void arm_backtrace_eabi(int cpu, struct pt_regs *const regs, unsigned int
        unsigned long lr = regs->ARM_lr;
        const int gcc_frame_offset = sizeof(unsigned long);
 #else
-       // Is userspace aarch32 (32 bit)
+       /* Is userspace aarch32 (32 bit) */
        const bool is_compat = compat_user_mode(regs);
        unsigned long fp = (is_compat ? regs->regs[11] : regs->regs[29]);
        unsigned long sp = (is_compat ? regs->compat_sp : regs->sp);
        unsigned long lr = (is_compat ? regs->compat_lr : regs->regs[30]);
        const int gcc_frame_offset = (is_compat ? sizeof(u32) : 0);
 #endif
-       // clang frame offset is always zero
+       /* clang frame offset is always zero */
        int is_user_mode = user_mode(regs);
 
-       // pc (current function) has already been added
+       /* pc (current function) has already been added */
 
-       if (!is_user_mode) {
+       if (!is_user_mode)
                return;
-       }
 
-       // Add the lr (parent function)
-       // entry preamble may not have executed
+       /* Add the lr (parent function), entry preamble may not have
+        * executed
+        */
        gator_add_trace(cpu, lr);
 
-       // check fp is valid
-       if (fp == 0 || fp < sp) {
+       /* check fp is valid */
+       if (fp == 0 || fp < sp)
                return;
-       }
 
-       // Get the current stack frame
+       /* Get the current stack frame */
        curr = (struct stack_frame_eabi *)(fp - gcc_frame_offset);
-       if ((unsigned long)curr & 3) {
+       if ((unsigned long)curr & 3)
                return;
-       }
 
        while (depth-- && curr) {
                if (!access_ok(VERIFY_READ, curr, sizeof(struct stack_frame_eabi)) ||
@@ -95,13 +92,15 @@ static void arm_backtrace_eabi(int cpu, struct pt_regs *const regs, unsigned int
                lr = (is_compat ? bufcurr.lr_32 : bufcurr.lr);
 
 #define calc_next(reg) ((reg) - gcc_frame_offset)
-               // Returns true if reg is a valid fp
+               /* Returns true if reg is a valid fp */
 #define validate_next(reg, curr) \
                ((reg) != 0 && (calc_next(reg) & 3) == 0 && (unsigned long)(curr) < calc_next(reg))
 
-               // Try lr from the stack as the fp because gcc leaf functions do not push lr
-               // If gcc_frame_offset is non-zero, the lr will also be the clang fp
-               // This assumes code is at a lower address than the stack
+               /* Try lr from the stack as the fp because gcc leaf functions do
+                * not push lr. If gcc_frame_offset is non-zero, the lr will also
+                * be the clang fp. This assumes code is at a lower address than
+                * the stack
+                */
                if (validate_next(lr, curr)) {
                        fp = lr;
                        lr = (is_compat ? bufcurr.lr2_32 : bufcurr.lr2);
@@ -109,11 +108,10 @@ static void arm_backtrace_eabi(int cpu, struct pt_regs *const regs, unsigned int
 
                gator_add_trace(cpu, lr);
 
-               if (!validate_next(fp, curr)) {
+               if (!validate_next(fp, curr))
                        return;
-               }
 
-               // Move to the next stack frame
+               /* Move to the next stack frame */
                curr = (struct stack_frame_eabi *)calc_next(fp);
        }
 #endif
@@ -129,6 +127,7 @@ static int report_trace(struct stackframe *frame, void *d)
 #if defined(MODULE)
                unsigned int cpu = get_physical_cpu();
                struct module *mod = __module_address(addr);
+
                if (mod) {
                        cookie = get_cookie(cpu, current, mod->name, false);
                        addr = addr - (unsigned long)mod->module_core;
@@ -142,13 +141,13 @@ static int report_trace(struct stackframe *frame, void *d)
 }
 #endif
 
-// Uncomment the following line to enable kernel stack unwinding within gator, note it can also be defined from the Makefile
-// #define GATOR_KERNEL_STACK_UNWINDING
+/* Uncomment the following line to enable kernel stack unwinding within gator, note it can also be defined from the Makefile */
+/* #define GATOR_KERNEL_STACK_UNWINDING */
 
 #if (defined(__arm__) || defined(__aarch64__)) && !defined(GATOR_KERNEL_STACK_UNWINDING)
-// Disabled by default
+/* Disabled by default */
 MODULE_PARM_DESC(kernel_stack_unwinding, "Allow kernel stack unwinding.");
-static bool kernel_stack_unwinding = 0;
+static bool kernel_stack_unwinding;
 module_param(kernel_stack_unwinding, bool, 0644);
 #endif
 
@@ -161,6 +160,7 @@ static void kernel_backtrace(int cpu, struct pt_regs *const regs)
        int depth = (kernel_stack_unwinding ? gator_backtrace_depth : 1);
 #endif
        struct stackframe frame;
+
        if (depth == 0)
                depth = 1;
 #if defined(__arm__)
@@ -178,7 +178,7 @@ static void kernel_backtrace(int cpu, struct pt_regs *const regs)
        marshal_backtrace(PC_REG & ~1, NO_COOKIE, 1);
 #endif
 }
+
 static void gator_add_sample(int cpu, struct pt_regs *const regs, u64 time)
 {
        bool in_kernel;
@@ -196,10 +196,10 @@ static void gator_add_sample(int cpu, struct pt_regs *const regs, u64 time)
        if (in_kernel) {
                kernel_backtrace(cpu, regs);
        } else {
-               // Cookie+PC
+               /* Cookie+PC */
                gator_add_trace(cpu, PC_REG);
 
-               // Backtrace
+               /* Backtrace */
                if (gator_backtrace_depth)
                        arm_backtrace_eabi(cpu, regs, gator_backtrace_depth);
        }
index eba22dfe3bf27e2de154745ae50e5456e918725e..910d5aa1506697d0c22dc0120547b92e6f24a0bd 100644 (file)
 static void marshal_frame(int cpu, int buftype)
 {
        int frame;
+       bool write_cpu;
 
-       if (!per_cpu(gator_buffer, cpu)[buftype]) {
+       if (!per_cpu(gator_buffer, cpu)[buftype])
                return;
-       }
 
        switch (buftype) {
        case SUMMARY_BUF:
+               write_cpu = false;
                frame = FRAME_SUMMARY;
                break;
        case BACKTRACE_BUF:
+               write_cpu = true;
                frame = FRAME_BACKTRACE;
                break;
        case NAME_BUF:
+               write_cpu = true;
                frame = FRAME_NAME;
                break;
        case COUNTER_BUF:
+               write_cpu = false;
                frame = FRAME_COUNTER;
                break;
        case BLOCK_COUNTER_BUF:
+               write_cpu = true;
                frame = FRAME_BLOCK_COUNTER;
                break;
        case ANNOTATE_BUF:
+               write_cpu = false;
                frame = FRAME_ANNOTATE;
                break;
        case SCHED_TRACE_BUF:
+               write_cpu = true;
                frame = FRAME_SCHED_TRACE;
                break;
-       case GPU_TRACE_BUF:
-               frame = FRAME_GPU_TRACE;
-               break;
        case IDLE_BUF:
+               write_cpu = false;
                frame = FRAME_IDLE;
                break;
+       case ACTIVITY_BUF:
+               write_cpu = false;
+               frame = FRAME_ACTIVITY;
+               break;
        default:
+               write_cpu = false;
                frame = -1;
                break;
        }
 
-       // add response type
-       if (gator_response_type > 0) {
+       /* add response type */
+       if (gator_response_type > 0)
                gator_buffer_write_packed_int(cpu, buftype, gator_response_type);
-       }
 
-       // leave space for 4-byte unpacked length
+       /* leave space for 4-byte unpacked length */
        per_cpu(gator_buffer_write, cpu)[buftype] = (per_cpu(gator_buffer_write, cpu)[buftype] + sizeof(s32)) & gator_buffer_mask[buftype];
 
-       // add frame type and core number
+       /* add frame type and core number */
        gator_buffer_write_packed_int(cpu, buftype, frame);
-       gator_buffer_write_packed_int(cpu, buftype, cpu);
+       if (write_cpu)
+               gator_buffer_write_packed_int(cpu, buftype, cpu);
 }
 
 static int buffer_bytes_available(int cpu, int buftype)
@@ -66,19 +76,17 @@ static int buffer_bytes_available(int cpu, int buftype)
        int remaining, filled;
 
        filled = per_cpu(gator_buffer_write, cpu)[buftype] - per_cpu(gator_buffer_read, cpu)[buftype];
-       if (filled < 0) {
+       if (filled < 0)
                filled += gator_buffer_size[buftype];
-       }
 
        remaining = gator_buffer_size[buftype] - filled;
 
-       if (per_cpu(buffer_space_available, cpu)[buftype]) {
-               // Give some extra room; also allows space to insert the overflow error packet
+       if (per_cpu(buffer_space_available, cpu)[buftype])
+               /* Give some extra room; also allows space to insert the overflow error packet */
                remaining -= 200;
-       } else {
-               // Hysteresis, prevents multiple overflow messages
+       else
+               /* Hysteresis, prevents multiple overflow messages */
                remaining -= 2000;
-       }
 
        return remaining;
 }
@@ -87,11 +95,10 @@ static bool buffer_check_space(int cpu, int buftype, int bytes)
 {
        int remaining = buffer_bytes_available(cpu, buftype);
 
-       if (remaining < bytes) {
+       if (remaining < bytes)
                per_cpu(buffer_space_available, cpu)[buftype] = false;
-       } else {
+       else
                per_cpu(buffer_space_available, cpu)[buftype] = true;
-       }
 
        return per_cpu(buffer_space_available, cpu)[buftype];
 }
@@ -100,10 +107,10 @@ static int contiguous_space_available(int cpu, int buftype)
 {
        int remaining = buffer_bytes_available(cpu, buftype);
        int contiguous = gator_buffer_size[buftype] - per_cpu(gator_buffer_write, cpu)[buftype];
+
        if (remaining < contiguous)
                return remaining;
-       else
-               return contiguous;
+       return contiguous;
 }
 
 static void gator_commit_buffer(int cpu, int buftype, u64 time)
@@ -114,41 +121,38 @@ static void gator_commit_buffer(int cpu, int buftype, u64 time)
        if (!per_cpu(gator_buffer, cpu)[buftype])
                return;
 
-       // post-populate the length, which does not include the response type length nor the length itself, i.e. only the length of the payload
+       /* post-populate the length, which does not include the response type length nor the length itself, i.e. only the length of the payload */
        local_irq_save(flags);
        type_length = gator_response_type ? 1 : 0;
        commit = per_cpu(gator_buffer_commit, cpu)[buftype];
        length = per_cpu(gator_buffer_write, cpu)[buftype] - commit;
-       if (length < 0) {
+       if (length < 0)
                length += gator_buffer_size[buftype];
-       }
        length = length - type_length - sizeof(s32);
 
        if (length <= FRAME_HEADER_SIZE) {
-               // Nothing to write, only the frame header is present
+               /* Nothing to write, only the frame header is present */
                local_irq_restore(flags);
                return;
        }
 
-       for (byte = 0; byte < sizeof(s32); byte++) {
+       for (byte = 0; byte < sizeof(s32); byte++)
                per_cpu(gator_buffer, cpu)[buftype][(commit + type_length + byte) & gator_buffer_mask[buftype]] = (length >> byte * 8) & 0xFF;
-       }
 
        per_cpu(gator_buffer_commit, cpu)[buftype] = per_cpu(gator_buffer_write, cpu)[buftype];
 
        if (gator_live_rate > 0) {
-               while (time > per_cpu(gator_buffer_commit_time, cpu)) {
+               while (time > per_cpu(gator_buffer_commit_time, cpu))
                        per_cpu(gator_buffer_commit_time, cpu) += gator_live_rate;
-               }
        }
 
        marshal_frame(cpu, buftype);
        local_irq_restore(flags);
 
-       // had to delay scheduling work as attempting to schedule work during the context switch is illegal in kernel versions 3.5 and greater
+       /* had to delay scheduling work as attempting to schedule work during the context switch is illegal in kernel versions 3.5 and greater */
        if (per_cpu(in_scheduler_context, cpu)) {
 #ifndef CONFIG_PREEMPT_RT_FULL
-               // mod_timer can not be used in interrupt context in RT-Preempt full
+               /* mod_timer can not be used in interrupt context in RT-Preempt full */
                mod_timer(&gator_buffer_wake_up_timer, jiffies + 1);
 #endif
        } else {
@@ -159,10 +163,9 @@ static void gator_commit_buffer(int cpu, int buftype, u64 time)
 static void buffer_check(int cpu, int buftype, u64 time)
 {
        int filled = per_cpu(gator_buffer_write, cpu)[buftype] - per_cpu(gator_buffer_commit, cpu)[buftype];
-       if (filled < 0) {
+
+       if (filled < 0)
                filled += gator_buffer_size[buftype];
-       }
-       if (filled >= ((gator_buffer_size[buftype] * 3) / 4)) {
+       if (filled >= ((gator_buffer_size[buftype] * 3) / 4))
                gator_commit_buffer(cpu, buftype, time);
-       }
 }
index b621ba93ee5e452e89bd8ab32a49e11118e0c496..654ec606cfad66d1dd776bd5c44039a856e36f20 100644 (file)
@@ -14,16 +14,17 @@ static void gator_buffer_write_packed_int(int cpu, int buftype, int x)
        char *buffer = per_cpu(gator_buffer, cpu)[buftype];
        int packedBytes = 0;
        int more = true;
+
        while (more) {
-               // low order 7 bits of x
+               /* low order 7 bits of x */
                char b = x & 0x7f;
+
                x >>= 7;
 
-               if ((x == 0 && (b & 0x40) == 0) || (x == -1 && (b & 0x40) != 0)) {
+               if ((x == 0 && (b & 0x40) == 0) || (x == -1 && (b & 0x40) != 0))
                        more = false;
-               } else {
+               else
                        b |= 0x80;
-               }
 
                buffer[(write + packedBytes) & mask] = b;
                packedBytes++;
@@ -39,16 +40,17 @@ static void gator_buffer_write_packed_int64(int cpu, int buftype, long long x)
        char *buffer = per_cpu(gator_buffer, cpu)[buftype];
        int packedBytes = 0;
        int more = true;
+
        while (more) {
-               // low order 7 bits of x
+               /* low order 7 bits of x */
                char b = x & 0x7f;
+
                x >>= 7;
 
-               if ((x == 0 && (b & 0x40) == 0) || (x == -1 && (b & 0x40) != 0)) {
+               if ((x == 0 && (b & 0x40) == 0) || (x == -1 && (b & 0x40) != 0))
                        more = false;
-               } else {
+               else
                        b |= 0x80;
-               }
 
                buffer[(write + packedBytes) & mask] = b;
                packedBytes++;
@@ -75,6 +77,7 @@ static void gator_buffer_write_bytes(int cpu, int buftype, const char *x, int le
 static void gator_buffer_write_string(int cpu, int buftype, const char *x)
 {
        int len = strlen(x);
+
        gator_buffer_write_packed_int(cpu, buftype, len);
        gator_buffer_write_bytes(cpu, buftype, x, len);
 }
index 5c7d842070e0097325cf6b4602dcfb8688e94c50..c43cce81522649c00e61bfbde8bde4da178cfd0e 100644 (file)
@@ -7,8 +7,10 @@
  *
  */
 
-#define COOKIEMAP_ENTRIES      1024    /* must be power of 2 */
-#define TRANSLATE_BUFFER_SIZE 512  // must be a power of 2 - 512/4 = 128 entries
+/* must be power of 2 */
+#define COOKIEMAP_ENTRIES      1024
+/* must be a power of 2 - 512/4 = 128 entries */
+#define TRANSLATE_BUFFER_SIZE 512
 #define TRANSLATE_TEXT_SIZE            256
 #define MAX_COLLISIONS         2
 
@@ -38,6 +40,7 @@ static uint32_t cookiemap_code(uint64_t value64)
 {
        uint32_t value = (uint32_t)((value64 >> 32) + value64);
        uint32_t cookiecode = (value >> 24) & 0xff;
+
        cookiecode = cookiecode * 31 + ((value >> 16) & 0xff);
        cookiecode = cookiecode * 31 + ((value >> 8) & 0xff);
        cookiecode = cookiecode * 31 + ((value >> 0) & 0xff);
@@ -52,9 +55,8 @@ static uint32_t gator_chksum_crc32(const char *data)
        int i, length = strlen(data);
 
        crc = 0xFFFFFFFF;
-       for (i = 0; i < length; i++) {
+       for (i = 0; i < length; i++)
                crc = ((crc >> 8) & 0x00FFFFFF) ^ gator_crc32_table[(crc ^ *block++) & 0xFF];
-       }
 
        return (crc ^ 0xFFFFFFFF);
 }
@@ -72,11 +74,12 @@ static uint32_t cookiemap_exists(uint64_t key)
        uint64_t *keys = &(per_cpu(cookie_keys, cpu)[cookiecode]);
        uint32_t *values = &(per_cpu(cookie_values, cpu)[cookiecode]);
 
-       // Can be called from interrupt handler or from work queue
+       /* Can be called from interrupt handler or from work queue */
        local_irq_save(flags);
        for (x = 0; x < MAX_COLLISIONS; x++) {
                if (keys[x] == key) {
                        uint32_t value = values[x];
+
                        for (; x > 0; x--) {
                                keys[x] = keys[x - 1];
                                values[x] = values[x - 1];
@@ -126,7 +129,7 @@ static void translate_buffer_write_args(int cpu, struct task_struct *task, const
        write = per_cpu(translate_buffer_write, cpu);
        next_write = (write + 1) & translate_buffer_mask;
 
-       // At least one entry must always remain available as when read == write, the queue is empty not full
+       /* At least one entry must always remain available as when read == write, the queue is empty not full */
        if (next_write != per_cpu(translate_buffer_read, cpu)) {
                args = &per_cpu(translate_buffer, cpu)[write];
                args->task = task;
@@ -178,11 +181,11 @@ static void wq_cookie_handler(struct work_struct *unused)
 
 static void app_process_wake_up_handler(unsigned long unused_data)
 {
-       // had to delay scheduling work as attempting to schedule work during the context switch is illegal in kernel versions 3.5 and greater
+       /* had to delay scheduling work as attempting to schedule work during the context switch is illegal in kernel versions 3.5 and greater */
        schedule_work(&cookie_work);
 }
 
-// Retrieve full name from proc/pid/cmdline for java processes on Android
+/* Retrieve full name from proc/pid/cmdline for java processes on Android */
 static int translate_app_process(const char **text, int cpu, struct task_struct *task, bool from_wq)
 {
        void *maddr;
@@ -195,12 +198,16 @@ static int translate_app_process(const char **text, int cpu, struct task_struct
        char *buf = per_cpu(translate_text, cpu);
 
 #ifndef CONFIG_PREEMPT_RT_FULL
-       // Push work into a work queue if in atomic context as the kernel functions below might sleep
-       // Rely on the in_interrupt variable rather than in_irq() or in_interrupt() kernel functions, as the value of these functions seems
-       //   inconsistent during a context switch between android/linux versions
+       /* Push work into a work queue if in atomic context as the kernel
+        * functions below might sleep. Rely on the in_interrupt variable
+        * rather than in_irq() or in_interrupt() kernel functions, as the
+        * value of these functions seems inconsistent during a context
+        * switch between android/linux versions
+        */
        if (!from_wq) {
-               // Check if already in buffer
+               /* Check if already in buffer */
                int pos = per_cpu(translate_buffer_read, cpu);
+
                while (pos != per_cpu(translate_buffer_write, cpu)) {
                        if (per_cpu(translate_buffer, cpu)[pos].task == task)
                                goto out;
@@ -209,7 +216,7 @@ static int translate_app_process(const char **text, int cpu, struct task_struct
 
                translate_buffer_write_args(cpu, task, *text);
 
-               // Not safe to call in RT-Preempt full in schedule switch context
+               /* Not safe to call in RT-Preempt full in schedule switch context */
                mod_timer(&app_process_wake_up_timer, jiffies + 1);
                goto out;
        }
@@ -239,7 +246,8 @@ static int translate_app_process(const char **text, int cpu, struct task_struct
 
                copy_from_user_page(page_vma, page, addr, buf, maddr + offset, bytes);
 
-               kunmap(page);   // release page allocated by get_user_pages()
+               /* release page allocated by get_user_pages() */
+               kunmap(page);
                page_cache_release(page);
 
                len -= bytes;
@@ -250,7 +258,7 @@ static int translate_app_process(const char **text, int cpu, struct task_struct
                retval = 1;
        }
 
-       // On app_process startup, /proc/pid/cmdline is initially "zygote" then "<pre-initialized>" but changes after an initial startup period
+       /* On app_process startup, /proc/pid/cmdline is initially "zygote" then "<pre-initialized>" but changes after an initial startup period */
        if (strcmp(*text, "zygote") == 0 || strcmp(*text, "<pre-initialized>") == 0)
                retval = 0;
 
@@ -262,6 +270,8 @@ out:
        return retval;
 }
 
+static const char APP_PROCESS[] = "app_process";
+
 static uint32_t get_cookie(int cpu, struct task_struct *task, const char *text, bool from_wq)
 {
        unsigned long flags, cookie;
@@ -271,16 +281,16 @@ static uint32_t get_cookie(int cpu, struct task_struct *task, const char *text,
        key = (key << 32) | (uint32_t)task->tgid;
 
        cookie = cookiemap_exists(key);
-       if (cookie) {
+       if (cookie)
                return cookie;
-       }
 
-       if (strcmp(text, "app_process") == 0) {
+       /* On 64-bit android app_process can be app_process32 or app_process64 */
+       if (strncmp(text, APP_PROCESS, sizeof(APP_PROCESS) - 1) == 0) {
                if (!translate_app_process(&text, cpu, task, from_wq))
                        return UNRESOLVED_COOKIE;
        }
 
-       // Can be called from interrupt handler or from work queue or from scheduler trace
+       /* Can be called from interrupt handler or from work queue or from scheduler trace */
        local_irq_save(flags);
 
        cookie = UNRESOLVED_COOKIE;
@@ -300,7 +310,7 @@ static int get_exec_cookie(int cpu, struct task_struct *task)
        struct mm_struct *mm = task->mm;
        const char *text;
 
-       // kernel threads have no address space
+       /* kernel threads have no address space */
        if (!mm)
                return NO_COOKIE;
 
@@ -355,7 +365,7 @@ static int cookies_initialize(void)
                per_cpu(cookie_next_key, cpu) = nr_cpu_ids + cpu;
 
                size = COOKIEMAP_ENTRIES * MAX_COLLISIONS * sizeof(uint64_t);
-               per_cpu(cookie_keys, cpu) = (uint64_t *)kmalloc(size, GFP_KERNEL);
+               per_cpu(cookie_keys, cpu) = kmalloc(size, GFP_KERNEL);
                if (!per_cpu(cookie_keys, cpu)) {
                        err = -ENOMEM;
                        goto cookie_setup_error;
@@ -363,14 +373,14 @@ static int cookies_initialize(void)
                memset(per_cpu(cookie_keys, cpu), 0, size);
 
                size = COOKIEMAP_ENTRIES * MAX_COLLISIONS * sizeof(uint32_t);
-               per_cpu(cookie_values, cpu) = (uint32_t *)kmalloc(size, GFP_KERNEL);
+               per_cpu(cookie_values, cpu) = kmalloc(size, GFP_KERNEL);
                if (!per_cpu(cookie_values, cpu)) {
                        err = -ENOMEM;
                        goto cookie_setup_error;
                }
                memset(per_cpu(cookie_values, cpu), 0, size);
 
-               per_cpu(translate_buffer, cpu) = (struct cookie_args *)kmalloc(TRANSLATE_BUFFER_SIZE, GFP_KERNEL);
+               per_cpu(translate_buffer, cpu) = kmalloc(TRANSLATE_BUFFER_SIZE, GFP_KERNEL);
                if (!per_cpu(translate_buffer, cpu)) {
                        err = -ENOMEM;
                        goto cookie_setup_error;
@@ -379,16 +389,16 @@ static int cookies_initialize(void)
                per_cpu(translate_buffer_write, cpu) = 0;
                per_cpu(translate_buffer_read, cpu) = 0;
 
-               per_cpu(translate_text, cpu) = (char *)kmalloc(TRANSLATE_TEXT_SIZE, GFP_KERNEL);
+               per_cpu(translate_text, cpu) = kmalloc(TRANSLATE_TEXT_SIZE, GFP_KERNEL);
                if (!per_cpu(translate_text, cpu)) {
                        err = -ENOMEM;
                        goto cookie_setup_error;
                }
        }
 
-       // build CRC32 table
+       /* build CRC32 table */
        poly = 0x04c11db7;
-       gator_crc32_table = (uint32_t *)kmalloc(256 * sizeof(uint32_t), GFP_KERNEL);
+       gator_crc32_table = kmalloc(256 * sizeof(*gator_crc32_table), GFP_KERNEL);
        if (!gator_crc32_table) {
                err = -ENOMEM;
                goto cookie_setup_error;
@@ -396,11 +406,10 @@ static int cookies_initialize(void)
        for (i = 0; i < 256; i++) {
                crc = i;
                for (j = 8; j > 0; j--) {
-                       if (crc & 1) {
+                       if (crc & 1)
                                crc = (crc >> 1) ^ poly;
-                       } else {
+                       else
                                crc >>= 1;
-                       }
                }
                gator_crc32_table[i] = crc;
        }
index 353645622306048fbcd6f06e6af15da68a1544b0..a157a0013302e8e5aaaf998c074352135a22be4c 100644 (file)
@@ -8,7 +8,7 @@
 
 #include "gator.h"
 
-// gator_events_perf_pmu.c is used if perf is supported
+/* gator_events_perf_pmu.c is used if perf is supported */
 #if GATOR_NO_PERF_SUPPORT
 
 static const char *pmnc_name;
@@ -28,7 +28,7 @@ static const char *pmnc_name;
 #define CCNT 2
 #define CNTMAX (CCNT+1)
 
-static int pmnc_counters = 0;
+static int pmnc_counters;
 static unsigned long pmnc_enabled[CNTMAX];
 static unsigned long pmnc_event[CNTMAX];
 static unsigned long pmnc_key[CNTMAX];
@@ -45,6 +45,7 @@ static inline void armv6_pmnc_write(u32 val)
 static inline u32 armv6_pmnc_read(void)
 {
        u32 val;
+
        asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r" (val));
        return val;
 }
@@ -52,6 +53,7 @@ static inline u32 armv6_pmnc_read(void)
 static void armv6_pmnc_reset_counter(unsigned int cnt)
 {
        u32 val = 0;
+
        switch (cnt) {
        case CCNT:
                asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r" (val));
@@ -74,20 +76,18 @@ int gator_events_armv6_create_files(struct super_block *sb, struct dentry *root)
 
        for (i = PMN0; i <= CCNT; i++) {
                char buf[40];
-               if (i == CCNT) {
-                       snprintf(buf, sizeof buf, "ARM_%s_ccnt", pmnc_name);
-               } else {
-                       snprintf(buf, sizeof buf, "ARM_%s_cnt%d", pmnc_name, i);
-               }
+
+               if (i == CCNT)
+                       snprintf(buf, sizeof(buf), "ARM_%s_ccnt", pmnc_name);
+               else
+                       snprintf(buf, sizeof(buf), "ARM_%s_cnt%d", pmnc_name, i);
                dir = gatorfs_mkdir(sb, root, buf);
-               if (!dir) {
+               if (!dir)
                        return -1;
-               }
                gatorfs_create_ulong(sb, dir, "enabled", &pmnc_enabled[i]);
                gatorfs_create_ro_ulong(sb, dir, "key", &pmnc_key[i]);
-               if (i != CCNT) {
+               if (i != CCNT)
                        gatorfs_create_ulong(sb, dir, "event", &pmnc_event[i]);
-               }
        }
 
        return 0;
@@ -98,9 +98,8 @@ static int gator_events_armv6_online(int **buffer, bool migrate)
        unsigned int cnt, len = 0, cpu = smp_processor_id();
        u32 pmnc;
 
-       if (armv6_pmnc_read() & PMCR_E) {
+       if (armv6_pmnc_read() & PMCR_E)
                armv6_pmnc_write(armv6_pmnc_read() & ~PMCR_E);
-       }
 
        /* initialize PMNC, reset overflow, D bit, C bit and P bit. */
        armv6_pmnc_write(PMCR_OFL_PMN0 | PMCR_OFL_PMN1 | PMCR_OFL_CCNT |
@@ -115,19 +114,18 @@ static int gator_events_armv6_online(int **buffer, bool migrate)
 
                event = pmnc_event[cnt] & 255;
 
-               // Set event (if destined for PMNx counters)
-               if (cnt == PMN0) {
+               /* Set event (if destined for PMNx counters) */
+               if (cnt == PMN0)
                        pmnc |= event << 20;
-               } else if (cnt == PMN1) {
+               else if (cnt == PMN1)
                        pmnc |= event << 12;
-               }
 
-               // Reset counter
+               /* Reset counter */
                armv6_pmnc_reset_counter(cnt);
        }
        armv6_pmnc_write(pmnc | PMCR_E);
 
-       // return zero values, no need to read as the counters were just reset
+       /* return zero values, no need to read as the counters were just reset */
        for (cnt = PMN0; cnt <= CCNT; cnt++) {
                if (pmnc_enabled[cnt]) {
                        per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
@@ -146,9 +144,8 @@ static int gator_events_armv6_offline(int **buffer, bool migrate)
        unsigned int cnt;
 
        armv6_pmnc_write(armv6_pmnc_read() & ~PMCR_E);
-       for (cnt = PMN0; cnt <= CCNT; cnt++) {
+       for (cnt = PMN0; cnt <= CCNT; cnt++)
                armv6_pmnc_reset_counter(cnt);
-       }
 
        return 0;
 }
@@ -163,19 +160,19 @@ static void gator_events_armv6_stop(void)
        }
 }
 
-static int gator_events_armv6_read(int **buffer)
+static int gator_events_armv6_read(int **buffer, bool sched_switch)
 {
        int cnt, len = 0;
        int cpu = smp_processor_id();
 
-       // a context switch may occur before the online hotplug event, thus need to check that the pmu is enabled
-       if (!(armv6_pmnc_read() & PMCR_E)) {
+       /* a context switch may occur before the online hotplug event, thus need to check that the pmu is enabled */
+       if (!(armv6_pmnc_read() & PMCR_E))
                return 0;
-       }
 
        for (cnt = PMN0; cnt <= CCNT; cnt++) {
                if (pmnc_enabled[cnt]) {
                        u32 value = 0;
+
                        switch (cnt) {
                        case CCNT:
                                asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r" (value));
index 153119b463e654c1cacc53633cfb412b69da9a43..09c94220114ca018f0a42986ba24dce7e579b255 100644 (file)
 
 #include "gator.h"
 
-// gator_events_perf_pmu.c is used if perf is supported
+/* gator_events_perf_pmu.c is used if perf is supported */
 #if GATOR_NO_PERF_SUPPORT
 
-// Per-CPU PMNC: config reg
+/* Per-CPU PMNC: config reg */
 #define PMNC_E         (1 << 0)        /* Enable all counters */
 #define PMNC_P         (1 << 1)        /* Reset all counters */
 #define PMNC_C         (1 << 2)        /* Cycle counter reset */
 #define        PMNC_MASK       0x3f    /* Mask for writable bits */
 
-// ccnt reg
+/* ccnt reg */
 #define CCNT_REG       (1 << 31)
 
-#define CCNT           0
+#define CCNT           0
 #define CNT0           1
-#define CNTMAX                 (6+1)
+#define CNTMAX         (6+1)
 
 static const char *pmnc_name;
 static int pmnc_counters;
@@ -49,6 +49,7 @@ inline void armv7_pmnc_write(u32 val)
 inline u32 armv7_pmnc_read(void)
 {
        u32 val;
+
        asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
        return val;
 }
@@ -61,10 +62,10 @@ inline u32 armv7_ccnt_read(u32 reset_value)
        u32 val;
 
        local_irq_save(flags);
-       asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (den));       // disable
-       asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));        // read
-       asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (newval));    // new value
-       asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (den));       // enable
+       asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (den));       /* disable */
+       asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));        /* read */
+       asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (newval));    /* new value */
+       asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (den));       /* enable */
        local_irq_restore(flags);
 
        return val;
@@ -79,11 +80,11 @@ inline u32 armv7_cntn_read(unsigned int cnt, u32 reset_value)
        u32 oldval;
 
        local_irq_save(flags);
-       asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (den));       // disable
-       asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (sel));       // select
-       asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (oldval));     // read
-       asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (newval));    // new value
-       asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (den));       // enable
+       asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (den));       /* disable */
+       asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (sel));       /* select */
+       asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (oldval));     /* read */
+       asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (newval));    /* new value */
+       asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (den));       /* enable */
        local_irq_restore(flags);
 
        return oldval;
@@ -92,13 +93,15 @@ inline u32 armv7_cntn_read(unsigned int cnt, u32 reset_value)
 static inline void armv7_pmnc_disable_interrupt(unsigned int cnt)
 {
        u32 val = cnt ? (1 << (cnt - CNT0)) : (1 << 31);
+
        asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
 }
 
 inline u32 armv7_pmnc_reset_interrupt(void)
 {
-       // Get and reset overflow status flags
+       /* Get and reset overflow status flags */
        u32 flags;
+
        asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (flags));
        flags &= 0x8000003f;
        asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (flags));
@@ -108,6 +111,7 @@ inline u32 armv7_pmnc_reset_interrupt(void)
 static inline u32 armv7_pmnc_enable_counter(unsigned int cnt)
 {
        u32 val = cnt ? (1 << (cnt - CNT0)) : CCNT_REG;
+
        asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
        return cnt;
 }
@@ -115,6 +119,7 @@ static inline u32 armv7_pmnc_enable_counter(unsigned int cnt)
 static inline u32 armv7_pmnc_disable_counter(unsigned int cnt)
 {
        u32 val = cnt ? (1 << (cnt - CNT0)) : CCNT_REG;
+
        asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
        return cnt;
 }
@@ -122,15 +127,15 @@ static inline u32 armv7_pmnc_disable_counter(unsigned int cnt)
 static inline int armv7_pmnc_select_counter(unsigned int cnt)
 {
        u32 val = (cnt - CNT0);
+
        asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
        return cnt;
 }
 
 static inline void armv7_pmnc_write_evtsel(unsigned int cnt, u32 val)
 {
-       if (armv7_pmnc_select_counter(cnt) == cnt) {
+       if (armv7_pmnc_select_counter(cnt) == cnt)
                asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
-       }
 }
 
 static int gator_events_armv7_create_files(struct super_block *sb, struct dentry *root)
@@ -140,20 +145,18 @@ static int gator_events_armv7_create_files(struct super_block *sb, struct dentry
 
        for (i = 0; i < pmnc_counters; i++) {
                char buf[40];
-               if (i == 0) {
-                       snprintf(buf, sizeof buf, "%s_ccnt", pmnc_name);
-               } else {
-                       snprintf(buf, sizeof buf, "%s_cnt%d", pmnc_name, i - 1);
-               }
+
+               if (i == 0)
+                       snprintf(buf, sizeof(buf), "%s_ccnt", pmnc_name);
+               else
+                       snprintf(buf, sizeof(buf), "%s_cnt%d", pmnc_name, i - 1);
                dir = gatorfs_mkdir(sb, root, buf);
-               if (!dir) {
+               if (!dir)
                        return -1;
-               }
                gatorfs_create_ulong(sb, dir, "enabled", &pmnc_enabled[i]);
                gatorfs_create_ro_ulong(sb, dir, "key", &pmnc_key[i]);
-               if (i > 0) {
+               if (i > 0)
                        gatorfs_create_ulong(sb, dir, "event", &pmnc_event[i]);
-               }
        }
 
        return 0;
@@ -163,14 +166,13 @@ static int gator_events_armv7_online(int **buffer, bool migrate)
 {
        unsigned int cnt, len = 0, cpu = smp_processor_id();
 
-       if (armv7_pmnc_read() & PMNC_E) {
+       if (armv7_pmnc_read() & PMNC_E)
                armv7_pmnc_write(armv7_pmnc_read() & ~PMNC_E);
-       }
 
-       // Initialize & Reset PMNC: C bit and P bit
+       /* Initialize & Reset PMNC: C bit and P bit */
        armv7_pmnc_write(PMNC_P | PMNC_C);
 
-       // Reset overflow flags
+       /* Reset overflow flags */
        armv7_pmnc_reset_interrupt();
 
        for (cnt = CCNT; cnt < CNTMAX; cnt++) {
@@ -179,28 +181,28 @@ static int gator_events_armv7_online(int **buffer, bool migrate)
                if (!pmnc_enabled[cnt])
                        continue;
 
-               // Disable counter
+               /* Disable counter */
                armv7_pmnc_disable_counter(cnt);
 
                event = pmnc_event[cnt] & 255;
 
-               // Set event (if destined for PMNx counters), we don't need to set the event if it's a cycle count
+               /* Set event (if destined for PMNx counters), we don't need to set the event if it's a cycle count */
                if (cnt != CCNT)
                        armv7_pmnc_write_evtsel(cnt, event);
 
                armv7_pmnc_disable_interrupt(cnt);
 
-               // Reset counter
+               /* Reset counter */
                cnt ? armv7_cntn_read(cnt, 0) : armv7_ccnt_read(0);
 
-               // Enable counter
+               /* Enable counter */
                armv7_pmnc_enable_counter(cnt);
        }
 
-       // enable
+       /* enable */
        armv7_pmnc_write(armv7_pmnc_read() | PMNC_E);
 
-       // return zero values, no need to read as the counters were just reset
+       /* return zero values, no need to read as the counters were just reset */
        for (cnt = 0; cnt < pmnc_counters; cnt++) {
                if (pmnc_enabled[cnt]) {
                        per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
@@ -216,7 +218,7 @@ static int gator_events_armv7_online(int **buffer, bool migrate)
 
 static int gator_events_armv7_offline(int **buffer, bool migrate)
 {
-       // disable all counters, including PMCCNTR; overflow IRQs will not be signaled
+       /* disable all counters, including PMCCNTR; overflow IRQs will not be signaled */
        armv7_pmnc_write(armv7_pmnc_read() & ~PMNC_E);
 
        return 0;
@@ -232,24 +234,23 @@ static void gator_events_armv7_stop(void)
        }
 }
 
-static int gator_events_armv7_read(int **buffer)
+static int gator_events_armv7_read(int **buffer, bool sched_switch)
 {
        int cnt, len = 0;
        int cpu = smp_processor_id();
 
-       // a context switch may occur before the online hotplug event, thus need to check that the pmu is enabled
-       if (!(armv7_pmnc_read() & PMNC_E)) {
+       /* a context switch may occur before the online hotplug event, thus need to check that the pmu is enabled */
+       if (!(armv7_pmnc_read() & PMNC_E))
                return 0;
-       }
 
        for (cnt = 0; cnt < pmnc_counters; cnt++) {
                if (pmnc_enabled[cnt]) {
                        int value;
-                       if (cnt == CCNT) {
+
+                       if (cnt == CCNT)
                                value = armv7_ccnt_read(0);
-                       } else {
+                       else
                                value = armv7_cntn_read(cnt, 0);
-                       }
                        per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
                        per_cpu(perfCnt, cpu)[len++] = value;
                }
@@ -290,17 +291,16 @@ int gator_events_armv7_init(void)
                pmnc_name = "ARMv7_Cortex_A9";
                pmnc_counters = 6;
                break;
-       // ARM Cortex A12 is not supported by version of Linux before 3.0
        case CORTEX_A15:
                pmnc_name = "ARMv7_Cortex_A15";
                pmnc_counters = 6;
                break;
-       // ARM Cortex A17 is not supported by version of Linux before 3.0
+       /* ARM Cortex A17 is not supported by version of Linux before 3.0 */
        default:
                return -1;
        }
 
-       pmnc_counters++;        // CNT[n] + CCNT
+       pmnc_counters++;        /* CNT[n] + CCNT */
 
        for (cnt = CCNT; cnt < CNTMAX; cnt++) {
                pmnc_enabled[cnt] = 0;
index 0a1dc1aef5233449de2e2e0c68571a1ccd484e43..a352a54afa025d5ab4927fd3936f9807fb815b8e 100644 (file)
@@ -28,26 +28,35 @@ static ulong block_rq_rd_key;
 static atomic_t blockCnt[BLOCK_TOTAL];
 static int blockGet[BLOCK_TOTAL * 4];
 
+/* Tracepoint changed in 3.15 backported to older kernels. The Makefile tries to autodetect the correct value, but if it fails change the #if below */
+#if OLD_BLOCK_RQ_COMPLETE
+GATOR_DEFINE_PROBE(block_rq_complete, TP_PROTO(struct request_queue *q, struct request *rq))
+#else
 GATOR_DEFINE_PROBE(block_rq_complete, TP_PROTO(struct request_queue *q, struct request *rq, unsigned int nr_bytes))
+#endif
 {
        int write;
+       unsigned int size;
 
        if (!rq)
                return;
 
        write = rq->cmd_flags & EVENTWRITE;
+#if OLD_BLOCK_RQ_COMPLETE
+       size = rq->resid_len;
+#else
+       size = nr_bytes;
+#endif
 
-       if (!nr_bytes)
+       if (!size)
                return;
 
        if (write) {
-               if (block_rq_wr_enabled) {
-                       atomic_add(nr_bytes, &blockCnt[BLOCK_RQ_WR]);
-               }
+               if (block_rq_wr_enabled)
+                       atomic_add(size, &blockCnt[BLOCK_RQ_WR]);
        } else {
-               if (block_rq_rd_enabled) {
-                       atomic_add(nr_bytes, &blockCnt[BLOCK_RQ_RD]);
-               }
+               if (block_rq_rd_enabled)
+                       atomic_add(size, &blockCnt[BLOCK_RQ_RD]);
        }
 }
 
@@ -57,17 +66,15 @@ static int gator_events_block_create_files(struct super_block *sb, struct dentry
 
        /* block_complete_wr */
        dir = gatorfs_mkdir(sb, root, "Linux_block_rq_wr");
-       if (!dir) {
+       if (!dir)
                return -1;
-       }
        gatorfs_create_ulong(sb, dir, "enabled", &block_rq_wr_enabled);
        gatorfs_create_ro_ulong(sb, dir, "key", &block_rq_wr_key);
 
        /* block_complete_rd */
        dir = gatorfs_mkdir(sb, root, "Linux_block_rq_rd");
-       if (!dir) {
+       if (!dir)
                return -1;
-       }
        gatorfs_create_ulong(sb, dir, "enabled", &block_rq_rd_enabled);
        gatorfs_create_ro_ulong(sb, dir, "key", &block_rq_rd_key);
 
@@ -76,7 +83,7 @@ static int gator_events_block_create_files(struct super_block *sb, struct dentry
 
 static int gator_events_block_start(void)
 {
-       // register tracepoints
+       /* register tracepoints */
        if (block_rq_wr_enabled || block_rq_rd_enabled)
                if (GATOR_REGISTER_TRACE(block_rq_complete))
                        goto fail_block_rq_exit;
@@ -84,7 +91,7 @@ static int gator_events_block_start(void)
 
        return 0;
 
-       // unregister tracepoints on error
+       /* unregister tracepoints on error */
 fail_block_rq_exit:
        pr_err("gator: block event tracepoints failed to activate, please verify that tracepoints are enabled in the linux kernel\n");
 
@@ -101,19 +108,19 @@ static void gator_events_block_stop(void)
        block_rq_rd_enabled = 0;
 }
 
-static int gator_events_block_read(int **buffer)
+static int gator_events_block_read(int **buffer, bool sched_switch)
 {
        int len, value, data = 0;
 
-       if (!on_primary_core()) {
+       if (!on_primary_core())
                return 0;
-       }
 
        len = 0;
        if (block_rq_wr_enabled && (value = atomic_read(&blockCnt[BLOCK_RQ_WR])) > 0) {
                atomic_sub(value, &blockCnt[BLOCK_RQ_WR]);
                blockGet[len++] = block_rq_wr_key;
-               blockGet[len++] = 0;    // indicates to Streamline that value bytes were written now, not since the last message
+               /* Indicates to Streamline that value bytes were written now, not since the last message */
+               blockGet[len++] = 0;
                blockGet[len++] = block_rq_wr_key;
                blockGet[len++] = value;
                data += value;
@@ -121,7 +128,8 @@ static int gator_events_block_read(int **buffer)
        if (block_rq_rd_enabled && (value = atomic_read(&blockCnt[BLOCK_RQ_RD])) > 0) {
                atomic_sub(value, &blockCnt[BLOCK_RQ_RD]);
                blockGet[len++] = block_rq_rd_key;
-               blockGet[len++] = 0;    // indicates to Streamline that value bytes were read now, not since the last message
+               /* Indicates to Streamline that value bytes were read now, not since the last message */
+               blockGet[len++] = 0;
                blockGet[len++] = block_rq_rd_key;
                blockGet[len++] = value;
                data += value;
diff --git a/drivers/gator/gator_events_ccn-504.c b/drivers/gator/gator_events_ccn-504.c
deleted file mode 100644 (file)
index 024ffc2..0000000
+++ /dev/null
@@ -1,346 +0,0 @@
-/**
- * Copyright (C) ARM Limited 2013-2014. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/io.h>
-#include <linux/module.h>
-
-#include "gator.h"
-
-#define NUM_REGIONS 256
-#define REGION_SIZE (64*1024)
-#define REGION_DEBUG 1
-#define REGION_XP 64
-#define NUM_XPS 11
-
-// DT (Debug) region
-#define PMEVCNTSR0    0x0150
-#define PMCCNTRSR     0x0190
-#define PMCR          0x01A8
-#define PMSR          0x01B0
-#define PMSR_REQ      0x01B8
-#define PMSR_CLR      0x01C0
-
-// XP region
-#define DT_CONFIG     0x0300
-#define DT_CONTROL    0x0370
-
-// Multiple
-#define PMU_EVENT_SEL 0x0600
-#define OLY_ID        0xFF00
-
-#define CCNT 4
-#define CNTMAX (CCNT + 1)
-
-#define get_pmu_event_id(event) (((event) >> 0) & 0xFF)
-#define get_node_type(event) (((event) >> 8) & 0xFF)
-#define get_region(event) (((event) >> 16) & 0xFF)
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
-
-// From kernel/params.c
-#define STANDARD_PARAM_DEF(name, type, format, tmptype, strtolfn)              \
-       int param_set_##name(const char *val, struct kernel_param *kp)  \
-       {                                                               \
-               tmptype l;                                              \
-               int ret;                                                \
-                                                                       \
-               if (!val) return -EINVAL;                               \
-               ret = strtolfn(val, 0, &l);                             \
-               if (ret == -EINVAL || ((type)l != l))                   \
-                       return -EINVAL;                                 \
-               *((type *)kp->arg) = l;                                 \
-               return 0;                                               \
-       }                                                               \
-       int param_get_##name(char *buffer, struct kernel_param *kp)     \
-       {                                                               \
-               return sprintf(buffer, format, *((type *)kp->arg));     \
-       }
-
-#else
-
-// From kernel/params.c
-#define STANDARD_PARAM_DEF(name, type, format, tmptype, strtolfn)              \
-       int param_set_##name(const char *val, const struct kernel_param *kp) \
-       {                                                               \
-               tmptype l;                                              \
-               int ret;                                                \
-                                                                       \
-               ret = strtolfn(val, 0, &l);                             \
-               if (ret < 0 || ((type)l != l))                          \
-                       return ret < 0 ? ret : -EINVAL;                 \
-               *((type *)kp->arg) = l;                                 \
-               return 0;                                               \
-       }                                                               \
-       int param_get_##name(char *buffer, const struct kernel_param *kp) \
-       {                                                               \
-               return scnprintf(buffer, PAGE_SIZE, format,             \
-                               *((type *)kp->arg));                    \
-       }                                                               \
-       struct kernel_param_ops param_ops_##name = {                    \
-               .set = param_set_##name,                                \
-               .get = param_get_##name,                                \
-       };                                                              \
-       EXPORT_SYMBOL(param_set_##name);                                \
-       EXPORT_SYMBOL(param_get_##name);                                \
-       EXPORT_SYMBOL(param_ops_##name)
-
-#endif
-
-STANDARD_PARAM_DEF(u64, u64, "%llu", u64, strict_strtoull);
-
-// From include/linux/moduleparam.h
-#define param_check_u64(name, p) __param_check(name, p, u64)
-
-MODULE_PARM_DESC(ccn504_addr, "CCN-504 physical base address");
-static u64 ccn504_addr = 0;
-module_param(ccn504_addr, u64, 0444);
-
-static void __iomem *gator_events_ccn504_base;
-static bool gator_events_ccn504_global_enabled;
-static unsigned long gator_events_ccn504_enabled[CNTMAX];
-static unsigned long gator_events_ccn504_event[CNTMAX];
-static unsigned long gator_events_ccn504_key[CNTMAX];
-static int gator_events_ccn504_buffer[2*CNTMAX];
-static int gator_events_ccn504_prev[CNTMAX];
-
-static void gator_events_ccn504_create_shutdown(void)
-{
-       if (gator_events_ccn504_base != NULL) {
-               iounmap(gator_events_ccn504_base);
-       }
-}
-
-static int gator_events_ccn504_create_files(struct super_block *sb, struct dentry *root)
-{
-       struct dentry *dir;
-       int i;
-       char buf[32];
-
-       for (i = 0; i < CNTMAX; ++i) {
-               if (i == CCNT) {
-                       snprintf(buf, sizeof(buf), "CCN-504_ccnt");
-               } else {
-                       snprintf(buf, sizeof(buf), "CCN-504_cnt%i", i);
-               }
-               dir = gatorfs_mkdir(sb, root, buf);
-               if (!dir) {
-                       return -1;
-               }
-
-               gatorfs_create_ulong(sb, dir, "enabled", &gator_events_ccn504_enabled[i]);
-               if (i != CCNT) {
-                       gatorfs_create_ulong(sb, dir, "event", &gator_events_ccn504_event[i]);
-               }
-               gatorfs_create_ro_ulong(sb, dir, "key", &gator_events_ccn504_key[i]);
-       }
-
-       return 0;
-}
-
-static void gator_events_ccn504_set_dt_config(int xp_node_id, int event_num, int value)
-{
-       u32 dt_config;
-
-       dt_config = readl(gator_events_ccn504_base + (REGION_XP + xp_node_id)*REGION_SIZE + DT_CONFIG);
-       dt_config |= (value + event_num) << (4*event_num);
-       writel(dt_config, gator_events_ccn504_base + (REGION_XP + xp_node_id)*REGION_SIZE + DT_CONFIG);
-}
-
-static int gator_events_ccn504_start(void)
-{
-       int i;
-
-       gator_events_ccn504_global_enabled = 0;
-       for (i = 0; i < CNTMAX; ++i) {
-               if (gator_events_ccn504_enabled[i]) {
-                       gator_events_ccn504_global_enabled = 1;
-                       break;
-               }
-       }
-
-       if (!gator_events_ccn504_global_enabled) {
-               return 0;
-       }
-
-       memset(&gator_events_ccn504_prev, 0x80, sizeof(gator_events_ccn504_prev));
-
-       // Disable INTREQ on overflow
-       // [6] ovfl_intr_en = 0
-       // perhaps set to 1?
-       // [5] cntr_rst = 0
-       // No register paring
-       // [4:1] cntcfg = 0
-       // Enable PMU features
-       // [0] pmu_en = 1
-       writel(0x1, gator_events_ccn504_base + REGION_DEBUG*REGION_SIZE + PMCR);
-
-       // Configure the XPs
-       for (i = 0; i < NUM_XPS; ++i) {
-               int dt_control;
-
-               // Pass on all events
-               writel(0, gator_events_ccn504_base + (REGION_XP + i)*REGION_SIZE + DT_CONFIG);
-
-               // Enable PMU capability
-               // [0] dt_enable = 1
-               dt_control = readl(gator_events_ccn504_base + (REGION_XP + i)*REGION_SIZE + DT_CONTROL);
-               dt_control |= 0x1;
-               writel(dt_control, gator_events_ccn504_base + (REGION_XP + i)*REGION_SIZE + DT_CONTROL);
-       }
-
-       // Assume no other pmu_event_sel registers are set
-
-       // cycle counter does not need to be enabled
-       for (i = 0; i < CCNT; ++i) {
-               int pmu_event_id;
-               int node_type;
-               int region;
-               u32 pmu_event_sel;
-               u32 oly_id_whole;
-               u32 oly_id;
-               u32 node_id;
-
-               if (!gator_events_ccn504_enabled[i]) {
-                       continue;
-               }
-
-               pmu_event_id = get_pmu_event_id(gator_events_ccn504_event[i]);
-               node_type = get_node_type(gator_events_ccn504_event[i]);
-               region = get_region(gator_events_ccn504_event[i]);
-
-               // Verify the node_type
-               oly_id_whole = readl(gator_events_ccn504_base + region*REGION_SIZE + OLY_ID);
-               oly_id = oly_id_whole & 0x1F;
-               node_id = (oly_id_whole >> 8) & 0x7F;
-               if ((oly_id != node_type) ||
-                               ((node_type == 0x16) && ((oly_id != 0x14) && (oly_id != 0x15) && (oly_id != 0x16) && (oly_id != 0x18) && (oly_id != 0x19) && (oly_id != 0x1A)))) {
-                       printk(KERN_ERR "gator: oly_id is 0x%x expected 0x%x\n", oly_id, node_type);
-                       return -1;
-               }
-
-               // Set the control register
-               pmu_event_sel = readl(gator_events_ccn504_base + region*REGION_SIZE + PMU_EVENT_SEL);
-               switch (node_type) {
-               case 0x08: // XP
-                       pmu_event_sel |= pmu_event_id << (7*i);
-                       gator_events_ccn504_set_dt_config(node_id, i, 0x4);
-                       break;
-               case 0x04: // HN-F
-               case 0x16: // RN-I
-               case 0x10: // SBAS
-                       pmu_event_sel |= pmu_event_id << (4*i);
-                       gator_events_ccn504_set_dt_config(node_id/2, i, (node_id & 1) == 0 ? 0x8 : 0xC);
-                       break;
-               }
-               writel(pmu_event_sel, gator_events_ccn504_base + region*REGION_SIZE + PMU_EVENT_SEL);
-       }
-
-       return 0;
-}
-
-static void gator_events_ccn504_stop(void)
-{
-       int i;
-
-       if (!gator_events_ccn504_global_enabled) {
-               return;
-       }
-
-       // cycle counter does not need to be disabled
-       for (i = 0; i < CCNT; ++i) {
-               int region;
-
-               if (!gator_events_ccn504_enabled[i]) {
-                       continue;
-               }
-
-               region = get_region(gator_events_ccn504_event[i]);
-
-               writel(0, gator_events_ccn504_base + region*REGION_SIZE + PMU_EVENT_SEL);
-       }
-
-       // Clear dt_config
-       for (i = 0; i < NUM_XPS; ++i) {
-               writel(0, gator_events_ccn504_base + (REGION_XP + i)*REGION_SIZE + DT_CONFIG);
-       }
-}
-
-static int gator_events_ccn504_read(int **buffer)
-{
-       int i;
-       int len = 0;
-       int value;
-
-       if (!on_primary_core() || !gator_events_ccn504_global_enabled) {
-               return 0;
-       }
-
-       // Verify the pmsr register is zero
-       while (readl(gator_events_ccn504_base + REGION_DEBUG*REGION_SIZE + PMSR) != 0);
-
-       // Request a PMU snapshot
-       writel(1, gator_events_ccn504_base + REGION_DEBUG*REGION_SIZE + PMSR_REQ);
-
-       // Wait for the snapshot
-       while (readl(gator_events_ccn504_base + REGION_DEBUG*REGION_SIZE + PMSR) == 0);
-
-       // Read the shadow registers
-       for (i = 0; i < CNTMAX; ++i) {
-               if (!gator_events_ccn504_enabled[i]) {
-                       continue;
-               }
-
-               value = readl(gator_events_ccn504_base + REGION_DEBUG*REGION_SIZE + (i == CCNT ? PMCCNTRSR : PMEVCNTSR0 + 8*i));
-               if (gator_events_ccn504_prev[i] != 0x80808080) {
-                       gator_events_ccn504_buffer[len++] = gator_events_ccn504_key[i];
-                       gator_events_ccn504_buffer[len++] = value - gator_events_ccn504_prev[i];
-               }
-               gator_events_ccn504_prev[i] = value;
-
-               // Are the counters registers cleared when read? Is that what the cntr_rst bit on the pmcr register does?
-       }
-
-       // Clear the PMU snapshot status
-       writel(1, gator_events_ccn504_base + REGION_DEBUG*REGION_SIZE + PMSR_CLR);
-
-       if (buffer)
-               *buffer = gator_events_ccn504_buffer;
-
-       return len;
-}
-
-static struct gator_interface gator_events_ccn504_interface = {
-       .shutdown = gator_events_ccn504_create_shutdown,
-       .create_files = gator_events_ccn504_create_files,
-       .start = gator_events_ccn504_start,
-       .stop = gator_events_ccn504_stop,
-       .read = gator_events_ccn504_read,
-};
-
-int gator_events_ccn504_init(void)
-{
-       int i;
-
-       if (ccn504_addr == 0) {
-               return -1;
-       }
-
-       gator_events_ccn504_base = ioremap(ccn504_addr, NUM_REGIONS*REGION_SIZE);
-       if (gator_events_ccn504_base == NULL) {
-               printk(KERN_ERR "gator: ioremap returned NULL\n");
-               return -1;
-       }
-
-       for (i = 0; i < CNTMAX; ++i) {
-               gator_events_ccn504_enabled[i] = 0;
-               gator_events_ccn504_event[i] = 0;
-               gator_events_ccn504_key[i] = gator_events_get_key();
-       }
-
-       return gator_events_install(&gator_events_ccn504_interface);
-}
index facbdd62325eccbc722a82f516ac15652a7ad860..5221aac581b3327aa34fda11f5e2c3bd7b879aed 100644 (file)
@@ -42,17 +42,15 @@ static int gator_events_irq_create_files(struct super_block *sb, struct dentry *
 
        /* irq */
        dir = gatorfs_mkdir(sb, root, "Linux_irq_irq");
-       if (!dir) {
+       if (!dir)
                return -1;
-       }
        gatorfs_create_ulong(sb, dir, "enabled", &hardirq_enabled);
        gatorfs_create_ro_ulong(sb, dir, "key", &hardirq_key);
 
        /* soft irq */
        dir = gatorfs_mkdir(sb, root, "Linux_irq_softirq");
-       if (!dir) {
+       if (!dir)
                return -1;
-       }
        gatorfs_create_ulong(sb, dir, "enabled", &softirq_enabled);
        gatorfs_create_ro_ulong(sb, dir, "key", &softirq_key);
 
@@ -63,7 +61,7 @@ static int gator_events_irq_online(int **buffer, bool migrate)
 {
        int len = 0, cpu = get_physical_cpu();
 
-       // synchronization with the irq_exit functions is not necessary as the values are being reset
+       /* synchronization with the irq_exit functions is not necessary as the values are being reset */
        if (hardirq_enabled) {
                atomic_set(&per_cpu(irqCnt, cpu)[HARDIRQ], 0);
                per_cpu(irqGet, cpu)[len++] = hardirq_key;
@@ -84,7 +82,7 @@ static int gator_events_irq_online(int **buffer, bool migrate)
 
 static int gator_events_irq_start(void)
 {
-       // register tracepoints
+       /* register tracepoints */
        if (hardirq_enabled)
                if (GATOR_REGISTER_TRACE(irq_handler_exit))
                        goto fail_hardirq_exit;
@@ -95,7 +93,7 @@ static int gator_events_irq_start(void)
 
        return 0;
 
-       // unregister tracepoints on error
+       /* unregister tracepoints on error */
 fail_softirq_exit:
        if (hardirq_enabled)
                GATOR_UNREGISTER_TRACE(irq_handler_exit);
@@ -117,7 +115,7 @@ static void gator_events_irq_stop(void)
        softirq_enabled = 0;
 }
 
-static int gator_events_irq_read(int **buffer)
+static int gator_events_irq_read(int **buffer, bool sched_switch)
 {
        int len, value;
        int cpu = get_physical_cpu();
index 553f9707bdbf6161b3b0d850c6e41556d8e7a1e8..73aaac32327e8ea1582557e6839f718c8da5d94c 100644 (file)
@@ -91,7 +91,7 @@ static void gator_events_l2c310_stop(void)
        writel(0, l2c310_base + L2X0_EVENT_CNT_CTRL);
 }
 
-static int gator_events_l2c310_read(int **buffer)
+static int gator_events_l2c310_read(int **buffer, bool sched_switch)
 {
        static const unsigned long l2x0_event_cntx_val[L2C310_COUNTERS_NUM] = {
                L2X0_EVENT_CNT0_VAL,
@@ -149,8 +149,8 @@ static void __iomem *gator_events_l2c310_probe(void)
                0xa0412000,
 #endif
 #if defined(CONFIG_ARCH_VEXPRESS)
-               0x1e00a000, // A9x4 core tile (HBI-0191)
-               0x2c0f0000, // New memory map tiles
+               0x1e00a000, /* A9x4 core tile (HBI-0191) */
+               0x2c0f0000, /* New memory map tiles */
 #endif
        };
        int i;
index 85d47645a9d9e0333ab245ee246da3ce2c0cb901..9cf43fe2c29b3565d93096a58cf8ad73b3c2eccd 100644 (file)
 #include "gator_events_mali_4xx.h"
 
 /*
- * There are (currently) four different variants of the comms between gator and Mali:
- * 1 (deprecated): No software counter support
- * 2 (deprecated): Tracepoint called for each separate s/w counter value as it appears
- * 3 (default): Single tracepoint for all s/w counters in a bundle.
- * Interface style 3 is the default if no other is specified.  1 and 2 will be eliminated when
- * existing Mali DDKs are upgraded.
- * 4. As above, but for the Utgard (Mali-450) driver.
- */
+* There have been four different variants of the comms between gator and Mali depending on driver version:
+* # | DDK vsn range             | Support                                                             | Notes
+*
+* 1 | (obsolete)                | No software counter support                                         | Obsolete patches
+* 2 | (obsolete)                | Tracepoint called for each separate s/w counter value as it appears | Obsolete patches
+* 3 | r3p0-04rel0 - r3p2-01rel2 | Single tracepoint for all s/w counters in a bundle.                 |
+* 4 | r3p2-01rel3 - date        | As above but with extensions for MP devices (Mali-450)              | At least r4p0-00rel1
+*/
 
 #if !defined(GATOR_MALI_INTERFACE_STYLE)
-#define GATOR_MALI_INTERFACE_STYLE (3)
+#define GATOR_MALI_INTERFACE_STYLE (4)
+#endif
+
+#if GATOR_MALI_INTERFACE_STYLE == 1
+#error GATOR_MALI_INTERFACE_STYLE 1 is obsolete
+#elif GATOR_MALI_INTERFACE_STYLE == 2
+#error GATOR_MALI_INTERFACE_STYLE 2 is obsolete
+#elif GATOR_MALI_INTERFACE_STYLE >= 3
+/* Valid GATOR_MALI_INTERFACE_STYLE */
+#else
+#error Unknown GATOR_MALI_INTERFACE_STYLE option.
 #endif
 
 #if GATOR_MALI_INTERFACE_STYLE < 4
@@ -44,6 +54,8 @@
 #error MALI_SUPPORT set to an invalid device code: expecting MALI_4xx
 #endif
 
+static const char mali_name[] = "4xx";
+
 /* gatorfs variables for counter enable state,
  * the event the counter should count and the
  * 'key' (a unique id set by gatord and returned
@@ -61,8 +73,9 @@ static u32 *counter_address[NUMBER_OF_EVENTS];
 /* An array used to return the data we recorded
  * as key,value pairs hence the *2
  */
-static unsigned long counter_dump[NUMBER_OF_EVENTS * 2];
-static unsigned long counter_prev[NUMBER_OF_EVENTS];
+static int counter_dump[NUMBER_OF_EVENTS * 2];
+static int counter_prev[NUMBER_OF_EVENTS];
+static bool prev_set[NUMBER_OF_EVENTS];
 
 /* Note whether tracepoints have been registered */
 static int trace_registered;
@@ -76,18 +89,11 @@ static unsigned int n_vp_cores = MAX_NUM_VP_CORES;
 static unsigned int n_l2_cores = MAX_NUM_L2_CACHE_CORES;
 static unsigned int n_fp_cores = MAX_NUM_FP_CORES;
 
-/**
- * Calculate the difference and handle the overflow.
- */
-static u32 get_difference(u32 start, u32 end)
-{
-       if (start - end >= 0) {
-               return start - end;
-       }
-
-       // Mali counters are unsigned 32 bit values that wrap.
-       return (4294967295u - end) + start;
-}
+extern struct mali_counter mali_activity[2];
+static const char *const mali_activity_names[] = {
+       "fragment",
+       "vertex",
+};
 
 /**
  * Returns non-zero if the given counter ID is an activity counter.
@@ -106,94 +112,23 @@ static inline int is_hw_counter(unsigned int event_id)
        return (event_id >= FIRST_HW_COUNTER && event_id <= LAST_HW_COUNTER);
 }
 
-/*
- * These are provided for utgard compatibility.
- */
-typedef void _mali_profiling_get_mali_version_type(struct _mali_profiling_mali_version *values);
-typedef u32 _mali_profiling_get_l2_counters_type(_mali_profiling_l2_counter_values *values);
-
-#if GATOR_MALI_INTERFACE_STYLE == 2
-/**
- * Returns non-zero if the given counter ID is a software counter.
- */
-static inline int is_sw_counter(unsigned int event_id)
-{
-       return (event_id >= FIRST_SW_COUNTER && event_id <= LAST_SW_COUNTER);
-}
-#endif
-
-#if GATOR_MALI_INTERFACE_STYLE == 2
-/*
- * The Mali DDK uses s64 types to contain software counter values, but gator
- * can only use a maximum of 32 bits. This function scales a software counter
- * to an appropriate range.
- */
-static u32 scale_sw_counter_value(unsigned int event_id, signed long long value)
-{
-       u32 scaled_value;
-
-       switch (event_id) {
-       case COUNTER_GLES_UPLOAD_TEXTURE_TIME:
-       case COUNTER_GLES_UPLOAD_VBO_TIME:
-               scaled_value = (u32)div_s64(value, 1000000);
-               break;
-       default:
-               scaled_value = (u32)value;
-               break;
-       }
-
-       return scaled_value;
-}
-#endif
-
-/* Probe for continuously sampled counter */
-#if 0                          //WE_DONT_CURRENTLY_USE_THIS_SO_SUPPRESS_WARNING
-GATOR_DEFINE_PROBE(mali_sample_address, TP_PROTO(unsigned int event_id, u32 *addr))
-{
-       /* Turning on too many pr_debug statements in frequently called functions
-        * can cause stability and/or performance problems
-        */
-       //pr_debug("gator: mali_sample_address %d %d\n", event_id, addr);
-       if (event_id >= ACTIVITY_VP && event_id <= COUNTER_FP3_C1) {
-               counter_address[event_id] = addr;
-       }
-}
-#endif
-
 /* Probe for hardware counter events */
 GATOR_DEFINE_PROBE(mali_hw_counter, TP_PROTO(unsigned int event_id, unsigned int value))
 {
-       /* Turning on too many pr_debug statements in frequently called functions
-        * can cause stability and/or performance problems
-        */
-       //pr_debug("gator: mali_hw_counter %d %d\n", event_id, value);
-       if (is_hw_counter(event_id)) {
+       if (is_hw_counter(event_id))
                counter_data[event_id] = value;
-       }
 }
 
-#if GATOR_MALI_INTERFACE_STYLE == 2
-GATOR_DEFINE_PROBE(mali_sw_counter, TP_PROTO(unsigned int event_id, signed long long value))
-{
-       if (is_sw_counter(event_id)) {
-               counter_data[event_id] = scale_sw_counter_value(event_id, value);
-       }
-}
-#endif /* GATOR_MALI_INTERFACE_STYLE == 2 */
-
-#if GATOR_MALI_INTERFACE_STYLE >= 3
 GATOR_DEFINE_PROBE(mali_sw_counters, TP_PROTO(pid_t pid, pid_t tid, void *surface_id, unsigned int *counters))
 {
        u32 i;
 
        /* Copy over the values for those counters which are enabled. */
        for (i = FIRST_SW_COUNTER; i <= LAST_SW_COUNTER; i++) {
-               if (counter_enabled[i]) {
+               if (counter_enabled[i])
                        counter_data[i] = (u32)(counters[i - FIRST_SW_COUNTER]);
-               }
        }
 }
-#endif /* GATOR_MALI_INTERFACE_STYLE >= 3 */
 
 /**
  * Create a single filesystem entry for a specified event.
@@ -211,13 +146,11 @@ static int create_fs_entry(struct super_block *sb, struct dentry *root, const ch
 
        dir = gatorfs_mkdir(sb, root, name);
 
-       if (!dir) {
+       if (!dir)
                return -1;
-       }
 
-       if (create_event_item) {
+       if (create_event_item)
                gatorfs_create_ulong(sb, dir, "event", &counter_event[event]);
-       }
 
        gatorfs_create_ulong(sb, dir, "enabled", &counter_enabled[event]);
        gatorfs_create_ro_ulong(sb, dir, "key", &counter_key[event]);
@@ -231,7 +164,7 @@ static int create_fs_entry(struct super_block *sb, struct dentry *root, const ch
  */
 static void initialise_version_info(void)
 {
-       _mali_profiling_get_mali_version_type *mali_profiling_get_mali_version_symbol;
+       void (*mali_profiling_get_mali_version_symbol)(struct _mali_profiling_mali_version *values);
 
        mali_profiling_get_mali_version_symbol = symbol_get(_mali_profiling_get_mali_version);
 
@@ -253,7 +186,8 @@ static void initialise_version_info(void)
                /* Release the function - we're done with it. */
                symbol_put(_mali_profiling_get_mali_version);
        } else {
-               printk("gator: mali online _mali_profiling_get_mali_version symbol not found\n");
+               pr_err("gator: mali online _mali_profiling_get_mali_version symbol not found\n");
+               pr_err("gator:  check your Mali DDK version versus the GATOR_MALI_INTERFACE_STYLE setting\n");
        }
 }
 #endif
@@ -261,7 +195,6 @@ static void initialise_version_info(void)
 static int create_files(struct super_block *sb, struct dentry *root)
 {
        int event;
-       const char *mali_name = gator_mali_get_mali_name();
 
        char buf[40];
        int core_id;
@@ -278,21 +211,27 @@ static int create_files(struct super_block *sb, struct dentry *root)
        initialise_version_info();
 #endif
 
+       mali_activity[0].cores = n_fp_cores;
+       mali_activity[1].cores = n_vp_cores;
+       for (event = 0; event < ARRAY_SIZE(mali_activity); event++) {
+               if (gator_mali_create_file_system(mali_name, mali_activity_names[event], sb, root, &mali_activity[event], NULL) != 0)
+                       return -1;
+       }
+
        /* Vertex processor counters */
        for (core_id = 0; core_id < n_vp_cores; core_id++) {
                int activity_counter_id = ACTIVITY_VP_0;
-               snprintf(buf, sizeof buf, "ARM_%s_VP_%d_active", mali_name, core_id);
-               if (create_fs_entry(sb, root, buf, activity_counter_id, 0) != 0) {
+
+               snprintf(buf, sizeof(buf), "ARM_Mali-%s_VP_%d_active", mali_name, core_id);
+               if (create_fs_entry(sb, root, buf, activity_counter_id, 0) != 0)
                        return -1;
-               }
 
                for (counter_number = 0; counter_number < 2; counter_number++) {
                        int counter_id = COUNTER_VP_0_C0 + (2 * core_id) + counter_number;
 
-                       snprintf(buf, sizeof buf, "ARM_%s_VP_%d_cnt%d", mali_name, core_id, counter_number);
-                       if (create_fs_entry(sb, root, buf, counter_id, 1) != 0) {
+                       snprintf(buf, sizeof(buf), "ARM_Mali-%s_VP_%d_cnt%d", mali_name, core_id, counter_number);
+                       if (create_fs_entry(sb, root, buf, counter_id, 1) != 0)
                                return -1;
-                       }
                }
        }
 
@@ -300,18 +239,16 @@ static int create_files(struct super_block *sb, struct dentry *root)
        for (core_id = 0; core_id < n_fp_cores; core_id++) {
                int activity_counter_id = ACTIVITY_FP_0 + core_id;
 
-               snprintf(buf, sizeof buf, "ARM_%s_FP_%d_active", mali_name, core_id);
-               if (create_fs_entry(sb, root, buf, activity_counter_id, 0) != 0) {
+               snprintf(buf, sizeof(buf), "ARM_Mali-%s_FP_%d_active", mali_name, core_id);
+               if (create_fs_entry(sb, root, buf, activity_counter_id, 0) != 0)
                        return -1;
-               }
 
                for (counter_number = 0; counter_number < 2; counter_number++) {
                        int counter_id = COUNTER_FP_0_C0 + (2 * core_id) + counter_number;
 
-                       snprintf(buf, sizeof buf, "ARM_%s_FP_%d_cnt%d", mali_name, core_id, counter_number);
-                       if (create_fs_entry(sb, root, buf, counter_id, 1) != 0) {
+                       snprintf(buf, sizeof(buf), "ARM_Mali-%s_FP_%d_cnt%d", mali_name, core_id, counter_number);
+                       if (create_fs_entry(sb, root, buf, counter_id, 1) != 0)
                                return -1;
-                       }
                }
        }
 
@@ -320,38 +257,33 @@ static int create_files(struct super_block *sb, struct dentry *root)
                for (counter_number = 0; counter_number < 2; counter_number++) {
                        int counter_id = COUNTER_L2_0_C0 + (2 * core_id) + counter_number;
 
-                       snprintf(buf, sizeof buf, "ARM_%s_L2_%d_cnt%d", mali_name, core_id, counter_number);
-                       if (create_fs_entry(sb, root, buf, counter_id, 1) != 0) {
+                       snprintf(buf, sizeof(buf), "ARM_Mali-%s_L2_%d_cnt%d", mali_name, core_id, counter_number);
+                       if (create_fs_entry(sb, root, buf, counter_id, 1) != 0)
                                return -1;
-                       }
                }
        }
 
        /* Now set up the software counter entries */
        for (event = FIRST_SW_COUNTER; event <= LAST_SW_COUNTER; event++) {
-               snprintf(buf, sizeof(buf), "ARM_%s_SW_%d", mali_name, event - FIRST_SW_COUNTER);
+               snprintf(buf, sizeof(buf), "ARM_Mali-%s_SW_%d", mali_name, event - FIRST_SW_COUNTER);
 
-               if (create_fs_entry(sb, root, buf, event, 0) != 0) {
+               if (create_fs_entry(sb, root, buf, event, 0) != 0)
                        return -1;
-               }
        }
 
        /* Now set up the special counter entries */
-       snprintf(buf, sizeof(buf), "ARM_%s_Filmstrip_cnt0", mali_name);
-       if (create_fs_entry(sb, root, buf, COUNTER_FILMSTRIP, 1) != 0) {
+       snprintf(buf, sizeof(buf), "ARM_Mali-%s_Filmstrip_cnt0", mali_name);
+       if (create_fs_entry(sb, root, buf, COUNTER_FILMSTRIP, 1) != 0)
                return -1;
-       }
 
 #ifdef DVFS_REPORTED_BY_DDK
-       snprintf(buf, sizeof(buf), "ARM_%s_Frequency", mali_name);
-       if (create_fs_entry(sb, root, buf, COUNTER_FREQUENCY, 1) != 0) {
+       snprintf(buf, sizeof(buf), "ARM_Mali-%s_Frequency", mali_name);
+       if (create_fs_entry(sb, root, buf, COUNTER_FREQUENCY, 1) != 0)
                return -1;
-       }
 
-       snprintf(buf, sizeof(buf), "ARM_%s_Voltage", mali_name);
-       if (create_fs_entry(sb, root, buf, COUNTER_VOLTAGE, 1) != 0) {
+       snprintf(buf, sizeof(buf), "ARM_Mali-%s_Voltage", mali_name);
+       if (create_fs_entry(sb, root, buf, COUNTER_VOLTAGE, 1) != 0)
                return -1;
-       }
 #endif
 
        return 0;
@@ -361,8 +293,8 @@ static int create_files(struct super_block *sb, struct dentry *root)
  * Local store for the get_counters entry point into the DDK.
  * This is stored here since it is used very regularly.
  */
-static mali_profiling_get_counters_type *mali_get_counters = NULL;
-static _mali_profiling_get_l2_counters_type *mali_get_l2_counters = NULL;
+static void (*mali_get_counters)(unsigned int *, unsigned int *, unsigned int *, unsigned int *);
+static u32 (*mali_get_l2_counters)(struct _mali_profiling_l2_counter_values *values);
 
 /*
  * Examine list of counters between two index limits and determine if any one is enabled.
@@ -373,9 +305,8 @@ static int is_any_counter_enabled(unsigned int first_counter, unsigned int last_
        unsigned int i;
 
        for (i = first_counter; i <= last_counter; i++) {
-               if (counter_enabled[i]) {
+               if (counter_enabled[i])
                        return 1;       /* At least one counter is enabled */
-               }
        }
 
        return 0;               /* No s/w counters enabled */
@@ -397,23 +328,21 @@ static void init_counters(unsigned int from_counter, unsigned int to_counter)
                pr_debug("gator: mali online _mali_profiling_set_event symbol @ %p\n", mali_set_hw_event);
 
                for (counter_id = from_counter; counter_id <= to_counter; counter_id++) {
-                       if (counter_enabled[counter_id]) {
+                       if (counter_enabled[counter_id])
                                mali_set_hw_event(counter_id, counter_event[counter_id]);
-                       } else {
+                       else
                                mali_set_hw_event(counter_id, 0xFFFFFFFF);
-                       }
                }
 
                symbol_put(_mali_profiling_set_event);
        } else {
-               printk("gator: mali online _mali_profiling_set_event symbol not found\n");
+               pr_err("gator: mali online _mali_profiling_set_event symbol not found\n");
        }
 }
 
 static void mali_counter_initialize(void)
 {
        int i;
-       int core_id;
 
        mali_profiling_control_type *mali_control;
 
@@ -439,39 +368,30 @@ static void mali_counter_initialize(void)
 
                symbol_put(_mali_profiling_control);
        } else {
-               printk("gator: mali online _mali_profiling_control symbol not found\n");
+               pr_err("gator: mali online _mali_profiling_control symbol not found\n");
        }
 
        mali_get_counters = symbol_get(_mali_profiling_get_counters);
-       if (mali_get_counters) {
+       if (mali_get_counters)
                pr_debug("gator: mali online _mali_profiling_get_counters symbol @ %p\n", mali_get_counters);
-
-       } else {
-               pr_debug("gator WARNING: mali _mali_profiling_get_counters symbol not defined");
-       }
+       else
+               pr_debug("gator WARNING: mali _mali_profiling_get_counters symbol not defined\n");
 
        mali_get_l2_counters = symbol_get(_mali_profiling_get_l2_counters);
-       if (mali_get_l2_counters) {
+       if (mali_get_l2_counters)
                pr_debug("gator: mali online _mali_profiling_get_l2_counters symbol @ %p\n", mali_get_l2_counters);
-
-       } else {
-               pr_debug("gator WARNING: mali _mali_profiling_get_l2_counters symbol not defined");
-       }
+       else
+               pr_debug("gator WARNING: mali _mali_profiling_get_l2_counters symbol not defined\n");
 
        if (!mali_get_counters && !mali_get_l2_counters) {
-               pr_debug("gator: WARNING: no L2 counters available");
+               pr_debug("gator: WARNING: no L2 counters available\n");
                n_l2_cores = 0;
        }
 
-       for (core_id = 0; core_id < n_l2_cores; core_id++) {
-               int counter_id = COUNTER_L2_0_C0 + (2 * core_id);
-               counter_prev[counter_id] = 0;
-               counter_prev[counter_id + 1] = 0;
-       }
-
        /* Clear counters in the start */
        for (i = 0; i < NUMBER_OF_EVENTS; i++) {
                counter_data[i] = 0;
+               prev_set[i] = false;
        }
 }
 
@@ -486,13 +406,12 @@ static void mali_counter_deinitialize(void)
                int i;
 
                pr_debug("gator: mali offline _mali_profiling_set_event symbol @ %p\n", mali_set_hw_event);
-               for (i = FIRST_HW_COUNTER; i <= LAST_HW_COUNTER; i++) {
+               for (i = FIRST_HW_COUNTER; i <= LAST_HW_COUNTER; i++)
                        mali_set_hw_event(i, 0xFFFFFFFF);
-               }
 
                symbol_put(_mali_profiling_set_event);
        } else {
-               printk("gator: mali offline _mali_profiling_set_event symbol not found\n");
+               pr_err("gator: mali offline _mali_profiling_set_event symbol not found\n");
        }
 
        /* Generic control interface for Mali DDK. */
@@ -508,43 +427,29 @@ static void mali_counter_deinitialize(void)
 
                symbol_put(_mali_profiling_control);
        } else {
-               printk("gator: mali offline _mali_profiling_control symbol not found\n");
+               pr_err("gator: mali offline _mali_profiling_control symbol not found\n");
        }
 
-       if (mali_get_counters) {
+       if (mali_get_counters)
                symbol_put(_mali_profiling_get_counters);
-       }
 
-       if (mali_get_l2_counters) {
+       if (mali_get_l2_counters)
                symbol_put(_mali_profiling_get_l2_counters);
-       }
 }
 
 static int start(void)
 {
-       // register tracepoints
+       /* register tracepoints */
        if (GATOR_REGISTER_TRACE(mali_hw_counter)) {
-               printk("gator: mali_hw_counter tracepoint failed to activate\n");
+               pr_err("gator: mali_hw_counter tracepoint failed to activate\n");
                return -1;
        }
 
-#if GATOR_MALI_INTERFACE_STYLE == 1
-       /* None. */
-#elif GATOR_MALI_INTERFACE_STYLE == 2
-       /* For patched Mali driver. */
-       if (GATOR_REGISTER_TRACE(mali_sw_counter)) {
-               printk("gator: mali_sw_counter tracepoint failed to activate\n");
-               return -1;
-       }
-#elif GATOR_MALI_INTERFACE_STYLE >= 3
        /* For Mali drivers with built-in support. */
        if (GATOR_REGISTER_TRACE(mali_sw_counters)) {
-               printk("gator: mali_sw_counters tracepoint failed to activate\n");
+               pr_err("gator: mali_sw_counters tracepoint failed to activate\n");
                return -1;
        }
-#else
-#error Unknown GATOR_MALI_INTERFACE_STYLE option.
-#endif
 
        trace_registered = 1;
 
@@ -561,17 +466,8 @@ static void stop(void)
        if (trace_registered) {
                GATOR_UNREGISTER_TRACE(mali_hw_counter);
 
-#if GATOR_MALI_INTERFACE_STYLE == 1
-               /* None. */
-#elif GATOR_MALI_INTERFACE_STYLE == 2
-               /* For patched Mali driver. */
-               GATOR_UNREGISTER_TRACE(mali_sw_counter);
-#elif GATOR_MALI_INTERFACE_STYLE >= 3
                /* For Mali drivers with built-in support. */
                GATOR_UNREGISTER_TRACE(mali_sw_counters);
-#else
-#error Unknown GATOR_MALI_INTERFACE_STYLE option.
-#endif
 
                pr_debug("gator: mali timeline tracepoint deactivated\n");
 
@@ -601,17 +497,17 @@ static void dump_counters(unsigned int from_counter, unsigned int to_counter, un
        }
 }
 
-static int read(int **buffer)
+static int read(int **buffer, bool sched_switch)
 {
        int len = 0;
 
        if (!on_primary_core())
                return 0;
 
-       // Read the L2 C0 and C1 here.
+       /* Read the L2 C0 and C1 here. */
        if (n_l2_cores > 0 && is_any_counter_enabled(COUNTER_L2_0_C0, COUNTER_L2_0_C0 + (2 * n_l2_cores))) {
                unsigned int unavailable_l2_caches = 0;
-               _mali_profiling_l2_counter_values cache_values;
+               struct _mali_profiling_l2_counter_values cache_values;
                unsigned int cache_id;
                struct _mali_profiling_core_counters *per_core;
 
@@ -630,27 +526,28 @@ static int read(int **buffer)
                        unsigned int counter_id_0 = COUNTER_L2_0_C0 + (2 * cache_id);
                        unsigned int counter_id_1 = counter_id_0 + 1;
 
-                       if ((1 << cache_id) & unavailable_l2_caches) {
+                       if ((1 << cache_id) & unavailable_l2_caches)
                                continue; /* This cache is unavailable (powered-off, possibly). */
-                       }
 
                        per_core = &cache_values.cores[cache_id];
 
-                       if (counter_enabled[counter_id_0]) {
-                               // Calculate and save src0's counter val0
+                       if (counter_enabled[counter_id_0] && prev_set[counter_id_0]) {
+                               /* Calculate and save src0's counter val0 */
                                counter_dump[len++] = counter_key[counter_id_0];
-                               counter_dump[len++] = get_difference(per_core->value0, counter_prev[counter_id_0]);
+                               counter_dump[len++] = per_core->value0 - counter_prev[counter_id_0];
                        }
 
-                       if (counter_enabled[counter_id_1]) {
-                               // Calculate and save src1's counter val1
+                       if (counter_enabled[counter_id_1] && prev_set[counter_id_1]) {
+                               /* Calculate and save src1's counter val1 */
                                counter_dump[len++] = counter_key[counter_id_1];
-                               counter_dump[len++] = get_difference(per_core->value1, counter_prev[counter_id_1]);
+                               counter_dump[len++] = per_core->value1 - counter_prev[counter_id_1];
                        }
 
-                       // Save the previous values for the counters.
+                       /* Save the previous values for the counters. */
                        counter_prev[counter_id_0] = per_core->value0;
+                       prev_set[counter_id_0] = true;
                        counter_prev[counter_id_1] = per_core->value1;
+                       prev_set[counter_id_1] = true;
                }
        }
 
@@ -664,8 +561,9 @@ static int read(int **buffer)
        {
                int cnt;
                /*
-                * Add in the voltage and frequency counters if enabled.  Note that, since these are
-                * actually passed as events, the counter value should not be cleared.
+                * Add in the voltage and frequency counters if enabled. Note
+                * that, since these are actually passed as events, the counter
+                * value should not be cleared.
                 */
                cnt = COUNTER_FREQUENCY;
                if (counter_enabled[cnt]) {
@@ -681,9 +579,8 @@ static int read(int **buffer)
        }
 #endif
 
-       if (buffer) {
-               *buffer = (int *)counter_dump;
-       }
+       if (buffer)
+               *buffer = counter_dump;
 
        return len;
 }
@@ -709,6 +606,8 @@ int gator_events_mali_init(void)
 
        pr_debug("gator: mali init\n");
 
+       gator_mali_initialise_counters(mali_activity, ARRAY_SIZE(mali_activity));
+
        for (cnt = 0; cnt < NUMBER_OF_EVENTS; cnt++) {
                counter_enabled[cnt] = 0;
                counter_event[cnt] = 0;
index dc58dcf0c6628cded90241571d3b4d31e0905372..1af87d649afe0f763b5f617ba88c5bc04cdcd16f 100644 (file)
@@ -8,27 +8,7 @@
  */
 #include "gator_events_mali_common.h"
 
-static u32 gator_mali_get_id(void)
-{
-       return MALI_SUPPORT;
-}
-
-extern const char *gator_mali_get_mali_name(void)
-{
-       u32 id = gator_mali_get_id();
-
-       switch (id) {
-       case MALI_T6xx:
-               return "Mali-T6xx";
-       case MALI_4xx:
-               return "Mali-4xx";
-       default:
-               pr_debug("gator: Mali-T6xx: unknown Mali ID (%d)\n", id);
-               return "Mali-Unknown";
-       }
-}
-
-extern int gator_mali_create_file_system(const char *mali_name, const char *event_name, struct super_block *sb, struct dentry *root, mali_counter *counter, unsigned long *event)
+extern int gator_mali_create_file_system(const char *mali_name, const char *event_name, struct super_block *sb, struct dentry *root, struct mali_counter *counter, unsigned long *event)
 {
        int err;
        char buf[255];
@@ -37,29 +17,39 @@ extern int gator_mali_create_file_system(const char *mali_name, const char *even
        /* If the counter name is empty ignore it */
        if (strlen(event_name) != 0) {
                /* Set up the filesystem entry for this event. */
-               snprintf(buf, sizeof(buf), "ARM_%s_%s", mali_name, event_name);
+               if (mali_name == NULL)
+                       snprintf(buf, sizeof(buf), "ARM_Mali-%s", event_name);
+               else
+                       snprintf(buf, sizeof(buf), "ARM_Mali-%s_%s", mali_name, event_name);
 
                dir = gatorfs_mkdir(sb, root, buf);
 
                if (dir == NULL) {
-                       pr_debug("gator: Mali-T6xx: error creating file system for: %s (%s)", event_name, buf);
+                       pr_debug("gator: %s: error creating file system for: %s (%s)\n", mali_name, event_name, buf);
                        return -1;
                }
 
                err = gatorfs_create_ulong(sb, dir, "enabled", &counter->enabled);
                if (err != 0) {
-                       pr_debug("gator: Mali-T6xx: error calling gatorfs_create_ulong for: %s (%s)", event_name, buf);
+                       pr_debug("gator: %s: error calling gatorfs_create_ulong for: %s (%s)\n", mali_name, event_name, buf);
                        return -1;
                }
                err = gatorfs_create_ro_ulong(sb, dir, "key", &counter->key);
                if (err != 0) {
-                       pr_debug("gator: Mali-T6xx: error calling gatorfs_create_ro_ulong for: %s (%s)", event_name, buf);
+                       pr_debug("gator: %s: error calling gatorfs_create_ro_ulong for: %s (%s)\n", mali_name, event_name, buf);
                        return -1;
                }
+               if (counter->cores != -1) {
+                       err = gatorfs_create_ro_ulong(sb, dir, "cores", &counter->cores);
+                       if (err != 0) {
+                               pr_debug("gator: %s: error calling gatorfs_create_ro_ulong for: %s (%s)\n", mali_name, event_name, buf);
+                               return -1;
+                       }
+               }
                if (event != NULL) {
                        err = gatorfs_create_ulong(sb, dir, "event", event);
                        if (err != 0) {
-                               pr_debug("gator: Mali-T6xx: error calling gatorfs_create_ro_ulong for: %s (%s)", event_name, buf);
+                               pr_debug("gator: %s: error calling gatorfs_create_ro_ulong for: %s (%s)\n", mali_name, event_name, buf);
                                return -1;
                        }
                }
@@ -68,14 +58,15 @@ extern int gator_mali_create_file_system(const char *mali_name, const char *even
        return 0;
 }
 
-extern void gator_mali_initialise_counters(mali_counter counters[], unsigned int n_counters)
+extern void gator_mali_initialise_counters(struct mali_counter counters[], unsigned int n_counters)
 {
        unsigned int cnt;
 
        for (cnt = 0; cnt < n_counters; cnt++) {
-               mali_counter *counter = &counters[cnt];
+               struct mali_counter *counter = &counters[cnt];
 
                counter->key = gator_events_get_key();
                counter->enabled = 0;
+               counter->cores = -1;
        }
 }
index 41c2a3c13fae69ccc6c6aae407951f6a5872a882..e7082e62fe88c3b0250b5a52dbcc283bcbb84c63 100644 (file)
 #include <linux/time.h>
 #include <linux/math64.h>
 #include <linux/slab.h>
-#include <asm/io.h>
-
-/* Device codes for each known GPU */
-#define MALI_4xx     (0x0b07)
-#define MALI_T6xx    (0x0056)
+#include <linux/io.h>
 
 /* Ensure that MALI_SUPPORT has been defined to something. */
 #ifndef MALI_SUPPORT
 /*
  * Runtime state information for a counter.
  */
-typedef struct {
-       unsigned long key;      /* 'key' (a unique id set by gatord and returned by gator.ko) */
-       unsigned long enabled;  /* counter enable state */
-} mali_counter;
+struct mali_counter {
+       /* 'key' (a unique id set by gatord and returned by gator.ko) */
+       unsigned long key;
+       /* counter enable state */
+       unsigned long enabled;
+       /* for activity counters, the number of cores, otherwise -1 */
+       unsigned long cores;
+};
 
 /*
  * Mali-4xx
  */
 typedef int mali_profiling_set_event_type(unsigned int, int);
 typedef void mali_profiling_control_type(unsigned int, unsigned int);
-typedef void mali_profiling_get_counters_type(unsigned int *, unsigned int *, unsigned int *, unsigned int *);
 
 /*
  * Driver entry points for functions called directly by gator.
@@ -53,18 +52,10 @@ extern int _mali_profiling_set_event(unsigned int, int);
 extern void _mali_profiling_control(unsigned int, unsigned int);
 extern void _mali_profiling_get_counters(unsigned int *, unsigned int *, unsigned int *, unsigned int *);
 
-/**
- * Returns a name which identifies the GPU type (eg Mali-4xx, Mali-T6xx).
- *
- * @return The name as a constant string.
- */
-extern const char *gator_mali_get_mali_name(void);
-
 /**
  * Creates a filesystem entry under /dev/gator relating to the specified event name and key, and
  * associate the key/enable values with this entry point.
  *
- * @param mali_name A name related to the type of GPU, obtained from a call to gator_mali_get_mali_name()
  * @param event_name The name of the event.
  * @param sb Linux super block
  * @param root Directory under which the entry will be created.
@@ -73,7 +64,7 @@ extern const char *gator_mali_get_mali_name(void);
  *
  * @return 0 if entry point was created, non-zero if not.
  */
-extern int gator_mali_create_file_system(const char *mali_name, const char *event_name, struct super_block *sb, struct dentry *root, mali_counter *counter, unsigned long *event);
+extern int gator_mali_create_file_system(const char *mali_name, const char *event_name, struct super_block *sb, struct dentry *root, struct mali_counter *counter, unsigned long *event);
 
 /**
  * Initializes the counter array.
@@ -81,6 +72,6 @@ extern int gator_mali_create_file_system(const char *mali_name, const char *even
  * @param keys The array of counters
  * @param n_counters The number of entries in each of the arrays.
  */
-extern void gator_mali_initialise_counters(mali_counter counters[], unsigned int n_counters);
+extern void gator_mali_initialise_counters(struct mali_counter counters[], unsigned int n_counters);
 
 #endif /* GATOR_EVENTS_MALI_COMMON_H  */
diff --git a/drivers/gator/gator_events_mali_midgard.c b/drivers/gator/gator_events_mali_midgard.c
new file mode 100644 (file)
index 0000000..0aec906
--- /dev/null
@@ -0,0 +1,562 @@
+/**
+ * Copyright (C) ARM Limited 2011-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#ifdef MALI_DIR_MIDGARD
+/* New DDK Directory structure with kernel/drivers/gpu/arm/midgard*/
+#include "mali_linux_trace.h"
+#else
+/* Old DDK Directory structure with kernel/drivers/gpu/arm/t6xx*/
+#include "linux/mali_linux_trace.h"
+#endif
+
+#include "gator_events_mali_common.h"
+
+/*
+ * Check that the MALI_SUPPORT define is set to one of the allowable device codes.
+ */
+#if (MALI_SUPPORT != MALI_MIDGARD)
+#error MALI_SUPPORT set to an invalid device code: expecting MALI_MIDGARD
+#endif
+
+static const char mali_name[] = "Midgard";
+
+/* Counters for Mali-Midgard:
+ *
+ *  - Timeline events
+ *    They are tracepoints, but instead of reporting a number they report a START/STOP event.
+ *    They are reported in Streamline as number of microseconds while that particular counter was active.
+ *
+ *  - SW counters
+ *    They are tracepoints reporting a particular number.
+ *    They are accumulated in sw_counter_data array until they are passed to Streamline, then they are zeroed.
+ *
+ *  - Accumulators
+ *    They are the same as software counters but their value is not zeroed.
+ */
+
+/* Timeline (start/stop) activity */
+static const char *const timeline_event_names[] = {
+       "PM_SHADER_0",
+       "PM_SHADER_1",
+       "PM_SHADER_2",
+       "PM_SHADER_3",
+       "PM_SHADER_4",
+       "PM_SHADER_5",
+       "PM_SHADER_6",
+       "PM_SHADER_7",
+       "PM_TILER_0",
+       "PM_L2_0",
+       "PM_L2_1",
+       "MMU_AS_0",
+       "MMU_AS_1",
+       "MMU_AS_2",
+       "MMU_AS_3"
+};
+
+enum {
+       PM_SHADER_0 = 0,
+       PM_SHADER_1,
+       PM_SHADER_2,
+       PM_SHADER_3,
+       PM_SHADER_4,
+       PM_SHADER_5,
+       PM_SHADER_6,
+       PM_SHADER_7,
+       PM_TILER_0,
+       PM_L2_0,
+       PM_L2_1,
+       MMU_AS_0,
+       MMU_AS_1,
+       MMU_AS_2,
+       MMU_AS_3
+};
+/* The number of shader blocks in the enum above */
+#define NUM_PM_SHADER (8)
+
+/* Software Counters */
+static const char *const software_counter_names[] = {
+       "MMU_PAGE_FAULT_0",
+       "MMU_PAGE_FAULT_1",
+       "MMU_PAGE_FAULT_2",
+       "MMU_PAGE_FAULT_3"
+};
+
+enum {
+       MMU_PAGE_FAULT_0 = 0,
+       MMU_PAGE_FAULT_1,
+       MMU_PAGE_FAULT_2,
+       MMU_PAGE_FAULT_3
+};
+
+/* Software Counters */
+static const char *const accumulators_names[] = {
+       "TOTAL_ALLOC_PAGES"
+};
+
+enum {
+       TOTAL_ALLOC_PAGES = 0
+};
+
+#define FIRST_TIMELINE_EVENT (0)
+#define NUMBER_OF_TIMELINE_EVENTS (sizeof(timeline_event_names) / sizeof(timeline_event_names[0]))
+#define FIRST_SOFTWARE_COUNTER (FIRST_TIMELINE_EVENT + NUMBER_OF_TIMELINE_EVENTS)
+#define NUMBER_OF_SOFTWARE_COUNTERS (sizeof(software_counter_names) / sizeof(software_counter_names[0]))
+#define FIRST_ACCUMULATOR (FIRST_SOFTWARE_COUNTER + NUMBER_OF_SOFTWARE_COUNTERS)
+#define NUMBER_OF_ACCUMULATORS (sizeof(accumulators_names) / sizeof(accumulators_names[0]))
+#define FILMSTRIP (NUMBER_OF_TIMELINE_EVENTS + NUMBER_OF_SOFTWARE_COUNTERS + NUMBER_OF_ACCUMULATORS)
+#define NUMBER_OF_EVENTS (NUMBER_OF_TIMELINE_EVENTS + NUMBER_OF_SOFTWARE_COUNTERS + NUMBER_OF_ACCUMULATORS + 1)
+
+/*
+ * gatorfs variables for counter enable state
+ */
+static struct mali_counter counters[NUMBER_OF_EVENTS];
+static unsigned long filmstrip_event;
+
+/* An array used to return the data we recorded
+ * as key,value pairs hence the *2
+ */
+static int counter_dump[NUMBER_OF_EVENTS * 2];
+
+/*
+ * Array holding counter start times (in ns) for each counter. A zero
+ * here indicates that the activity monitored by this counter is not
+ * running.
+ */
+static struct timespec timeline_event_starttime[NUMBER_OF_TIMELINE_EVENTS];
+
+/* The data we have recorded */
+static unsigned int timeline_data[NUMBER_OF_TIMELINE_EVENTS];
+static unsigned int sw_counter_data[NUMBER_OF_SOFTWARE_COUNTERS];
+static unsigned int accumulators_data[NUMBER_OF_ACCUMULATORS];
+
+/* Hold the previous timestamp, used to calculate the sample interval. */
+static struct timespec prev_timestamp;
+
+/**
+ * Returns the timespan (in microseconds) between the two specified timestamps.
+ *
+ * @param start Ptr to the start timestamp
+ * @param end Ptr to the end timestamp
+ *
+ * @return Number of microseconds between the two timestamps (can be negative if start follows end).
+ */
+static inline long get_duration_us(const struct timespec *start, const struct timespec *end)
+{
+       long event_duration_us = (end->tv_nsec - start->tv_nsec) / 1000;
+
+       event_duration_us += (end->tv_sec - start->tv_sec) * 1000000;
+
+       return event_duration_us;
+}
+
+static void record_timeline_event(unsigned int timeline_index, unsigned int type)
+{
+       struct timespec event_timestamp;
+       struct timespec *event_start = &timeline_event_starttime[timeline_index];
+
+       switch (type) {
+       case ACTIVITY_START:
+               /* Get the event time... */
+               getnstimeofday(&event_timestamp);
+
+               /* Remember the start time if the activity is not already started */
+               if (event_start->tv_sec == 0)
+                       *event_start = event_timestamp; /* Structure copy */
+               break;
+
+       case ACTIVITY_STOP:
+               /* if the counter was started... */
+               if (event_start->tv_sec != 0) {
+                       /* Get the event time... */
+                       getnstimeofday(&event_timestamp);
+
+                       /* Accumulate the duration in us */
+                       timeline_data[timeline_index] += get_duration_us(event_start, &event_timestamp);
+
+                       /* Reset the start time to indicate the activity is stopped. */
+                       event_start->tv_sec = 0;
+               }
+               break;
+
+       default:
+               /* Other activity events are ignored. */
+               break;
+       }
+}
+
+/*
+ * Documentation about the following tracepoints is in mali_linux_trace.h
+ */
+
+GATOR_DEFINE_PROBE(mali_pm_status, TP_PROTO(unsigned int event_id, unsigned long long value))
+{
+#define SHADER_PRESENT_LO       0x100  /* (RO) Shader core present bitmap, low word */
+#define TILER_PRESENT_LO        0x110  /* (RO) Tiler core present bitmap, low word */
+#define L2_PRESENT_LO           0x120  /* (RO) Level 2 cache present bitmap, low word */
+#define BIT_AT(value, pos) ((value >> pos) & 1)
+
+       static unsigned long long previous_shader_bitmask;
+       static unsigned long long previous_tiler_bitmask;
+       static unsigned long long previous_l2_bitmask;
+
+       switch (event_id) {
+       case SHADER_PRESENT_LO:
+               {
+                       unsigned long long changed_bitmask = previous_shader_bitmask ^ value;
+                       int pos;
+
+                       for (pos = 0; pos < NUM_PM_SHADER; ++pos) {
+                               if (BIT_AT(changed_bitmask, pos))
+                                       record_timeline_event(PM_SHADER_0 + pos, BIT_AT(value, pos) ? ACTIVITY_START : ACTIVITY_STOP);
+                       }
+
+                       previous_shader_bitmask = value;
+                       break;
+               }
+
+       case TILER_PRESENT_LO:
+               {
+                       unsigned long long changed = previous_tiler_bitmask ^ value;
+
+                       if (BIT_AT(changed, 0))
+                               record_timeline_event(PM_TILER_0, BIT_AT(value, 0) ? ACTIVITY_START : ACTIVITY_STOP);
+
+                       previous_tiler_bitmask = value;
+                       break;
+               }
+
+       case L2_PRESENT_LO:
+               {
+                       unsigned long long changed = previous_l2_bitmask ^ value;
+
+                       if (BIT_AT(changed, 0))
+                               record_timeline_event(PM_L2_0, BIT_AT(value, 0) ? ACTIVITY_START : ACTIVITY_STOP);
+                       if (BIT_AT(changed, 4))
+                               record_timeline_event(PM_L2_1, BIT_AT(value, 4) ? ACTIVITY_START : ACTIVITY_STOP);
+
+                       previous_l2_bitmask = value;
+                       break;
+               }
+
+       default:
+               /* No other blocks are supported at present */
+               break;
+       }
+
+#undef SHADER_PRESENT_LO
+#undef TILER_PRESENT_LO
+#undef L2_PRESENT_LO
+#undef BIT_AT
+}
+
+GATOR_DEFINE_PROBE(mali_page_fault_insert_pages, TP_PROTO(int event_id, unsigned long value))
+{
+       /* We add to the previous since we may receive many tracepoints in one sample period */
+       sw_counter_data[MMU_PAGE_FAULT_0 + event_id] += value;
+}
+
+GATOR_DEFINE_PROBE(mali_mmu_as_in_use, TP_PROTO(int event_id))
+{
+       record_timeline_event(MMU_AS_0 + event_id, ACTIVITY_START);
+}
+
+GATOR_DEFINE_PROBE(mali_mmu_as_released, TP_PROTO(int event_id))
+{
+       record_timeline_event(MMU_AS_0 + event_id, ACTIVITY_STOP);
+}
+
+GATOR_DEFINE_PROBE(mali_total_alloc_pages_change, TP_PROTO(long long int event_id))
+{
+       accumulators_data[TOTAL_ALLOC_PAGES] = event_id;
+}
+
+static int create_files(struct super_block *sb, struct dentry *root)
+{
+       int event;
+       /*
+        * Create the filesystem for all events
+        */
+       int counter_index = 0;
+       mali_profiling_control_type *mali_control;
+
+       for (event = FIRST_TIMELINE_EVENT; event < FIRST_TIMELINE_EVENT + NUMBER_OF_TIMELINE_EVENTS; event++) {
+               if (gator_mali_create_file_system(mali_name, timeline_event_names[counter_index], sb, root, &counters[event], NULL) != 0)
+                       return -1;
+               counter_index++;
+       }
+       counter_index = 0;
+       for (event = FIRST_SOFTWARE_COUNTER; event < FIRST_SOFTWARE_COUNTER + NUMBER_OF_SOFTWARE_COUNTERS; event++) {
+               if (gator_mali_create_file_system(mali_name, software_counter_names[counter_index], sb, root, &counters[event], NULL) != 0)
+                       return -1;
+               counter_index++;
+       }
+       counter_index = 0;
+       for (event = FIRST_ACCUMULATOR; event < FIRST_ACCUMULATOR + NUMBER_OF_ACCUMULATORS; event++) {
+               if (gator_mali_create_file_system(mali_name, accumulators_names[counter_index], sb, root, &counters[event], NULL) != 0)
+                       return -1;
+               counter_index++;
+       }
+
+       mali_control = symbol_get(_mali_profiling_control);
+       if (mali_control) {
+               if (gator_mali_create_file_system(mali_name, "Filmstrip_cnt0", sb, root, &counters[FILMSTRIP], &filmstrip_event) != 0)
+                       return -1;
+               symbol_put(_mali_profiling_control);
+       }
+
+       return 0;
+}
+
+static int register_tracepoints(void)
+{
+       if (GATOR_REGISTER_TRACE(mali_pm_status)) {
+               pr_debug("gator: Mali-Midgard: mali_pm_status tracepoint failed to activate\n");
+               return 0;
+       }
+
+       if (GATOR_REGISTER_TRACE(mali_page_fault_insert_pages)) {
+               pr_debug("gator: Mali-Midgard: mali_page_fault_insert_pages tracepoint failed to activate\n");
+               return 0;
+       }
+
+       if (GATOR_REGISTER_TRACE(mali_mmu_as_in_use)) {
+               pr_debug("gator: Mali-Midgard: mali_mmu_as_in_use tracepoint failed to activate\n");
+               return 0;
+       }
+
+       if (GATOR_REGISTER_TRACE(mali_mmu_as_released)) {
+               pr_debug("gator: Mali-Midgard: mali_mmu_as_released tracepoint failed to activate\n");
+               return 0;
+       }
+
+       if (GATOR_REGISTER_TRACE(mali_total_alloc_pages_change)) {
+               pr_debug("gator: Mali-Midgard: mali_total_alloc_pages_change tracepoint failed to activate\n");
+               return 0;
+       }
+
+       pr_debug("gator: Mali-Midgard: start\n");
+       pr_debug("gator: Mali-Midgard: mali_pm_status probe is at %p\n", &probe_mali_pm_status);
+       pr_debug("gator: Mali-Midgard: mali_page_fault_insert_pages probe is at %p\n", &probe_mali_page_fault_insert_pages);
+       pr_debug("gator: Mali-Midgard: mali_mmu_as_in_use probe is at %p\n", &probe_mali_mmu_as_in_use);
+       pr_debug("gator: Mali-Midgard: mali_mmu_as_released probe is at %p\n", &probe_mali_mmu_as_released);
+       pr_debug("gator: Mali-Midgard: mali_total_alloc_pages_change probe is at %p\n", &probe_mali_total_alloc_pages_change);
+
+       return 1;
+}
+
+static int start(void)
+{
+       unsigned int cnt;
+       mali_profiling_control_type *mali_control;
+
+       /* Clean all data for the next capture */
+       for (cnt = 0; cnt < NUMBER_OF_TIMELINE_EVENTS; cnt++) {
+               timeline_event_starttime[cnt].tv_sec = timeline_event_starttime[cnt].tv_nsec = 0;
+               timeline_data[cnt] = 0;
+       }
+
+       for (cnt = 0; cnt < NUMBER_OF_SOFTWARE_COUNTERS; cnt++)
+               sw_counter_data[cnt] = 0;
+
+       for (cnt = 0; cnt < NUMBER_OF_ACCUMULATORS; cnt++)
+               accumulators_data[cnt] = 0;
+
+       /* Register tracepoints */
+       if (register_tracepoints() == 0)
+               return -1;
+
+       /* Generic control interface for Mali DDK. */
+       mali_control = symbol_get(_mali_profiling_control);
+       if (mali_control) {
+               /* The event attribute in the XML file keeps the actual frame rate. */
+               unsigned int enabled = counters[FILMSTRIP].enabled ? 1 : 0;
+               unsigned int rate = filmstrip_event & 0xff;
+               unsigned int resize_factor = (filmstrip_event >> 8) & 0xff;
+
+               pr_debug("gator: mali online _mali_profiling_control symbol @ %p\n", mali_control);
+
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+               mali_control(FBDUMP_CONTROL_ENABLE, enabled);
+               mali_control(FBDUMP_CONTROL_RATE, rate);
+               mali_control(FBDUMP_CONTROL_RESIZE_FACTOR, resize_factor);
+
+               pr_debug("gator: sent mali_control enabled=%d, rate=%d, resize_factor=%d\n", enabled, rate, resize_factor);
+
+               symbol_put(_mali_profiling_control);
+       } else {
+               pr_err("gator: mali online _mali_profiling_control symbol not found\n");
+       }
+
+       /*
+        * Set the first timestamp for calculating the sample interval. The first interval could be quite long,
+        * since it will be the time between 'start' and the first 'read'.
+        * This means that timeline values will be divided by a big number for the first sample.
+        */
+       getnstimeofday(&prev_timestamp);
+
+       return 0;
+}
+
+static void stop(void)
+{
+       mali_profiling_control_type *mali_control;
+
+       pr_debug("gator: Mali-Midgard: stop\n");
+
+       /*
+        * It is safe to unregister traces even if they were not successfully
+        * registered, so no need to check.
+        */
+       GATOR_UNREGISTER_TRACE(mali_pm_status);
+       pr_debug("gator: Mali-Midgard: mali_pm_status tracepoint deactivated\n");
+
+       GATOR_UNREGISTER_TRACE(mali_page_fault_insert_pages);
+       pr_debug("gator: Mali-Midgard: mali_page_fault_insert_pages tracepoint deactivated\n");
+
+       GATOR_UNREGISTER_TRACE(mali_mmu_as_in_use);
+       pr_debug("gator: Mali-Midgard: mali_mmu_as_in_use tracepoint deactivated\n");
+
+       GATOR_UNREGISTER_TRACE(mali_mmu_as_released);
+       pr_debug("gator: Mali-Midgard: mali_mmu_as_released tracepoint deactivated\n");
+
+       GATOR_UNREGISTER_TRACE(mali_total_alloc_pages_change);
+       pr_debug("gator: Mali-Midgard: mali_total_alloc_pages_change tracepoint deactivated\n");
+
+       /* Generic control interface for Mali DDK. */
+       mali_control = symbol_get(_mali_profiling_control);
+       if (mali_control) {
+               pr_debug("gator: mali offline _mali_profiling_control symbol @ %p\n", mali_control);
+
+               mali_control(FBDUMP_CONTROL_ENABLE, 0);
+
+               symbol_put(_mali_profiling_control);
+       } else {
+               pr_err("gator: mali offline _mali_profiling_control symbol not found\n");
+       }
+}
+
+static int read(int **buffer, bool sched_switch)
+{
+       int cnt;
+       int len = 0;
+       long sample_interval_us = 0;
+       struct timespec read_timestamp;
+
+       if (!on_primary_core())
+               return 0;
+
+       /* Get the start of this sample period. */
+       getnstimeofday(&read_timestamp);
+
+       /*
+        * Calculate the sample interval if the previous sample time is valid.
+        * We use tv_sec since it will not be 0.
+        */
+       if (prev_timestamp.tv_sec != 0)
+               sample_interval_us = get_duration_us(&prev_timestamp, &read_timestamp);
+
+       /* Structure copy. Update the previous timestamp. */
+       prev_timestamp = read_timestamp;
+
+       /*
+        * Report the timeline counters (ACTIVITY_START/STOP)
+        */
+       for (cnt = FIRST_TIMELINE_EVENT; cnt < (FIRST_TIMELINE_EVENT + NUMBER_OF_TIMELINE_EVENTS); cnt++) {
+               struct mali_counter *counter = &counters[cnt];
+
+               if (counter->enabled) {
+                       const int index = cnt - FIRST_TIMELINE_EVENT;
+                       unsigned int value;
+
+                       /* If the activity is still running, reset its start time to the
+                        * start of this sample period to correct the count. Add the
+                        * time up to the end of the sample onto the count.
+                        */
+                       if (timeline_event_starttime[index].tv_sec != 0) {
+                               const long event_duration = get_duration_us(&timeline_event_starttime[index], &read_timestamp);
+
+                               timeline_data[index] += event_duration;
+                               timeline_event_starttime[index] = read_timestamp;       /* Activity is still running. */
+                       }
+
+                       if (sample_interval_us != 0) {
+                               /* Convert the counter to a percent-of-sample value */
+                               value = (timeline_data[index] * 100) / sample_interval_us;
+                       } else {
+                               pr_debug("gator: Mali-Midgard: setting value to zero\n");
+                               value = 0;
+                       }
+
+                       /* Clear the counter value ready for the next sample. */
+                       timeline_data[index] = 0;
+
+                       counter_dump[len++] = counter->key;
+                       counter_dump[len++] = value;
+               }
+       }
+
+       /* Report the software counters */
+       for (cnt = FIRST_SOFTWARE_COUNTER; cnt < (FIRST_SOFTWARE_COUNTER + NUMBER_OF_SOFTWARE_COUNTERS); cnt++) {
+               const struct mali_counter *counter = &counters[cnt];
+
+               if (counter->enabled) {
+                       const int index = cnt - FIRST_SOFTWARE_COUNTER;
+
+                       counter_dump[len++] = counter->key;
+                       counter_dump[len++] = sw_counter_data[index];
+                       /* Set the value to zero for the next time */
+                       sw_counter_data[index] = 0;
+               }
+       }
+
+       /* Report the accumulators */
+       for (cnt = FIRST_ACCUMULATOR; cnt < (FIRST_ACCUMULATOR + NUMBER_OF_ACCUMULATORS); cnt++) {
+               const struct mali_counter *counter = &counters[cnt];
+
+               if (counter->enabled) {
+                       const int index = cnt - FIRST_ACCUMULATOR;
+
+                       counter_dump[len++] = counter->key;
+                       counter_dump[len++] = accumulators_data[index];
+                       /* Do not zero the accumulator */
+               }
+       }
+
+       /* Update the buffer */
+       if (buffer)
+               *buffer = counter_dump;
+
+       return len;
+}
+
+static struct gator_interface gator_events_mali_midgard_interface = {
+       .create_files = create_files,
+       .start = start,
+       .stop = stop,
+       .read = read
+};
+
+extern int gator_events_mali_midgard_init(void)
+{
+       pr_debug("gator: Mali-Midgard: sw_counters init\n");
+
+       gator_mali_initialise_counters(counters, NUMBER_OF_EVENTS);
+
+       return gator_events_install(&gator_events_mali_midgard_interface);
+}
diff --git a/drivers/gator/gator_events_mali_midgard_hw.c b/drivers/gator/gator_events_mali_midgard_hw.c
new file mode 100644 (file)
index 0000000..c8065da
--- /dev/null
@@ -0,0 +1,977 @@
+/**
+ * Copyright (C) ARM Limited 2012-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+/* Mali Midgard DDK includes */
+#if defined(MALI_SIMPLE_API)
+/* Header with wrapper functions to kbase structures and functions */
+#include "mali/mali_kbase_gator_api.h"
+#elif defined(MALI_DIR_MIDGARD)
+/* New DDK Directory structure with kernel/drivers/gpu/arm/midgard */
+#include "mali_linux_trace.h"
+#include "mali_kbase.h"
+#include "mali_kbase_mem_linux.h"
+#else
+/* Old DDK Directory structure with kernel/drivers/gpu/arm/t6xx */
+#include "linux/mali_linux_trace.h"
+#include "kbase/src/common/mali_kbase.h"
+#include "kbase/src/linux/mali_kbase_mem_linux.h"
+#endif
+
+/* If API version is not specified then assume API version 1. */
+#ifndef MALI_DDK_GATOR_API_VERSION
+#define MALI_DDK_GATOR_API_VERSION 1
+#endif
+
+#if (MALI_DDK_GATOR_API_VERSION != 1) && (MALI_DDK_GATOR_API_VERSION != 2) && (MALI_DDK_GATOR_API_VERSION != 3)
+#error MALI_DDK_GATOR_API_VERSION is invalid (must be 1 for r1/r2 DDK, or 2 for r3/r4 DDK, or 3 for r5 and later DDK).
+#endif
+
+#include "gator_events_mali_common.h"
+
+/*
+ * Mali-Midgard
+ */
+#if MALI_DDK_GATOR_API_VERSION == 3
+static uint32_t (*kbase_gator_instr_hwcnt_dump_irq_symbol)(struct kbase_gator_hwcnt_handles *);
+static uint32_t (*kbase_gator_instr_hwcnt_dump_complete_symbol)(struct kbase_gator_hwcnt_handles *, uint32_t *const);
+static struct kbase_gator_hwcnt_handles *(*kbase_gator_hwcnt_init_symbol)(struct kbase_gator_hwcnt_info *);
+static void (*kbase_gator_hwcnt_term_symbol)(struct kbase_gator_hwcnt_info *, struct kbase_gator_hwcnt_handles *);
+
+#else
+static struct kbase_device *(*kbase_find_device_symbol)(int);
+static struct kbase_context *(*kbase_create_context_symbol)(struct kbase_device *);
+static void (*kbase_destroy_context_symbol)(struct kbase_context *);
+
+#if MALI_DDK_GATOR_API_VERSION == 1
+static void *(*kbase_va_alloc_symbol)(struct kbase_context *, u32);
+static void (*kbase_va_free_symbol)(struct kbase_context *, void *);
+#elif MALI_DDK_GATOR_API_VERSION == 2
+static void *(*kbase_va_alloc_symbol)(struct kbase_context *, u32, struct kbase_hwc_dma_mapping *);
+static void (*kbase_va_free_symbol)(struct kbase_context *, struct kbase_hwc_dma_mapping *);
+#endif
+
+static mali_error (*kbase_instr_hwcnt_enable_symbol)(struct kbase_context *, struct kbase_uk_hwcnt_setup *);
+static mali_error (*kbase_instr_hwcnt_disable_symbol)(struct kbase_context *);
+static mali_error (*kbase_instr_hwcnt_clear_symbol)(struct kbase_context *);
+static mali_error (*kbase_instr_hwcnt_dump_irq_symbol)(struct kbase_context *);
+static mali_bool (*kbase_instr_hwcnt_dump_complete_symbol)(struct kbase_context *, mali_bool *);
+
+static long shader_present_low;
+#endif
+
+/** The interval between reads, in ns.
+ *
+ * Earlier we introduced a 'hold off for 1ms after last read' to
+ * resolve MIDBASE-2178 and MALINE-724. However, the 1ms hold off is
+ * too long if no context switches occur as there is a race between
+ * this value and the tick of the read clock in gator which is also
+ * 1ms. If we 'miss' the current read, the counter values are
+ * effectively 'spread' over 2ms and the values seen are half what
+ * they should be (since Streamline averages over sample time). In the
+ * presence of context switches this spread can vary and markedly
+ * affect the counters. Currently there is no 'proper' solution to
+ * this, but empirically we have found that reducing the minimum read
+ * interval to 950us causes the counts to be much more stable.
+ */
+static const int READ_INTERVAL_NSEC = 950000;
+
+#if GATOR_TEST
+#include "gator_events_mali_midgard_hw_test.c"
+#endif
+
+#if MALI_DDK_GATOR_API_VERSION != 3
+/* Blocks for HW counters */
+enum {
+       JM_BLOCK = 0,
+       TILER_BLOCK,
+       SHADER_BLOCK,
+       MMU_BLOCK
+};
+#endif
+
+static const char *mali_name;
+
+/* Counters for Mali-Midgard:
+ *
+ *    For HW counters we need strings to create /dev/gator/events files.
+ *    Enums are not needed because the position of the HW name in the array is the same
+ *    of the corresponding value in the received block of memory.
+ *    HW counters are requested by calculating a bitmask, passed then to the driver.
+ *    Every millisecond a HW counters dump is requested, and if the previous has been completed they are read.
+ */
+
+/* Hardware Counters */
+#if MALI_DDK_GATOR_API_VERSION == 3
+
+static const char *const *hardware_counter_names;
+static int number_of_hardware_counters;
+
+#else
+
+static const char *const hardware_counter_names[] = {
+       /* Job Manager */
+       "",
+       "",
+       "",
+       "",
+       "MESSAGES_SENT",
+       "MESSAGES_RECEIVED",
+       "GPU_ACTIVE",           /* 6 */
+       "IRQ_ACTIVE",
+       "JS0_JOBS",
+       "JS0_TASKS",
+       "JS0_ACTIVE",
+       "",
+       "JS0_WAIT_READ",
+       "JS0_WAIT_ISSUE",
+       "JS0_WAIT_DEPEND",
+       "JS0_WAIT_FINISH",
+       "JS1_JOBS",
+       "JS1_TASKS",
+       "JS1_ACTIVE",
+       "",
+       "JS1_WAIT_READ",
+       "JS1_WAIT_ISSUE",
+       "JS1_WAIT_DEPEND",
+       "JS1_WAIT_FINISH",
+       "JS2_JOBS",
+       "JS2_TASKS",
+       "JS2_ACTIVE",
+       "",
+       "JS2_WAIT_READ",
+       "JS2_WAIT_ISSUE",
+       "JS2_WAIT_DEPEND",
+       "JS2_WAIT_FINISH",
+       "JS3_JOBS",
+       "JS3_TASKS",
+       "JS3_ACTIVE",
+       "",
+       "JS3_WAIT_READ",
+       "JS3_WAIT_ISSUE",
+       "JS3_WAIT_DEPEND",
+       "JS3_WAIT_FINISH",
+       "JS4_JOBS",
+       "JS4_TASKS",
+       "JS4_ACTIVE",
+       "",
+       "JS4_WAIT_READ",
+       "JS4_WAIT_ISSUE",
+       "JS4_WAIT_DEPEND",
+       "JS4_WAIT_FINISH",
+       "JS5_JOBS",
+       "JS5_TASKS",
+       "JS5_ACTIVE",
+       "",
+       "JS5_WAIT_READ",
+       "JS5_WAIT_ISSUE",
+       "JS5_WAIT_DEPEND",
+       "JS5_WAIT_FINISH",
+       "JS6_JOBS",
+       "JS6_TASKS",
+       "JS6_ACTIVE",
+       "",
+       "JS6_WAIT_READ",
+       "JS6_WAIT_ISSUE",
+       "JS6_WAIT_DEPEND",
+       "JS6_WAIT_FINISH",
+
+       /*Tiler */
+       "",
+       "",
+       "",
+       "JOBS_PROCESSED",
+       "TRIANGLES",
+       "QUADS",
+       "POLYGONS",
+       "POINTS",
+       "LINES",
+       "VCACHE_HIT",
+       "VCACHE_MISS",
+       "FRONT_FACING",
+       "BACK_FACING",
+       "PRIM_VISIBLE",
+       "PRIM_CULLED",
+       "PRIM_CLIPPED",
+       "LEVEL0",
+       "LEVEL1",
+       "LEVEL2",
+       "LEVEL3",
+       "LEVEL4",
+       "LEVEL5",
+       "LEVEL6",
+       "LEVEL7",
+       "COMMAND_1",
+       "COMMAND_2",
+       "COMMAND_3",
+       "COMMAND_4",
+       "COMMAND_4_7",
+       "COMMAND_8_15",
+       "COMMAND_16_63",
+       "COMMAND_64",
+       "COMPRESS_IN",
+       "COMPRESS_OUT",
+       "COMPRESS_FLUSH",
+       "TIMESTAMPS",
+       "PCACHE_HIT",
+       "PCACHE_MISS",
+       "PCACHE_LINE",
+       "PCACHE_STALL",
+       "WRBUF_HIT",
+       "WRBUF_MISS",
+       "WRBUF_LINE",
+       "WRBUF_PARTIAL",
+       "WRBUF_STALL",
+       "ACTIVE",
+       "LOADING_DESC",
+       "INDEX_WAIT",
+       "INDEX_RANGE_WAIT",
+       "VERTEX_WAIT",
+       "PCACHE_WAIT",
+       "WRBUF_WAIT",
+       "BUS_READ",
+       "BUS_WRITE",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "UTLB_STALL",
+       "UTLB_REPLAY_MISS",
+       "UTLB_REPLAY_FULL",
+       "UTLB_NEW_MISS",
+       "UTLB_HIT",
+
+       /* Shader Core */
+       "",
+       "",
+       "",
+       "SHADER_CORE_ACTIVE",
+       "FRAG_ACTIVE",
+       "FRAG_PRIMATIVES",
+       "FRAG_PRIMATIVES_DROPPED",
+       "FRAG_CYCLE_DESC",
+       "FRAG_CYCLES_PLR",
+       "FRAG_CYCLES_VERT",
+       "FRAG_CYCLES_TRISETUP",
+       "FRAG_CYCLES_RAST",
+       "FRAG_THREADS",
+       "FRAG_DUMMY_THREADS",
+       "FRAG_QUADS_RAST",
+       "FRAG_QUADS_EZS_TEST",
+       "FRAG_QUADS_EZS_KILLED",
+       "FRAG_QUADS_LZS_TEST",
+       "FRAG_QUADS_LZS_KILLED",
+       "FRAG_CYCLE_NO_TILE",
+       "FRAG_NUM_TILES",
+       "FRAG_TRANS_ELIM",
+       "COMPUTE_ACTIVE",
+       "COMPUTE_TASKS",
+       "COMPUTE_THREADS",
+       "COMPUTE_CYCLES_DESC",
+       "TRIPIPE_ACTIVE",
+       "ARITH_WORDS",
+       "ARITH_CYCLES_REG",
+       "ARITH_CYCLES_L0",
+       "ARITH_FRAG_DEPEND",
+       "LS_WORDS",
+       "LS_ISSUES",
+       "LS_RESTARTS",
+       "LS_REISSUES_MISS",
+       "LS_REISSUES_VD",
+       "LS_REISSUE_ATTRIB_MISS",
+       "LS_NO_WB",
+       "TEX_WORDS",
+       "TEX_BUBBLES",
+       "TEX_WORDS_L0",
+       "TEX_WORDS_DESC",
+       "TEX_THREADS",
+       "TEX_RECIRC_FMISS",
+       "TEX_RECIRC_DESC",
+       "TEX_RECIRC_MULTI",
+       "TEX_RECIRC_PMISS",
+       "TEX_RECIRC_CONF",
+       "LSC_READ_HITS",
+       "LSC_READ_MISSES",
+       "LSC_WRITE_HITS",
+       "LSC_WRITE_MISSES",
+       "LSC_ATOMIC_HITS",
+       "LSC_ATOMIC_MISSES",
+       "LSC_LINE_FETCHES",
+       "LSC_DIRTY_LINE",
+       "LSC_SNOOPS",
+       "AXI_TLB_STALL",
+       "AXI_TLB_MIESS",
+       "AXI_TLB_TRANSACTION",
+       "LS_TLB_MISS",
+       "LS_TLB_HIT",
+       "AXI_BEATS_READ",
+       "AXI_BEATS_WRITTEN",
+
+       /* L2 and MMU */
+       "",
+       "",
+       "",
+       "",
+       "MMU_HIT",
+       "MMU_NEW_MISS",
+       "MMU_REPLAY_FULL",
+       "MMU_REPLAY_MISS",
+       "MMU_TABLE_WALK",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "UTLB_HIT",
+       "UTLB_NEW_MISS",
+       "UTLB_REPLAY_FULL",
+       "UTLB_REPLAY_MISS",
+       "UTLB_STALL",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "L2_WRITE_BEATS",
+       "L2_READ_BEATS",
+       "L2_ANY_LOOKUP",
+       "L2_READ_LOOKUP",
+       "L2_SREAD_LOOKUP",
+       "L2_READ_REPLAY",
+       "L2_READ_SNOOP",
+       "L2_READ_HIT",
+       "L2_CLEAN_MISS",
+       "L2_WRITE_LOOKUP",
+       "L2_SWRITE_LOOKUP",
+       "L2_WRITE_REPLAY",
+       "L2_WRITE_SNOOP",
+       "L2_WRITE_HIT",
+       "L2_EXT_READ_FULL",
+       "L2_EXT_READ_HALF",
+       "L2_EXT_WRITE_FULL",
+       "L2_EXT_WRITE_HALF",
+       "L2_EXT_READ",
+       "L2_EXT_READ_LINE",
+       "L2_EXT_WRITE",
+       "L2_EXT_WRITE_LINE",
+       "L2_EXT_WRITE_SMALL",
+       "L2_EXT_BARRIER",
+       "L2_EXT_AR_STALL",
+       "L2_EXT_R_BUF_FULL",
+       "L2_EXT_RD_BUF_FULL",
+       "L2_EXT_R_RAW",
+       "L2_EXT_W_STALL",
+       "L2_EXT_W_BUF_FULL",
+       "L2_EXT_R_W_HAZARD",
+       "L2_TAG_HAZARD",
+       "L2_SNOOP_FULL",
+       "L2_REPLAY_FULL"
+};
+
+static const int number_of_hardware_counters = ARRAY_SIZE(hardware_counter_names);
+
+#endif
+
+/*
+ * Counter index encoding: bits [7:6] of the counter index select the
+ * hardware block, bits [5:0] the counter offset within that block.
+ */
+#define GET_HW_BLOCK(c) (((c) >> 6) & 0x3)
+#define GET_COUNTER_OFFSET(c) ((c) & 0x3f)
+
+#if MALI_DDK_GATOR_API_VERSION == 3
+/* Opaque handles for kbase_context and kbase_hwc_dma_mapping */
+static struct kbase_gator_hwcnt_handles *handles;
+
+/* Information about hardware counters */
+static struct kbase_gator_hwcnt_info *in_out_info;
+
+#else
+/* Memory to dump hardware counters into */
+static void *kernel_dump_buffer;
+
+#if MALI_DDK_GATOR_API_VERSION == 2
+/* DMA state used to manage lifetime of the buffer */
+struct kbase_hwc_dma_mapping kernel_dump_buffer_handle;
+#endif
+
+/* kbase context and device */
+static struct kbase_context *kbcontext;
+static struct kbase_device *kbdevice;
+
+/*
+ * The following function has no external prototype in older DDK
+ * revisions. When the DDK is updated then this should be removed.
+ */
+struct kbase_device *kbase_find_device(int minor);
+#endif
+
+/* True while a hardware-counter dump request is in flight: set in read()
+ * when a dump IRQ is issued, cleared when the dump completes (or in start()).
+ */
+static volatile bool kbase_device_busy;
+static unsigned int num_hardware_counters_enabled;
+
+/* gatorfs variables for counter enable state */
+static struct mali_counter *counters;
+
+/* An array used to return the data we recorded as key,value pairs */
+static int *counter_dump;
+
+/* Activity counters (fragment/vertex/opencl), defined in a sibling file */
+extern struct mali_counter mali_activity[3];
+
+static const char *const mali_activity_names[] = {
+       "fragment",
+       "vertex",
+       "opencl",
+};
+
+/*
+ * SYMBOL_GET(FUNCTION, ERROR_COUNT): resolve FUNCTION with symbol_get() and
+ * store the result in the module-scope pointer FUNCTION##_symbol.
+ * ERROR_COUNT is incremented if the symbol was already registered or could
+ * not be resolved.
+ */
+#define SYMBOL_GET(FUNCTION, ERROR_COUNT) \
+       do { \
+               if (FUNCTION ## _symbol) { \
+                       pr_err("gator: mali " #FUNCTION " symbol was already registered\n"); \
+                       (ERROR_COUNT)++; \
+               } else { \
+                       FUNCTION ## _symbol = symbol_get(FUNCTION); \
+                       if (!FUNCTION ## _symbol) { \
+                               pr_err("gator: mali online " #FUNCTION " symbol not found\n"); \
+                               (ERROR_COUNT)++; \
+                       } \
+               } \
+       } while (0)
+
+/*
+ * SYMBOL_CLEANUP(FUNCTION): release the reference taken by SYMBOL_GET and
+ * reset FUNCTION##_symbol to NULL.  A no-op if the symbol was never resolved.
+ */
+#define SYMBOL_CLEANUP(FUNCTION) \
+       do { \
+               if (FUNCTION ## _symbol) { \
+                       symbol_put(FUNCTION); \
+                       FUNCTION ## _symbol = NULL; \
+               } \
+       } while (0)
+
+/**
+ * Execute symbol_get for all the Mali symbols and check for success.
+ * The set of symbols requested depends on MALI_DDK_GATOR_API_VERSION.
+ * @return the number of symbols not loaded (0 on full success).
+ */
+static int init_symbols(void)
+{
+       int error_count = 0;
+#if MALI_DDK_GATOR_API_VERSION == 3
+       SYMBOL_GET(kbase_gator_instr_hwcnt_dump_irq, error_count);
+       SYMBOL_GET(kbase_gator_instr_hwcnt_dump_complete, error_count);
+       SYMBOL_GET(kbase_gator_hwcnt_init, error_count);
+       SYMBOL_GET(kbase_gator_hwcnt_term, error_count);
+#else
+       SYMBOL_GET(kbase_find_device, error_count);
+       SYMBOL_GET(kbase_create_context, error_count);
+       SYMBOL_GET(kbase_va_alloc, error_count);
+       SYMBOL_GET(kbase_instr_hwcnt_enable, error_count);
+       SYMBOL_GET(kbase_instr_hwcnt_clear, error_count);
+       SYMBOL_GET(kbase_instr_hwcnt_dump_irq, error_count);
+       SYMBOL_GET(kbase_instr_hwcnt_dump_complete, error_count);
+       SYMBOL_GET(kbase_instr_hwcnt_disable, error_count);
+       SYMBOL_GET(kbase_va_free, error_count);
+       SYMBOL_GET(kbase_destroy_context, error_count);
+#endif
+
+       return error_count;
+}
+
+/**
+ * Execute symbol_put for all the registered Mali symbols.
+ * Safe to call even if some symbols were never resolved, since
+ * SYMBOL_CLEANUP skips NULL entries.
+ */
+static void clean_symbols(void)
+{
+#if MALI_DDK_GATOR_API_VERSION == 3
+       SYMBOL_CLEANUP(kbase_gator_instr_hwcnt_dump_irq);
+       SYMBOL_CLEANUP(kbase_gator_instr_hwcnt_dump_complete);
+       SYMBOL_CLEANUP(kbase_gator_hwcnt_init);
+       SYMBOL_CLEANUP(kbase_gator_hwcnt_term);
+#else
+       SYMBOL_CLEANUP(kbase_find_device);
+       SYMBOL_CLEANUP(kbase_create_context);
+       SYMBOL_CLEANUP(kbase_va_alloc);
+       SYMBOL_CLEANUP(kbase_instr_hwcnt_enable);
+       SYMBOL_CLEANUP(kbase_instr_hwcnt_clear);
+       SYMBOL_CLEANUP(kbase_instr_hwcnt_dump_irq);
+       SYMBOL_CLEANUP(kbase_instr_hwcnt_dump_complete);
+       SYMBOL_CLEANUP(kbase_instr_hwcnt_disable);
+       SYMBOL_CLEANUP(kbase_va_free);
+       SYMBOL_CLEANUP(kbase_destroy_context);
+#endif
+}
+
+/**
+ * Determines whether a read should take place
+ * @param current_time The current time, obtained from getnstimeofday()
+ * @param prev_time_s The number of seconds at the previous read attempt.
+ * @param next_read_time_ns The time (in ns) when the next read should be allowed.
+ * @return 1 if a read should take place now (state updated), 0 otherwise.
+ *
+ * Note that this function has been separated out here to allow it to be tested.
+ */
+static int is_read_scheduled(const struct timespec *current_time, u32 *prev_time_s, s32 *next_read_time_ns)
+{
+       /* If the current ns count rolls over a second, roll the next read time too. */
+       if (current_time->tv_sec != *prev_time_s)
+               *next_read_time_ns = *next_read_time_ns - NSEC_PER_SEC;
+
+       /* Abort the read if the next read time has not arrived. */
+       if (current_time->tv_nsec < *next_read_time_ns)
+               return 0;
+
+       /* Set the next read some fixed time after this one, and update the read timestamp. */
+       *next_read_time_ns = current_time->tv_nsec + READ_INTERVAL_NSEC;
+
+       *prev_time_s = current_time->tv_sec;
+       return 1;
+}
+
+/**
+ * Start collection: build the per-block enable bitmasks from the enabled
+ * counters, resolve the Mali kbase symbols and set up a hardware-counter
+ * dump context.
+ * @return 0 on success (also when no counters are enabled, or when no Mali
+ *         driver entrypoints are found), -1 on failure.
+ */
+static int start(void)
+{
+#if MALI_DDK_GATOR_API_VERSION != 3
+       struct kbase_uk_hwcnt_setup setup;
+       unsigned long long shadersPresent = 0;
+       u16 bitmask[] = { 0, 0, 0, 0 };
+       mali_error err;
+#endif
+       int cnt;
+
+#if MALI_DDK_GATOR_API_VERSION == 3
+       /* Setup HW counters */
+       num_hardware_counters_enabled = 0;
+
+       /* Declare and initialise kbase_gator_hwcnt_info structure */
+       /* NOTE(review): kmalloc result is dereferenced unchecked below --
+        * confirm OOM handling is acceptable here. */
+       in_out_info = kmalloc(sizeof(*in_out_info), GFP_KERNEL);
+       for (cnt = 0; cnt < ARRAY_SIZE(in_out_info->bitmask); cnt++)
+               in_out_info->bitmask[cnt] = 0;
+
+       /* Calculate enable bitmasks based on counters_enabled array */
+       for (cnt = 0; cnt < number_of_hardware_counters; cnt++) {
+               if (counters[cnt].enabled) {
+                       int block = GET_HW_BLOCK(cnt);
+                       int enable_bit = GET_COUNTER_OFFSET(cnt) / 4;
+
+                       in_out_info->bitmask[block] |= (1 << enable_bit);
+                       pr_debug("gator: Mali-Midgard: hardware counter %s selected [%d]\n", hardware_counter_names[cnt], cnt);
+                       num_hardware_counters_enabled++;
+               }
+       }
+
+       /* Create a kbase context for HW counters */
+       if (num_hardware_counters_enabled > 0) {
+               if (init_symbols() > 0) {
+                       clean_symbols();
+                       /* No Mali driver code entrypoints found - not a fault. */
+                       return 0;
+               }
+
+               handles = kbase_gator_hwcnt_init_symbol(in_out_info);
+
+               if (handles == NULL)
+                       goto out;
+
+               kbase_device_busy = false;
+       }
+
+       return 0;
+#else
+       /* Setup HW counters */
+       num_hardware_counters_enabled = 0;
+
+       /* Calculate enable bitmasks based on counters_enabled array */
+       for (cnt = 0; cnt < number_of_hardware_counters; cnt++) {
+               const struct mali_counter *counter = &counters[cnt];
+
+               if (counter->enabled) {
+                       int block = GET_HW_BLOCK(cnt);
+                       int enable_bit = GET_COUNTER_OFFSET(cnt) / 4;
+
+                       bitmask[block] |= (1 << enable_bit);
+                       pr_debug("gator: Mali-Midgard: hardware counter %s selected [%d]\n", hardware_counter_names[cnt], cnt);
+                       num_hardware_counters_enabled++;
+               }
+       }
+
+       /* Create a kbase context for HW counters */
+       if (num_hardware_counters_enabled > 0) {
+               if (init_symbols() > 0) {
+                       clean_symbols();
+                       /* No Mali driver code entrypoints found - not a fault. */
+                       return 0;
+               }
+
+               /* NOTE(review): return value is not checked for NULL before
+                * being passed to kbase_create_context_symbol -- verify. */
+               kbdevice = kbase_find_device_symbol(-1);
+
+               /* If we already got a context, fail */
+               if (kbcontext) {
+                       pr_debug("gator: Mali-Midgard: error context already present\n");
+                       goto out;
+               }
+
+               /* kbcontext will only be valid after all the Mali symbols are loaded successfully */
+               kbcontext = kbase_create_context_symbol(kbdevice);
+               if (!kbcontext) {
+                       pr_debug("gator: Mali-Midgard: error creating kbase context\n");
+                       goto out;
+               }
+
+               /* See if we can get the number of shader cores */
+               shadersPresent = kbdevice->shader_present_bitmap;
+               shader_present_low = (unsigned long)shadersPresent;
+
+               /*
+                * The amount of memory needed to store the dump (bytes)
+                * DUMP_SIZE = number of core groups
+                *             * number of blocks (always 8 for midgard)
+                *             * number of counters per block (always 64 for midgard)
+                *             * number of bytes per counter (always 4 in midgard)
+                * For a Mali-Midgard with a single core group = 1 * 8 * 64 * 4 = 2048
+                * For a Mali-Midgard with a dual core group   = 2 * 8 * 64 * 4 = 4096
+                */
+#if MALI_DDK_GATOR_API_VERSION == 1
+               kernel_dump_buffer = kbase_va_alloc_symbol(kbcontext, 4096);
+#elif MALI_DDK_GATOR_API_VERSION == 2
+               kernel_dump_buffer = kbase_va_alloc_symbol(kbcontext, 4096, &kernel_dump_buffer_handle);
+#endif
+               if (!kernel_dump_buffer) {
+                       pr_debug("gator: Mali-Midgard: error trying to allocate va\n");
+                       goto destroy_context;
+               }
+
+               setup.dump_buffer = (uintptr_t)kernel_dump_buffer;
+               setup.jm_bm = bitmask[JM_BLOCK];
+               setup.tiler_bm = bitmask[TILER_BLOCK];
+               setup.shader_bm = bitmask[SHADER_BLOCK];
+               setup.mmu_l2_bm = bitmask[MMU_BLOCK];
+               /* These counters do not exist on Mali-T60x */
+               setup.l3_cache_bm = 0;
+
+               /* Use kbase API to enable hardware counters and provide dump buffer */
+               err = kbase_instr_hwcnt_enable_symbol(kbcontext, &setup);
+               if (err != MALI_ERROR_NONE) {
+                       pr_debug("gator: Mali-Midgard: can't setup hardware counters\n");
+                       goto free_buffer;
+               }
+               pr_debug("gator: Mali-Midgard: hardware counters enabled\n");
+               kbase_instr_hwcnt_clear_symbol(kbcontext);
+               pr_debug("gator: Mali-Midgard: hardware counters cleared\n");
+
+               kbase_device_busy = false;
+       }
+
+       return 0;
+
+free_buffer:
+#if MALI_DDK_GATOR_API_VERSION == 1
+       kbase_va_free_symbol(kbcontext, kernel_dump_buffer);
+#elif MALI_DDK_GATOR_API_VERSION == 2
+       kbase_va_free_symbol(kbcontext, &kernel_dump_buffer_handle);
+#endif
+
+destroy_context:
+       kbase_destroy_context_symbol(kbcontext);
+#endif
+
+out:
+       clean_symbols();
+       return -1;
+}
+
+/**
+ * Stop collection: disable all counters and tear down the hardware-counter
+ * context created by start(), then release the kbase symbols.
+ */
+static void stop(void)
+{
+       unsigned int cnt;
+#if MALI_DDK_GATOR_API_VERSION == 3
+       struct kbase_gator_hwcnt_handles *temp_hand;
+#else
+       struct kbase_context *temp_kbcontext;
+#endif
+
+       pr_debug("gator: Mali-Midgard: stop\n");
+
+       /* Set all counters as disabled */
+       for (cnt = 0; cnt < number_of_hardware_counters; cnt++)
+               counters[cnt].enabled = 0;
+
+       /* Destroy the context for HW counters */
+       /*
+        * Careful: the "if (...) {" below is opened inside each preprocessor
+        * branch; the single closing brace near the end of the function
+        * matches whichever branch was compiled.
+        */
+#if MALI_DDK_GATOR_API_VERSION == 3
+       if (num_hardware_counters_enabled > 0 && handles != NULL) {
+               /*
+                * Set the global variable to NULL before destroying it, because
+                * other functions check it before use.
+                */
+               temp_hand = handles;
+               handles = NULL;
+
+               kbase_gator_hwcnt_term_symbol(in_out_info, temp_hand);
+
+               kfree(in_out_info);
+
+#else
+       if (num_hardware_counters_enabled > 0 && kbcontext != NULL) {
+               /*
+                * Set the global variable to NULL before destroying it, because
+                * other functions check it before use.
+                */
+               temp_kbcontext = kbcontext;
+               kbcontext = NULL;
+
+               kbase_instr_hwcnt_disable_symbol(temp_kbcontext);
+
+#if MALI_DDK_GATOR_API_VERSION == 1
+               kbase_va_free_symbol(temp_kbcontext, kernel_dump_buffer);
+#elif MALI_DDK_GATOR_API_VERSION == 2
+               kbase_va_free_symbol(temp_kbcontext, &kernel_dump_buffer_handle);
+#endif
+
+               kbase_destroy_context_symbol(temp_kbcontext);
+#endif
+
+               pr_debug("gator: Mali-Midgard: hardware counters stopped\n");
+
+               clean_symbols();
+       }
+}
+
+/**
+ * Accumulate one hardware counter value from the dump buffer and append the
+ * (key, value) pair to counter_dump at offset @len.  Shader-block counters
+ * are summed across all shader cores and averaged.
+ * @return the number of ints written to counter_dump (always 2).
+ */
+static int read_counter(const int cnt, const int len, const struct mali_counter *counter)
+{
+       const int block = GET_HW_BLOCK(cnt);
+       const int counter_offset = GET_COUNTER_OFFSET(cnt);
+
+#if MALI_DDK_GATOR_API_VERSION == 3
+       const char *block_base_address = (char *)in_out_info->kernel_dump_buffer;
+       int i;
+       int shader_core_count = 0;
+       u32 value = 0;
+
+       for (i = 0; i < in_out_info->nr_hwc_blocks; i++) {
+               if (block == in_out_info->hwc_layout[i]) {
+                       value += *((u32 *)(block_base_address + (0x100 * i)) + counter_offset);
+                       if (block == SHADER_BLOCK)
+                               ++shader_core_count;
+               }
+       }
+
+       if (shader_core_count > 1)
+               value /= shader_core_count;
+#else
+       /* NOTE(review): in this branch neither 'value' nor 'vithar_blocks' is
+        * declared within this function -- presumably they exist at file
+        * scope in this configuration; verify this path still compiles. */
+       const char *block_base_address = (char *)kernel_dump_buffer + vithar_blocks[block];
+
+       /* If counter belongs to shader block need to take into account all cores */
+       if (block == SHADER_BLOCK) {
+               int i = 0;
+               int shader_core_count = 0;
+
+               value = 0;
+
+               for (i = 0; i < 4; i++) {
+                       if ((shader_present_low >> i) & 1) {
+                               value += *((u32 *)(block_base_address + (0x100 * i)) + counter_offset);
+                               shader_core_count++;
+                       }
+               }
+
+               for (i = 0; i < 4; i++) {
+                       if ((shader_present_low >> (i+4)) & 1) {
+                               value += *((u32 *)(block_base_address + (0x100 * i) + 0x800) + counter_offset);
+                               shader_core_count++;
+                       }
+               }
+
+               /* Need to total by number of cores to produce an average */
+               if (shader_core_count != 0)
+                       value /= shader_core_count;
+       } else {
+               value = *((u32 *)block_base_address + counter_offset);
+       }
+#endif
+
+       counter_dump[len + 0] = counter->key;
+       counter_dump[len + 1] = value;
+
+       return 2;
+}
+
+/**
+ * Gator read callback: collect the Mali hardware counter values.
+ * Rate-limited by is_read_scheduled(); once the previous dump has completed
+ * the values are gathered and a new dump IRQ is requested.
+ * @return the number of ints placed in *buffer, 0 when the read is skipped,
+ *         -1 when no valid handle/context exists.
+ */
+static int read(int **buffer, bool sched_switch)
+{
+       int cnt;
+       int len = 0;
+       uint32_t success;
+
+       struct timespec current_time;
+       /* Static state feeding is_read_scheduled() across invocations */
+       static u32 prev_time_s;
+       static s32 next_read_time_ns;
+
+       if (!on_primary_core() || sched_switch)
+               return 0;
+
+       getnstimeofday(&current_time);
+
+       /*
+        * Discard reads unless a respectable time has passed. This
+        * reduces the load on the GPU without sacrificing accuracy on
+        * the Streamline display.
+        */
+       if (!is_read_scheduled(&current_time, &prev_time_s, &next_read_time_ns))
+               return 0;
+
+       /*
+        * Report the HW counters
+        * Only process hardware counters if at least one of the hardware counters is enabled.
+        */
+       if (num_hardware_counters_enabled > 0) {
+#if MALI_DDK_GATOR_API_VERSION != 3
+               /* NOTE(review): this local appears unused in this function;
+                * read_counter() references an identifier of the same name --
+                * confirm the intended scope of vithar_blocks. */
+               const unsigned int vithar_blocks[] = {
+                       0x700,  /* VITHAR_JOB_MANAGER,     Block 0 */
+                       0x400,  /* VITHAR_TILER,           Block 1 */
+                       0x000,  /* VITHAR_SHADER_CORE,     Block 2 */
+                       0x500   /* VITHAR_MEMORY_SYSTEM,   Block 3 */
+               };
+#endif
+
+#if MALI_DDK_GATOR_API_VERSION == 3
+               if (!handles)
+                       return -1;
+
+               /* Mali symbols can be called safely since a kbcontext is valid */
+               if (kbase_gator_instr_hwcnt_dump_complete_symbol(handles, &success) == MALI_TRUE) {
+#else
+               if (!kbcontext)
+                       return -1;
+
+               /* Mali symbols can be called safely since a kbcontext is valid */
+               if (kbase_instr_hwcnt_dump_complete_symbol(kbcontext, &success) == MALI_TRUE) {
+#endif
+                       kbase_device_busy = false;
+
+                       if (success == MALI_TRUE) {
+                               /* Cycle through hardware counters and accumulate totals */
+                               for (cnt = 0; cnt < number_of_hardware_counters; cnt++) {
+                                       const struct mali_counter *counter = &counters[cnt];
+
+                                       if (counter->enabled)
+                                               len += read_counter(cnt, len, counter);
+                               }
+                       }
+               }
+
+               /* Kick off the next dump once the device is idle */
+               if (!kbase_device_busy) {
+                       kbase_device_busy = true;
+#if MALI_DDK_GATOR_API_VERSION == 3
+                       kbase_gator_instr_hwcnt_dump_irq_symbol(handles);
+#else
+                       kbase_instr_hwcnt_dump_irq_symbol(kbcontext);
+#endif
+               }
+       }
+
+       /* Update the buffer */
+       if (buffer)
+               *buffer = counter_dump;
+
+       return len;
+}
+
+/**
+ * Create the gatorfs entries for the activity counters and every hardware
+ * counter.
+ * @return 0 on success, -1 if any file could not be created.
+ */
+static int create_files(struct super_block *sb, struct dentry *root)
+{
+       unsigned int event;
+       /*
+        * Create the filesystem for all events
+        */
+       for (event = 0; event < ARRAY_SIZE(mali_activity); event++) {
+               if (gator_mali_create_file_system("Midgard", mali_activity_names[event], sb, root, &mali_activity[event], NULL) != 0)
+                       return -1;
+       }
+
+       for (event = 0; event < number_of_hardware_counters; event++) {
+               if (gator_mali_create_file_system(mali_name, hardware_counter_names[event], sb, root, &counters[event], NULL) != 0)
+                       return -1;
+       }
+
+       return 0;
+}
+
+/**
+ * Module shutdown: free the allocations made in the init function and, on
+ * DDK API v3, ask the Mali driver to release the counter-name table.
+ */
+static void shutdown(void)
+{
+#if MALI_DDK_GATOR_API_VERSION == 3
+       void (*kbase_gator_hwcnt_term_names_symbol)(void) = NULL;
+       int error_count = 0;
+#endif
+
+       kfree(counters);
+       kfree(counter_dump);
+
+#if MALI_DDK_GATOR_API_VERSION == 3
+       SYMBOL_GET(kbase_gator_hwcnt_term_names, error_count);
+
+       /* NOTE(review): these assignments imply that, for API v3,
+        * number_of_hardware_counters and hardware_counter_names are mutable
+        * globals declared earlier in the file -- confirm. */
+       number_of_hardware_counters = -1;
+       hardware_counter_names = NULL;
+       if (kbase_gator_hwcnt_term_names_symbol != NULL) {
+               kbase_gator_hwcnt_term_names_symbol();
+               pr_err("Released symbols\n");
+       }
+
+       SYMBOL_CLEANUP(kbase_gator_hwcnt_term_names);
+#endif
+}
+
+/* Callback table registered with gator via gator_events_install(). */
+static struct gator_interface gator_events_mali_midgard_interface = {
+       .shutdown = shutdown,
+       .create_files = create_files,
+       .start = start,
+       .stop = stop,
+       .read = read
+};
+
+/**
+ * One-time initialisation: obtain the hardware counter name table (from the
+ * Mali driver on DDK API v3), allocate the counter state and dump arrays,
+ * and register this event source with gator.
+ * @return the result of gator_events_install(), 1 or -1 on early failure.
+ */
+int gator_events_mali_midgard_hw_init(void)
+{
+#if MALI_DDK_GATOR_API_VERSION == 3
+       const char *const *(*kbase_gator_hwcnt_init_names_symbol)(uint32_t *) = NULL;
+       int error_count = 0;
+#endif
+
+       pr_debug("gator: Mali-Midgard: sw_counters init\n");
+
+#if GATOR_TEST
+       test_all_is_read_scheduled();
+#endif
+
+#if MALI_DDK_GATOR_API_VERSION == 3
+       SYMBOL_GET(kbase_gator_hwcnt_init_names, error_count);
+       if (error_count > 0) {
+               SYMBOL_CLEANUP(kbase_gator_hwcnt_init_names);
+               return 1;
+       }
+
+       number_of_hardware_counters = -1;
+       hardware_counter_names = kbase_gator_hwcnt_init_names_symbol(&number_of_hardware_counters);
+
+       SYMBOL_CLEANUP(kbase_gator_hwcnt_init_names);
+
+       if ((hardware_counter_names == NULL) || (number_of_hardware_counters <= 0)) {
+               pr_err("gator: Error reading hardware counters names: got %d names\n", number_of_hardware_counters);
+               return -1;
+       }
+#else
+       mali_name = "Midgard";
+#endif
+
+       /* NOTE(review): both kmalloc results are used unchecked by the
+        * initialisation below -- confirm OOM handling is acceptable. */
+       counters = kmalloc(sizeof(*counters)*number_of_hardware_counters, GFP_KERNEL);
+       counter_dump = kmalloc(sizeof(*counter_dump)*number_of_hardware_counters*2, GFP_KERNEL);
+
+       gator_mali_initialise_counters(mali_activity, ARRAY_SIZE(mali_activity));
+       gator_mali_initialise_counters(counters, number_of_hardware_counters);
+
+       return gator_events_install(&gator_events_mali_midgard_interface);
+}
diff --git a/drivers/gator/gator_events_mali_midgard_hw_test.c b/drivers/gator/gator_events_mali_midgard_hw_test.c
new file mode 100644 (file)
index 0000000..31a91e1
--- /dev/null
@@ -0,0 +1,55 @@
+/**
+ * Copyright (C) ARM Limited 2012-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/**
+ * Test functions for mali_t600_hw code.
+ */
+
+/* Forward declaration of the function under test (defined in the main file). */
+static int is_read_scheduled(const struct timespec *current_time, u32 *prev_time_s, s32 *next_read_time_ns);
+
+/**
+ * Run is_read_scheduled() with the given inputs and check both its return
+ * value and the updated next-read time against the expected values.
+ * @return 1 if the case passed, 0 otherwise (details logged via pr_err).
+ */
+static int test_is_read_scheduled(u32 s, u32 ns, u32 prev_s, s32 next_ns, int expected_result, s32 expected_next_ns)
+{
+       struct timespec current_time;
+       u32 prev_time_s = prev_s;
+       s32 next_read_time_ns = next_ns;
+
+       current_time.tv_sec = s;
+       current_time.tv_nsec = ns;
+
+       if (is_read_scheduled(&current_time, &prev_time_s, &next_read_time_ns) != expected_result) {
+               pr_err("Failed do_read(%u, %u, %u, %d): expected %d\n", s, ns, prev_s, next_ns, expected_result);
+               return 0;
+       }
+
+       if (next_read_time_ns != expected_next_ns) {
+               pr_err("Failed: next_read_ns expected=%d, actual=%d\n", expected_next_ns, next_read_time_ns);
+               return 0;
+       }
+
+       return 1;
+}
+
+/**
+ * Exercise is_read_scheduled() over a set of fixed cases, including the
+ * nanosecond roll-over across a second boundary, and log the pass count.
+ */
+static void test_all_is_read_scheduled(void)
+{
+       const int HIGHEST_NS = 999999999;
+       int n_tests_passed = 0;
+
+       pr_err("gator: running tests on %s\n", __FILE__);
+
+       n_tests_passed += test_is_read_scheduled(0, 0, 0, 0, 1, READ_INTERVAL_NSEC);    /* Null time */
+       n_tests_passed += test_is_read_scheduled(100, 1000, 0, 0, 1, READ_INTERVAL_NSEC + 1000);        /* Initial values */
+
+       n_tests_passed += test_is_read_scheduled(100, HIGHEST_NS, 100, HIGHEST_NS + 500, 0, HIGHEST_NS + 500);
+       n_tests_passed += test_is_read_scheduled(101, 0001, 100, HIGHEST_NS + 500, 0, HIGHEST_NS + 500 - NSEC_PER_SEC);
+       n_tests_passed += test_is_read_scheduled(101, 600, 100, HIGHEST_NS + 500 - NSEC_PER_SEC, 1, 600 + READ_INTERVAL_NSEC);
+
+       n_tests_passed += test_is_read_scheduled(101, 600, 100, HIGHEST_NS + 500, 1, 600 + READ_INTERVAL_NSEC);
+
+       pr_err("gator: %d tests passed\n", n_tests_passed);
+}
diff --git a/drivers/gator/gator_events_mali_t6xx.c b/drivers/gator/gator_events_mali_t6xx.c
deleted file mode 100644 (file)
index 76f14ee..0000000
+++ /dev/null
@@ -1,566 +0,0 @@
-/**
- * Copyright (C) ARM Limited 2011-2014. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include "gator.h"
-
-#include <linux/module.h>
-#include <linux/time.h>
-#include <linux/math64.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-
-#ifdef MALI_DIR_MIDGARD
-/* New DDK Directory structure with kernel/drivers/gpu/arm/midgard*/
-#include "mali_linux_trace.h"
-#else
-/* Old DDK Directory structure with kernel/drivers/gpu/arm/t6xx*/
-#include "linux/mali_linux_trace.h"
-#endif
-
-#include "gator_events_mali_common.h"
-
-/*
- * Check that the MALI_SUPPORT define is set to one of the allowable device codes.
- */
-#if (MALI_SUPPORT != MALI_T6xx)
-#error MALI_SUPPORT set to an invalid device code: expecting MALI_T6xx
-#endif
-
-/* Counters for Mali-T6xx:
- *
- *  - Timeline events
- *    They are tracepoints, but instead of reporting a number they report a START/STOP event.
- *    They are reported in Streamline as number of microseconds while that particular counter was active.
- *
- *  - SW counters
- *    They are tracepoints reporting a particular number.
- *    They are accumulated in sw_counter_data array until they are passed to Streamline, then they are zeroed.
- *
- *  - Accumulators
- *    They are the same as software counters but their value is not zeroed.
- */
-
-/* Timeline (start/stop) activity */
-static const char *timeline_event_names[] = {
-       "PM_SHADER_0",
-       "PM_SHADER_1",
-       "PM_SHADER_2",
-       "PM_SHADER_3",
-       "PM_SHADER_4",
-       "PM_SHADER_5",
-       "PM_SHADER_6",
-       "PM_SHADER_7",
-       "PM_TILER_0",
-       "PM_L2_0",
-       "PM_L2_1",
-       "MMU_AS_0",
-       "MMU_AS_1",
-       "MMU_AS_2",
-       "MMU_AS_3"
-};
-
-enum {
-       PM_SHADER_0 = 0,
-       PM_SHADER_1,
-       PM_SHADER_2,
-       PM_SHADER_3,
-       PM_SHADER_4,
-       PM_SHADER_5,
-       PM_SHADER_6,
-       PM_SHADER_7,
-       PM_TILER_0,
-       PM_L2_0,
-       PM_L2_1,
-       MMU_AS_0,
-       MMU_AS_1,
-       MMU_AS_2,
-       MMU_AS_3
-};
-/* The number of shader blocks in the enum above */
-#define NUM_PM_SHADER (8)
-
-/* Software Counters */
-static const char *software_counter_names[] = {
-       "MMU_PAGE_FAULT_0",
-       "MMU_PAGE_FAULT_1",
-       "MMU_PAGE_FAULT_2",
-       "MMU_PAGE_FAULT_3"
-};
-
-enum {
-       MMU_PAGE_FAULT_0 = 0,
-       MMU_PAGE_FAULT_1,
-       MMU_PAGE_FAULT_2,
-       MMU_PAGE_FAULT_3
-};
-
-/* Software Counters */
-static const char *accumulators_names[] = {
-       "TOTAL_ALLOC_PAGES"
-};
-
-enum {
-       TOTAL_ALLOC_PAGES = 0
-};
-
-#define FIRST_TIMELINE_EVENT (0)
-#define NUMBER_OF_TIMELINE_EVENTS (sizeof(timeline_event_names) / sizeof(timeline_event_names[0]))
-#define FIRST_SOFTWARE_COUNTER (FIRST_TIMELINE_EVENT + NUMBER_OF_TIMELINE_EVENTS)
-#define NUMBER_OF_SOFTWARE_COUNTERS (sizeof(software_counter_names) / sizeof(software_counter_names[0]))
-#define FIRST_ACCUMULATOR (FIRST_SOFTWARE_COUNTER + NUMBER_OF_SOFTWARE_COUNTERS)
-#define NUMBER_OF_ACCUMULATORS (sizeof(accumulators_names) / sizeof(accumulators_names[0]))
-#define FILMSTRIP (NUMBER_OF_TIMELINE_EVENTS + NUMBER_OF_SOFTWARE_COUNTERS + NUMBER_OF_ACCUMULATORS)
-#define NUMBER_OF_EVENTS (NUMBER_OF_TIMELINE_EVENTS + NUMBER_OF_SOFTWARE_COUNTERS + NUMBER_OF_ACCUMULATORS + 1)
-
-/*
- * gatorfs variables for counter enable state
- */
-static mali_counter counters[NUMBER_OF_EVENTS];
-static unsigned long filmstrip_event;
-
-/* An array used to return the data we recorded
- * as key,value pairs hence the *2
- */
-static unsigned long counter_dump[NUMBER_OF_EVENTS * 2];
-
-/*
- * Array holding counter start times (in ns) for each counter.  A zero here
- * indicates that the activity monitored by this counter is not running.
- */
-static struct timespec timeline_event_starttime[NUMBER_OF_TIMELINE_EVENTS];
-
-/* The data we have recorded */
-static unsigned int timeline_data[NUMBER_OF_TIMELINE_EVENTS];
-static unsigned int sw_counter_data[NUMBER_OF_SOFTWARE_COUNTERS];
-static unsigned int accumulators_data[NUMBER_OF_ACCUMULATORS];
-
-/* Hold the previous timestamp, used to calculate the sample interval. */
-static struct timespec prev_timestamp;
-
-/**
- * Returns the timespan (in microseconds) between the two specified timestamps.
- *
- * @param start Ptr to the start timestamp
- * @param end Ptr to the end timestamp
- *
- * @return Number of microseconds between the two timestamps (can be negative if start follows end).
- */
-static inline long get_duration_us(const struct timespec *start, const struct timespec *end)
-{
-       long event_duration_us = (end->tv_nsec - start->tv_nsec) / 1000;
-       event_duration_us += (end->tv_sec - start->tv_sec) * 1000000;
-
-       return event_duration_us;
-}
-
-static void record_timeline_event(unsigned int timeline_index, unsigned int type)
-{
-       struct timespec event_timestamp;
-       struct timespec *event_start = &timeline_event_starttime[timeline_index];
-
-       switch (type) {
-       case ACTIVITY_START:
-               /* Get the event time... */
-               getnstimeofday(&event_timestamp);
-
-               /* Remember the start time if the activity is not already started */
-               if (event_start->tv_sec == 0) {
-                       *event_start = event_timestamp; /* Structure copy */
-               }
-               break;
-
-       case ACTIVITY_STOP:
-               /* if the counter was started... */
-               if (event_start->tv_sec != 0) {
-                       /* Get the event time... */
-                       getnstimeofday(&event_timestamp);
-
-                       /* Accumulate the duration in us */
-                       timeline_data[timeline_index] += get_duration_us(event_start, &event_timestamp);
-
-                       /* Reset the start time to indicate the activity is stopped. */
-                       event_start->tv_sec = 0;
-               }
-               break;
-
-       default:
-               /* Other activity events are ignored. */
-               break;
-       }
-}
-
-/*
- * Documentation about the following tracepoints is in mali_linux_trace.h
- */
-
-GATOR_DEFINE_PROBE(mali_pm_status, TP_PROTO(unsigned int event_id, unsigned long long value))
-{
-#define SHADER_PRESENT_LO       0x100  /* (RO) Shader core present bitmap, low word */
-#define TILER_PRESENT_LO        0x110  /* (RO) Tiler core present bitmap, low word */
-#define L2_PRESENT_LO           0x120  /* (RO) Level 2 cache present bitmap, low word */
-#define BIT_AT(value, pos) ((value >> pos) & 1)
-
-       static unsigned long long previous_shader_bitmask = 0;
-       static unsigned long long previous_tiler_bitmask = 0;
-       static unsigned long long previous_l2_bitmask = 0;
-
-       switch (event_id) {
-       case SHADER_PRESENT_LO:
-               {
-                       unsigned long long changed_bitmask = previous_shader_bitmask ^ value;
-                       int pos;
-
-                       for (pos = 0; pos < NUM_PM_SHADER; ++pos) {
-                               if (BIT_AT(changed_bitmask, pos)) {
-                                       record_timeline_event(PM_SHADER_0 + pos, BIT_AT(value, pos) ? ACTIVITY_START : ACTIVITY_STOP);
-                               }
-                       }
-
-                       previous_shader_bitmask = value;
-                       break;
-               }
-
-       case TILER_PRESENT_LO:
-               {
-                       unsigned long long changed = previous_tiler_bitmask ^ value;
-
-                       if (BIT_AT(changed, 0)) {
-                               record_timeline_event(PM_TILER_0, BIT_AT(value, 0) ? ACTIVITY_START : ACTIVITY_STOP);
-                       }
-
-                       previous_tiler_bitmask = value;
-                       break;
-               }
-
-       case L2_PRESENT_LO:
-               {
-                       unsigned long long changed = previous_l2_bitmask ^ value;
-
-                       if (BIT_AT(changed, 0)) {
-                               record_timeline_event(PM_L2_0, BIT_AT(value, 0) ? ACTIVITY_START : ACTIVITY_STOP);
-                       }
-                       if (BIT_AT(changed, 4)) {
-                               record_timeline_event(PM_L2_1, BIT_AT(value, 4) ? ACTIVITY_START : ACTIVITY_STOP);
-                       }
-
-                       previous_l2_bitmask = value;
-                       break;
-               }
-
-       default:
-               /* No other blocks are supported at present */
-               break;
-       }
-
-#undef SHADER_PRESENT_LO
-#undef TILER_PRESENT_LO
-#undef L2_PRESENT_LO
-#undef BIT_AT
-}
-
-GATOR_DEFINE_PROBE(mali_page_fault_insert_pages, TP_PROTO(int event_id, unsigned long value))
-{
-       /* We add to the previous since we may receive many tracepoints in one sample period */
-       sw_counter_data[MMU_PAGE_FAULT_0 + event_id] += value;
-}
-
-GATOR_DEFINE_PROBE(mali_mmu_as_in_use, TP_PROTO(int event_id))
-{
-       record_timeline_event(MMU_AS_0 + event_id, ACTIVITY_START);
-}
-
-GATOR_DEFINE_PROBE(mali_mmu_as_released, TP_PROTO(int event_id))
-{
-       record_timeline_event(MMU_AS_0 + event_id, ACTIVITY_STOP);
-}
-
-GATOR_DEFINE_PROBE(mali_total_alloc_pages_change, TP_PROTO(long long int event_id))
-{
-       accumulators_data[TOTAL_ALLOC_PAGES] = event_id;
-}
-
-static int create_files(struct super_block *sb, struct dentry *root)
-{
-       int event;
-       /*
-        * Create the filesystem for all events
-        */
-       int counter_index = 0;
-       const char *mali_name = gator_mali_get_mali_name();
-       mali_profiling_control_type *mali_control;
-
-       for (event = FIRST_TIMELINE_EVENT; event < FIRST_TIMELINE_EVENT + NUMBER_OF_TIMELINE_EVENTS; event++) {
-               if (gator_mali_create_file_system(mali_name, timeline_event_names[counter_index], sb, root, &counters[event], NULL) != 0) {
-                       return -1;
-               }
-               counter_index++;
-       }
-       counter_index = 0;
-       for (event = FIRST_SOFTWARE_COUNTER; event < FIRST_SOFTWARE_COUNTER + NUMBER_OF_SOFTWARE_COUNTERS; event++) {
-               if (gator_mali_create_file_system(mali_name, software_counter_names[counter_index], sb, root, &counters[event], NULL) != 0) {
-                       return -1;
-               }
-               counter_index++;
-       }
-       counter_index = 0;
-       for (event = FIRST_ACCUMULATOR; event < FIRST_ACCUMULATOR + NUMBER_OF_ACCUMULATORS; event++) {
-               if (gator_mali_create_file_system(mali_name, accumulators_names[counter_index], sb, root, &counters[event], NULL) != 0) {
-                       return -1;
-               }
-               counter_index++;
-       }
-
-       mali_control = symbol_get(_mali_profiling_control);
-       if (mali_control) {     
-               if (gator_mali_create_file_system(mali_name, "Filmstrip_cnt0", sb, root, &counters[FILMSTRIP], &filmstrip_event) != 0) {
-                       return -1;
-               }
-               symbol_put(_mali_profiling_control);
-       }
-
-       return 0;
-}
-
-static int register_tracepoints(void)
-{
-       if (GATOR_REGISTER_TRACE(mali_pm_status)) {
-               pr_debug("gator: Mali-T6xx: mali_pm_status tracepoint failed to activate\n");
-               return 0;
-       }
-
-       if (GATOR_REGISTER_TRACE(mali_page_fault_insert_pages)) {
-               pr_debug("gator: Mali-T6xx: mali_page_fault_insert_pages tracepoint failed to activate\n");
-               return 0;
-       }
-
-       if (GATOR_REGISTER_TRACE(mali_mmu_as_in_use)) {
-               pr_debug("gator: Mali-T6xx: mali_mmu_as_in_use tracepoint failed to activate\n");
-               return 0;
-       }
-
-       if (GATOR_REGISTER_TRACE(mali_mmu_as_released)) {
-               pr_debug("gator: Mali-T6xx: mali_mmu_as_released tracepoint failed to activate\n");
-               return 0;
-       }
-
-       if (GATOR_REGISTER_TRACE(mali_total_alloc_pages_change)) {
-               pr_debug("gator: Mali-T6xx: mali_total_alloc_pages_change tracepoint failed to activate\n");
-               return 0;
-       }
-
-       pr_debug("gator: Mali-T6xx: start\n");
-       pr_debug("gator: Mali-T6xx: mali_pm_status probe is at %p\n", &probe_mali_pm_status);
-       pr_debug("gator: Mali-T6xx: mali_page_fault_insert_pages probe is at %p\n", &probe_mali_page_fault_insert_pages);
-       pr_debug("gator: Mali-T6xx: mali_mmu_as_in_use probe is at %p\n", &probe_mali_mmu_as_in_use);
-       pr_debug("gator: Mali-T6xx: mali_mmu_as_released probe is at %p\n", &probe_mali_mmu_as_released);
-       pr_debug("gator: Mali-T6xx: mali_total_alloc_pages_change probe is at %p\n", &probe_mali_total_alloc_pages_change);
-
-       return 1;
-}
-
-static int start(void)
-{
-       unsigned int cnt;
-       mali_profiling_control_type *mali_control;
-
-       /* Clean all data for the next capture */
-       for (cnt = 0; cnt < NUMBER_OF_TIMELINE_EVENTS; cnt++) {
-               timeline_event_starttime[cnt].tv_sec = timeline_event_starttime[cnt].tv_nsec = 0;
-               timeline_data[cnt] = 0;
-       }
-
-       for (cnt = 0; cnt < NUMBER_OF_SOFTWARE_COUNTERS; cnt++) {
-               sw_counter_data[cnt] = 0;
-       }
-
-       for (cnt = 0; cnt < NUMBER_OF_ACCUMULATORS; cnt++) {
-               accumulators_data[cnt] = 0;
-       }
-
-       /* Register tracepoints */
-       if (register_tracepoints() == 0) {
-               return -1;
-       }
-
-       /* Generic control interface for Mali DDK. */
-       mali_control = symbol_get(_mali_profiling_control);
-       if (mali_control) {
-               /* The event attribute in the XML file keeps the actual frame rate. */
-               unsigned int enabled = counters[FILMSTRIP].enabled ? 1 : 0;
-               unsigned int rate = filmstrip_event & 0xff;
-               unsigned int resize_factor = (filmstrip_event >> 8) & 0xff;
-
-               pr_debug("gator: mali online _mali_profiling_control symbol @ %p\n", mali_control);
-
-#define FBDUMP_CONTROL_ENABLE (1)
-#define FBDUMP_CONTROL_RATE (2)
-#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
-               mali_control(FBDUMP_CONTROL_ENABLE, enabled);
-               mali_control(FBDUMP_CONTROL_RATE, rate);
-               mali_control(FBDUMP_CONTROL_RESIZE_FACTOR, resize_factor);
-
-               pr_debug("gator: sent mali_control enabled=%d, rate=%d, resize_factor=%d\n", enabled, rate, resize_factor);
-
-               symbol_put(_mali_profiling_control);
-       } else {
-               printk("gator: mali online _mali_profiling_control symbol not found\n");
-       }
-
-       /*
-        * Set the first timestamp for calculating the sample interval. The first interval could be quite long,
-        * since it will be the time between 'start' and the first 'read'.
-        * This means that timeline values will be divided by a big number for the first sample.
-        */
-       getnstimeofday(&prev_timestamp);
-
-       return 0;
-}
-
-static void stop(void)
-{
-       mali_profiling_control_type *mali_control;
-
-       pr_debug("gator: Mali-T6xx: stop\n");
-
-       /*
-        * It is safe to unregister traces even if they were not successfully
-        * registered, so no need to check.
-        */
-       GATOR_UNREGISTER_TRACE(mali_pm_status);
-       pr_debug("gator: Mali-T6xx: mali_pm_status tracepoint deactivated\n");
-
-       GATOR_UNREGISTER_TRACE(mali_page_fault_insert_pages);
-       pr_debug("gator: Mali-T6xx: mali_page_fault_insert_pages tracepoint deactivated\n");
-
-       GATOR_UNREGISTER_TRACE(mali_mmu_as_in_use);
-       pr_debug("gator: Mali-T6xx: mali_mmu_as_in_use tracepoint deactivated\n");
-
-       GATOR_UNREGISTER_TRACE(mali_mmu_as_released);
-       pr_debug("gator: Mali-T6xx: mali_mmu_as_released tracepoint deactivated\n");
-
-       GATOR_UNREGISTER_TRACE(mali_total_alloc_pages_change);
-       pr_debug("gator: Mali-T6xx: mali_total_alloc_pages_change tracepoint deactivated\n");
-
-       /* Generic control interface for Mali DDK. */
-       mali_control = symbol_get(_mali_profiling_control);
-       if (mali_control) {
-               pr_debug("gator: mali offline _mali_profiling_control symbol @ %p\n", mali_control);
-
-               mali_control(FBDUMP_CONTROL_ENABLE, 0);
-
-               symbol_put(_mali_profiling_control);
-       } else {
-               printk("gator: mali offline _mali_profiling_control symbol not found\n");
-       }
-}
-
-static int read(int **buffer)
-{
-       int cnt;
-       int len = 0;
-       long sample_interval_us = 0;
-       struct timespec read_timestamp;
-
-       if (!on_primary_core()) {
-               return 0;
-       }
-
-       /* Get the start of this sample period. */
-       getnstimeofday(&read_timestamp);
-
-       /*
-        * Calculate the sample interval if the previous sample time is valid.
-        * We use tv_sec since it will not be 0.
-        */
-       if (prev_timestamp.tv_sec != 0) {
-               sample_interval_us = get_duration_us(&prev_timestamp, &read_timestamp);
-       }
-
-       /* Structure copy. Update the previous timestamp. */
-       prev_timestamp = read_timestamp;
-
-       /*
-        * Report the timeline counters (ACTIVITY_START/STOP)
-        */
-       for (cnt = FIRST_TIMELINE_EVENT; cnt < (FIRST_TIMELINE_EVENT + NUMBER_OF_TIMELINE_EVENTS); cnt++) {
-               mali_counter *counter = &counters[cnt];
-               if (counter->enabled) {
-                       const int index = cnt - FIRST_TIMELINE_EVENT;
-                       unsigned int value;
-
-                       /* If the activity is still running, reset its start time to the start of this sample period
-                        * to correct the count.  Add the time up to the end of the sample onto the count. */
-                       if (timeline_event_starttime[index].tv_sec != 0) {
-                               const long event_duration = get_duration_us(&timeline_event_starttime[index], &read_timestamp);
-                               timeline_data[index] += event_duration;
-                               timeline_event_starttime[index] = read_timestamp;       /* Activity is still running. */
-                       }
-
-                       if (sample_interval_us != 0) {
-                               /* Convert the counter to a percent-of-sample value */
-                               value = (timeline_data[index] * 100) / sample_interval_us;
-                       } else {
-                               pr_debug("gator: Mali-T6xx: setting value to zero\n");
-                               value = 0;
-                       }
-
-                       /* Clear the counter value ready for the next sample. */
-                       timeline_data[index] = 0;
-
-                       counter_dump[len++] = counter->key;
-                       counter_dump[len++] = value;
-               }
-       }
-
-       /* Report the software counters */
-       for (cnt = FIRST_SOFTWARE_COUNTER; cnt < (FIRST_SOFTWARE_COUNTER + NUMBER_OF_SOFTWARE_COUNTERS); cnt++) {
-               const mali_counter *counter = &counters[cnt];
-               if (counter->enabled) {
-                       const int index = cnt - FIRST_SOFTWARE_COUNTER;
-                       counter_dump[len++] = counter->key;
-                       counter_dump[len++] = sw_counter_data[index];
-                       /* Set the value to zero for the next time */
-                       sw_counter_data[index] = 0;
-               }
-       }
-
-       /* Report the accumulators */
-       for (cnt = FIRST_ACCUMULATOR; cnt < (FIRST_ACCUMULATOR + NUMBER_OF_ACCUMULATORS); cnt++) {
-               const mali_counter *counter = &counters[cnt];
-               if (counter->enabled) {
-                       const int index = cnt - FIRST_ACCUMULATOR;
-                       counter_dump[len++] = counter->key;
-                       counter_dump[len++] = accumulators_data[index];
-                       /* Do not zero the accumulator */
-               }
-       }
-
-       /* Update the buffer */
-       if (buffer) {
-               *buffer = (int *)counter_dump;
-       }
-
-       return len;
-}
-
-static struct gator_interface gator_events_mali_t6xx_interface = {
-       .create_files = create_files,
-       .start = start,
-       .stop = stop,
-       .read = read
-};
-
-extern int gator_events_mali_t6xx_init(void)
-{
-       pr_debug("gator: Mali-T6xx: sw_counters init\n");
-
-       gator_mali_initialise_counters(counters, NUMBER_OF_EVENTS);
-
-       return gator_events_install(&gator_events_mali_t6xx_interface);
-}
diff --git a/drivers/gator/gator_events_mali_t6xx_hw.c b/drivers/gator/gator_events_mali_t6xx_hw.c
deleted file mode 100644 (file)
index dfbc91f..0000000
+++ /dev/null
@@ -1,792 +0,0 @@
-/**
- * Copyright (C) ARM Limited 2012-2014. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include "gator.h"
-
-#include <linux/module.h>
-#include <linux/time.h>
-#include <linux/math64.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-
-/* Mali T6xx DDK includes */
-#ifdef MALI_DIR_MIDGARD
-/* New DDK Directory structure with kernel/drivers/gpu/arm/midgard*/
-#include "mali_linux_trace.h"
-#include "mali_kbase.h"
-#include "mali_kbase_mem_linux.h"
-#else
-/* Old DDK Directory structure with kernel/drivers/gpu/arm/t6xx*/
-#include "linux/mali_linux_trace.h"
-#include "kbase/src/common/mali_kbase.h"
-#include "kbase/src/linux/mali_kbase_mem_linux.h"
-#endif
-
-#include "gator_events_mali_common.h"
-
-/* If API version is not specified then assume API version 1. */
-#ifndef MALI_DDK_GATOR_API_VERSION
-#define MALI_DDK_GATOR_API_VERSION 1
-#endif
-
-#if (MALI_DDK_GATOR_API_VERSION != 1) && (MALI_DDK_GATOR_API_VERSION != 2)
-#error MALI_DDK_GATOR_API_VERSION is invalid (must be 1 for r1/r2 DDK, or 2 for r3 DDK).
-#endif
-
-/*
- * Mali-T6xx
- */
-typedef struct kbase_device *kbase_find_device_type(int);
-typedef kbase_context *kbase_create_context_type(kbase_device *);
-typedef void kbase_destroy_context_type(kbase_context *);
-
-#if MALI_DDK_GATOR_API_VERSION == 1
-typedef void *kbase_va_alloc_type(kbase_context *, u32);
-typedef void kbase_va_free_type(kbase_context *, void *);
-#elif MALI_DDK_GATOR_API_VERSION == 2
-typedef void *kbase_va_alloc_type(kbase_context *, u32, kbase_hwc_dma_mapping * handle);
-typedef void kbase_va_free_type(kbase_context *, kbase_hwc_dma_mapping * handle);
-#endif
-
-typedef mali_error kbase_instr_hwcnt_enable_type(kbase_context *, kbase_uk_hwcnt_setup *);
-typedef mali_error kbase_instr_hwcnt_disable_type(kbase_context *);
-typedef mali_error kbase_instr_hwcnt_clear_type(kbase_context *);
-typedef mali_error kbase_instr_hwcnt_dump_irq_type(kbase_context *);
-typedef mali_bool kbase_instr_hwcnt_dump_complete_type(kbase_context *, mali_bool *);
-
-static kbase_find_device_type *kbase_find_device_symbol;
-static kbase_create_context_type *kbase_create_context_symbol;
-static kbase_va_alloc_type *kbase_va_alloc_symbol;
-static kbase_instr_hwcnt_enable_type *kbase_instr_hwcnt_enable_symbol;
-static kbase_instr_hwcnt_clear_type *kbase_instr_hwcnt_clear_symbol;
-static kbase_instr_hwcnt_dump_irq_type *kbase_instr_hwcnt_dump_irq_symbol;
-static kbase_instr_hwcnt_dump_complete_type *kbase_instr_hwcnt_dump_complete_symbol;
-static kbase_instr_hwcnt_disable_type *kbase_instr_hwcnt_disable_symbol;
-static kbase_va_free_type *kbase_va_free_symbol;
-static kbase_destroy_context_type *kbase_destroy_context_symbol;
-
-static long shader_present_low = 0;
-
-/** The interval between reads, in ns.
- *
- * Earlier we introduced
- * a 'hold off for 1ms after last read' to resolve MIDBASE-2178 and MALINE-724.
- * However, the 1ms hold off is too long if no context switches occur as there is a race
- * between this value and the tick of the read clock in gator which is also 1ms. If we 'miss' the
- * current read, the counter values are effectively 'spread' over 2ms and the values seen are half
- * what they should be (since Streamline averages over sample time). In the presence of context switches
- * this spread can vary and markedly affect the counters.  Currently there is no 'proper' solution to
- * this, but empirically we have found that reducing the minimum read interval to 950us causes the
- * counts to be much more stable.
- */
-static const int READ_INTERVAL_NSEC = 950000;
-
-#if GATOR_TEST
-#include "gator_events_mali_t6xx_hw_test.c"
-#endif
-
-/* Blocks for HW counters */
-enum {
-       JM_BLOCK = 0,
-       TILER_BLOCK,
-       SHADER_BLOCK,
-       MMU_BLOCK
-};
-
-/* Counters for Mali-T6xx:
- *
- *  - HW counters, 4 blocks
- *    For HW counters we need strings to create /dev/gator/events files.
- *    Enums are not needed because the position of the HW name in the array is the same
- *    of the corresponding value in the received block of memory.
- *    HW counters are requested by calculating a bitmask, passed then to the driver.
- *    Every millisecond a HW counters dump is requested, and if the previous has been completed they are read.
- */
-
-/* Hardware Counters */
-static const char *const hardware_counter_names[] = {
-       /* Job Manager */
-       "",
-       "",
-       "",
-       "",
-       "MESSAGES_SENT",
-       "MESSAGES_RECEIVED",
-       "GPU_ACTIVE",           /* 6 */
-       "IRQ_ACTIVE",
-       "JS0_JOBS",
-       "JS0_TASKS",
-       "JS0_ACTIVE",
-       "",
-       "JS0_WAIT_READ",
-       "JS0_WAIT_ISSUE",
-       "JS0_WAIT_DEPEND",
-       "JS0_WAIT_FINISH",
-       "JS1_JOBS",
-       "JS1_TASKS",
-       "JS1_ACTIVE",
-       "",
-       "JS1_WAIT_READ",
-       "JS1_WAIT_ISSUE",
-       "JS1_WAIT_DEPEND",
-       "JS1_WAIT_FINISH",
-       "JS2_JOBS",
-       "JS2_TASKS",
-       "JS2_ACTIVE",
-       "",
-       "JS2_WAIT_READ",
-       "JS2_WAIT_ISSUE",
-       "JS2_WAIT_DEPEND",
-       "JS2_WAIT_FINISH",
-       "JS3_JOBS",
-       "JS3_TASKS",
-       "JS3_ACTIVE",
-       "",
-       "JS3_WAIT_READ",
-       "JS3_WAIT_ISSUE",
-       "JS3_WAIT_DEPEND",
-       "JS3_WAIT_FINISH",
-       "JS4_JOBS",
-       "JS4_TASKS",
-       "JS4_ACTIVE",
-       "",
-       "JS4_WAIT_READ",
-       "JS4_WAIT_ISSUE",
-       "JS4_WAIT_DEPEND",
-       "JS4_WAIT_FINISH",
-       "JS5_JOBS",
-       "JS5_TASKS",
-       "JS5_ACTIVE",
-       "",
-       "JS5_WAIT_READ",
-       "JS5_WAIT_ISSUE",
-       "JS5_WAIT_DEPEND",
-       "JS5_WAIT_FINISH",
-       "JS6_JOBS",
-       "JS6_TASKS",
-       "JS6_ACTIVE",
-       "",
-       "JS6_WAIT_READ",
-       "JS6_WAIT_ISSUE",
-       "JS6_WAIT_DEPEND",
-       "JS6_WAIT_FINISH",
-
-       /*Tiler */
-       "",
-       "",
-       "",
-       "JOBS_PROCESSED",
-       "TRIANGLES",
-       "QUADS",
-       "POLYGONS",
-       "POINTS",
-       "LINES",
-       "VCACHE_HIT",
-       "VCACHE_MISS",
-       "FRONT_FACING",
-       "BACK_FACING",
-       "PRIM_VISIBLE",
-       "PRIM_CULLED",
-       "PRIM_CLIPPED",
-       "LEVEL0",
-       "LEVEL1",
-       "LEVEL2",
-       "LEVEL3",
-       "LEVEL4",
-       "LEVEL5",
-       "LEVEL6",
-       "LEVEL7",
-       "COMMAND_1",
-       "COMMAND_2",
-       "COMMAND_3",
-       "COMMAND_4",
-       "COMMAND_4_7",
-       "COMMAND_8_15",
-       "COMMAND_16_63",
-       "COMMAND_64",
-       "COMPRESS_IN",
-       "COMPRESS_OUT",
-       "COMPRESS_FLUSH",
-       "TIMESTAMPS",
-       "PCACHE_HIT",
-       "PCACHE_MISS",
-       "PCACHE_LINE",
-       "PCACHE_STALL",
-       "WRBUF_HIT",
-       "WRBUF_MISS",
-       "WRBUF_LINE",
-       "WRBUF_PARTIAL",
-       "WRBUF_STALL",
-       "ACTIVE",
-       "LOADING_DESC",
-       "INDEX_WAIT",
-       "INDEX_RANGE_WAIT",
-       "VERTEX_WAIT",
-       "PCACHE_WAIT",
-       "WRBUF_WAIT",
-       "BUS_READ",
-       "BUS_WRITE",
-       "",
-       "",
-       "",
-       "",
-       "",
-       "UTLB_STALL",
-       "UTLB_REPLAY_MISS",
-       "UTLB_REPLAY_FULL",
-       "UTLB_NEW_MISS",
-       "UTLB_HIT",
-
-       /* Shader Core */
-       "",
-       "",
-       "",
-       "SHADER_CORE_ACTIVE",
-       "FRAG_ACTIVE",
-       "FRAG_PRIMATIVES",
-       "FRAG_PRIMATIVES_DROPPED",
-       "FRAG_CYCLE_DESC",
-       "FRAG_CYCLES_PLR",
-       "FRAG_CYCLES_VERT",
-       "FRAG_CYCLES_TRISETUP",
-       "FRAG_CYCLES_RAST",
-       "FRAG_THREADS",
-       "FRAG_DUMMY_THREADS",
-       "FRAG_QUADS_RAST",
-       "FRAG_QUADS_EZS_TEST",
-       "FRAG_QUADS_EZS_KILLED",
-       "FRAG_QUADS_LZS_TEST",
-       "FRAG_QUADS_LZS_KILLED",
-       "FRAG_CYCLE_NO_TILE",
-       "FRAG_NUM_TILES",
-       "FRAG_TRANS_ELIM",
-       "COMPUTE_ACTIVE",
-       "COMPUTE_TASKS",
-       "COMPUTE_THREADS",
-       "COMPUTE_CYCLES_DESC",
-       "TRIPIPE_ACTIVE",
-       "ARITH_WORDS",
-       "ARITH_CYCLES_REG",
-       "ARITH_CYCLES_L0",
-       "ARITH_FRAG_DEPEND",
-       "LS_WORDS",
-       "LS_ISSUES",
-       "LS_RESTARTS",
-       "LS_REISSUES_MISS",
-       "LS_REISSUES_VD",
-       "LS_REISSUE_ATTRIB_MISS",
-       "LS_NO_WB",
-       "TEX_WORDS",
-       "TEX_BUBBLES",
-       "TEX_WORDS_L0",
-       "TEX_WORDS_DESC",
-       "TEX_THREADS",
-       "TEX_RECIRC_FMISS",
-       "TEX_RECIRC_DESC",
-       "TEX_RECIRC_MULTI",
-       "TEX_RECIRC_PMISS",
-       "TEX_RECIRC_CONF",
-       "LSC_READ_HITS",
-       "LSC_READ_MISSES",
-       "LSC_WRITE_HITS",
-       "LSC_WRITE_MISSES",
-       "LSC_ATOMIC_HITS",
-       "LSC_ATOMIC_MISSES",
-       "LSC_LINE_FETCHES",
-       "LSC_DIRTY_LINE",
-       "LSC_SNOOPS",
-       "AXI_TLB_STALL",
-       "AXI_TLB_MIESS",
-       "AXI_TLB_TRANSACTION",
-       "LS_TLB_MISS",
-       "LS_TLB_HIT",
-       "AXI_BEATS_READ",
-       "AXI_BEATS_WRITTEN",
-
-       /*L2 and MMU */
-       "",
-       "",
-       "",
-       "",
-       "MMU_HIT",
-       "MMU_NEW_MISS",
-       "MMU_REPLAY_FULL",
-       "MMU_REPLAY_MISS",
-       "MMU_TABLE_WALK",
-       "",
-       "",
-       "",
-       "",
-       "",
-       "",
-       "",
-       "UTLB_HIT",
-       "UTLB_NEW_MISS",
-       "UTLB_REPLAY_FULL",
-       "UTLB_REPLAY_MISS",
-       "UTLB_STALL",
-       "",
-       "",
-       "",
-       "",
-       "",
-       "",
-       "",
-       "",
-       "",
-       "L2_WRITE_BEATS",
-       "L2_READ_BEATS",
-       "L2_ANY_LOOKUP",
-       "L2_READ_LOOKUP",
-       "L2_SREAD_LOOKUP",
-       "L2_READ_REPLAY",
-       "L2_READ_SNOOP",
-       "L2_READ_HIT",
-       "L2_CLEAN_MISS",
-       "L2_WRITE_LOOKUP",
-       "L2_SWRITE_LOOKUP",
-       "L2_WRITE_REPLAY",
-       "L2_WRITE_SNOOP",
-       "L2_WRITE_HIT",
-       "L2_EXT_READ_FULL",
-       "L2_EXT_READ_HALF",
-       "L2_EXT_WRITE_FULL",
-       "L2_EXT_WRITE_HALF",
-       "L2_EXT_READ",
-       "L2_EXT_READ_LINE",
-       "L2_EXT_WRITE",
-       "L2_EXT_WRITE_LINE",
-       "L2_EXT_WRITE_SMALL",
-       "L2_EXT_BARRIER",
-       "L2_EXT_AR_STALL",
-       "L2_EXT_R_BUF_FULL",
-       "L2_EXT_RD_BUF_FULL",
-       "L2_EXT_R_RAW",
-       "L2_EXT_W_STALL",
-       "L2_EXT_W_BUF_FULL",
-       "L2_EXT_R_W_HAZARD",
-       "L2_TAG_HAZARD",
-       "L2_SNOOP_FULL",
-       "L2_REPLAY_FULL"
-};
-
-#define NUMBER_OF_HARDWARE_COUNTERS (sizeof(hardware_counter_names) / sizeof(hardware_counter_names[0]))
-
-#define GET_HW_BLOCK(c) (((c) >> 6) & 0x3)
-#define GET_COUNTER_OFFSET(c) ((c) & 0x3f)
-
-/* Memory to dump hardware counters into */
-static void *kernel_dump_buffer;
-
-#if MALI_DDK_GATOR_API_VERSION == 2
-/* DMA state used to manage lifetime of the buffer */
-kbase_hwc_dma_mapping kernel_dump_buffer_handle;
-#endif
-
-/* kbase context and device */
-static kbase_context *kbcontext = NULL;
-static struct kbase_device *kbdevice = NULL;
-
-/*
- * The following function has no external prototype in older DDK revisions.  When the DDK
- * is updated then this should be removed.
- */
-struct kbase_device *kbase_find_device(int minor);
-
-static volatile bool kbase_device_busy = false;
-static unsigned int num_hardware_counters_enabled;
-
-/*
- * gatorfs variables for counter enable state
- */
-static mali_counter counters[NUMBER_OF_HARDWARE_COUNTERS];
-
-/* An array used to return the data we recorded
- * as key,value pairs hence the *2
- */
-static unsigned long counter_dump[NUMBER_OF_HARDWARE_COUNTERS * 2];
-
-#define SYMBOL_GET(FUNCTION, ERROR_COUNT) \
-       if(FUNCTION ## _symbol) \
-       { \
-               printk("gator: mali " #FUNCTION " symbol was already registered\n"); \
-               (ERROR_COUNT)++; \
-       } \
-       else \
-       { \
-               FUNCTION ## _symbol = symbol_get(FUNCTION); \
-               if(! FUNCTION ## _symbol) \
-               { \
-                       printk("gator: mali online " #FUNCTION " symbol not found\n"); \
-                       (ERROR_COUNT)++; \
-               } \
-       }
-
-#define SYMBOL_CLEANUP(FUNCTION) \
-       if(FUNCTION ## _symbol) \
-       { \
-        symbol_put(FUNCTION); \
-        FUNCTION ## _symbol = NULL; \
-       }
-
-/**
- * Execute symbol_get for all the Mali symbols and check for success.
- * @return the number of symbols not loaded.
- */
-static int init_symbols(void)
-{
-       int error_count = 0;
-       SYMBOL_GET(kbase_find_device, error_count);
-       SYMBOL_GET(kbase_create_context, error_count);
-       SYMBOL_GET(kbase_va_alloc, error_count);
-       SYMBOL_GET(kbase_instr_hwcnt_enable, error_count);
-       SYMBOL_GET(kbase_instr_hwcnt_clear, error_count);
-       SYMBOL_GET(kbase_instr_hwcnt_dump_irq, error_count);
-       SYMBOL_GET(kbase_instr_hwcnt_dump_complete, error_count);
-       SYMBOL_GET(kbase_instr_hwcnt_disable, error_count);
-       SYMBOL_GET(kbase_va_free, error_count);
-       SYMBOL_GET(kbase_destroy_context, error_count);
-
-       return error_count;
-}
-
-/**
- * Execute symbol_put for all the registered Mali symbols.
- */
-static void clean_symbols(void)
-{
-       SYMBOL_CLEANUP(kbase_find_device);
-       SYMBOL_CLEANUP(kbase_create_context);
-       SYMBOL_CLEANUP(kbase_va_alloc);
-       SYMBOL_CLEANUP(kbase_instr_hwcnt_enable);
-       SYMBOL_CLEANUP(kbase_instr_hwcnt_clear);
-       SYMBOL_CLEANUP(kbase_instr_hwcnt_dump_irq);
-       SYMBOL_CLEANUP(kbase_instr_hwcnt_dump_complete);
-       SYMBOL_CLEANUP(kbase_instr_hwcnt_disable);
-       SYMBOL_CLEANUP(kbase_va_free);
-       SYMBOL_CLEANUP(kbase_destroy_context);
-}
-
-/**
- * Determines whether a read should take place
- * @param current_time The current time, obtained from getnstimeofday()
- * @param prev_time_s The number of seconds at the previous read attempt.
- * @param next_read_time_ns The time (in ns) when the next read should be allowed.
- *
- * Note that this function has been separated out here to allow it to be tested.
- */
-static int is_read_scheduled(const struct timespec *current_time, u32 *prev_time_s, s32 *next_read_time_ns)
-{
-       /* If the current ns count rolls over a second, roll the next read time too. */
-       if (current_time->tv_sec != *prev_time_s) {
-               *next_read_time_ns = *next_read_time_ns - NSEC_PER_SEC;
-       }
-
-       /* Abort the read if the next read time has not arrived. */
-       if (current_time->tv_nsec < *next_read_time_ns) {
-               return 0;
-       }
-
-       /* Set the next read some fixed time after this one, and update the read timestamp. */
-       *next_read_time_ns = current_time->tv_nsec + READ_INTERVAL_NSEC;
-
-       *prev_time_s = current_time->tv_sec;
-       return 1;
-}
-
-static int start(void)
-{
-       kbase_uk_hwcnt_setup setup;
-       mali_error err;
-       int cnt;
-       u16 bitmask[] = { 0, 0, 0, 0 };
-       unsigned long long shadersPresent = 0;
-
-       /* Setup HW counters */
-       num_hardware_counters_enabled = 0;
-
-       if (NUMBER_OF_HARDWARE_COUNTERS != 256) {
-               pr_debug("Unexpected number of hardware counters defined: expecting 256, got %d\n", NUMBER_OF_HARDWARE_COUNTERS);
-       }
-
-       /* Calculate enable bitmasks based on counters_enabled array */
-       for (cnt = 0; cnt < NUMBER_OF_HARDWARE_COUNTERS; cnt++) {
-               const mali_counter *counter = &counters[cnt];
-               if (counter->enabled) {
-                       int block = GET_HW_BLOCK(cnt);
-                       int enable_bit = GET_COUNTER_OFFSET(cnt) / 4;
-                       bitmask[block] |= (1 << enable_bit);
-                       pr_debug("gator: Mali-T6xx: hardware counter %s selected [%d]\n", hardware_counter_names[cnt], cnt);
-                       num_hardware_counters_enabled++;
-               }
-       }
-
-       /* Create a kbase context for HW counters */
-       if (num_hardware_counters_enabled > 0) {
-               if (init_symbols() > 0) {
-                       clean_symbols();
-                       /* No Mali driver code entrypoints found - not a fault. */
-                       return 0;
-               }
-
-               kbdevice = kbase_find_device_symbol(-1);
-
-               /* If we already got a context, fail */
-               if (kbcontext) {
-                       pr_debug("gator: Mali-T6xx: error context already present\n");
-                       goto out;
-               }
-
-               /* kbcontext will only be valid after all the Mali symbols are loaded successfully */
-               kbcontext = kbase_create_context_symbol(kbdevice);
-               if (!kbcontext) {
-                       pr_debug("gator: Mali-T6xx: error creating kbase context\n");
-                       goto out;
-               }
-
-
-               /* See if we can get the number of shader cores */
-               shadersPresent = kbdevice->shader_present_bitmap;
-               shader_present_low = (unsigned long)shadersPresent;
-
-               /*
-                * The amount of memory needed to store the dump (bytes)
-                * DUMP_SIZE = number of core groups
-                *             * number of blocks (always 8 for midgard)
-                *             * number of counters per block (always 64 for midgard)
-                *             * number of bytes per counter (always 4 in midgard)
-                * For a Mali-T6xx with a single core group = 1 * 8 * 64 * 4 = 2048
-                * For a Mali-T6xx with a dual core group   = 2 * 8 * 64 * 4 = 4096
-                */
-#if MALI_DDK_GATOR_API_VERSION == 1
-               kernel_dump_buffer = kbase_va_alloc_symbol(kbcontext, 4096);
-#elif MALI_DDK_GATOR_API_VERSION == 2
-               kernel_dump_buffer = kbase_va_alloc_symbol(kbcontext, 4096, &kernel_dump_buffer_handle);
-#endif
-               if (!kernel_dump_buffer) {
-                       pr_debug("gator: Mali-T6xx: error trying to allocate va\n");
-                       goto destroy_context;
-               }
-
-               setup.dump_buffer = (uintptr_t)kernel_dump_buffer;
-               setup.jm_bm = bitmask[JM_BLOCK];
-               setup.tiler_bm = bitmask[TILER_BLOCK];
-               setup.shader_bm = bitmask[SHADER_BLOCK];
-               setup.mmu_l2_bm = bitmask[MMU_BLOCK];
-               /* These counters do not exist on Mali-T60x */
-               setup.l3_cache_bm = 0;
-
-               /* Use kbase API to enable hardware counters and provide dump buffer */
-               err = kbase_instr_hwcnt_enable_symbol(kbcontext, &setup);
-               if (err != MALI_ERROR_NONE) {
-                       pr_debug("gator: Mali-T6xx: can't setup hardware counters\n");
-                       goto free_buffer;
-               }
-               pr_debug("gator: Mali-T6xx: hardware counters enabled\n");
-               kbase_instr_hwcnt_clear_symbol(kbcontext);
-               pr_debug("gator: Mali-T6xx: hardware counters cleared \n");
-
-               kbase_device_busy = false;
-       }
-
-       return 0;
-
-free_buffer:
-#if MALI_DDK_GATOR_API_VERSION == 1
-       kbase_va_free_symbol(kbcontext, kernel_dump_buffer);
-#elif MALI_DDK_GATOR_API_VERSION == 2
-       kbase_va_free_symbol(kbcontext, &kernel_dump_buffer_handle);
-#endif
-
-destroy_context:
-       kbase_destroy_context_symbol(kbcontext);
-
-out:
-       clean_symbols();
-       return -1;
-}
-
-static void stop(void)
-{
-       unsigned int cnt;
-       kbase_context *temp_kbcontext;
-
-       pr_debug("gator: Mali-T6xx: stop\n");
-
-       /* Set all counters as disabled */
-       for (cnt = 0; cnt < NUMBER_OF_HARDWARE_COUNTERS; cnt++) {
-               counters[cnt].enabled = 0;
-       }
-
-       /* Destroy the context for HW counters */
-       if (num_hardware_counters_enabled > 0 && kbcontext != NULL) {
-               /*
-                * Set the global variable to NULL before destroying it, because
-                * other function will check this before using it.
-                */
-               temp_kbcontext = kbcontext;
-               kbcontext = NULL;
-
-               kbase_instr_hwcnt_disable_symbol(temp_kbcontext);
-
-#if MALI_DDK_GATOR_API_VERSION == 1
-               kbase_va_free_symbol(temp_kbcontext, kernel_dump_buffer);
-#elif MALI_DDK_GATOR_API_VERSION == 2
-               kbase_va_free_symbol(temp_kbcontext, &kernel_dump_buffer_handle);
-#endif
-
-               kbase_destroy_context_symbol(temp_kbcontext);
-
-               pr_debug("gator: Mali-T6xx: hardware counters stopped\n");
-
-               clean_symbols();
-       }
-}
-
-static int read(int **buffer)
-{
-       int cnt;
-       int len = 0;
-       u32 value = 0;
-       mali_bool success;
-
-       struct timespec current_time;
-       static u32 prev_time_s = 0;
-       static s32 next_read_time_ns = 0;
-
-       if (!on_primary_core()) {
-               return 0;
-       }
-
-       getnstimeofday(&current_time);
-
-       /*
-        * Discard reads unless a respectable time has passed.  This reduces the load on the GPU without sacrificing
-        * accuracy on the Streamline display.
-        */
-       if (!is_read_scheduled(&current_time, &prev_time_s, &next_read_time_ns)) {
-               return 0;
-       }
-
-       /*
-        * Report the HW counters
-        * Only process hardware counters if at least one of the hardware counters is enabled.
-        */
-       if (num_hardware_counters_enabled > 0) {
-               const unsigned int vithar_blocks[] = {
-                       0x700,  /* VITHAR_JOB_MANAGER,     Block 0 */
-                       0x400,  /* VITHAR_TILER,           Block 1 */
-                       0x000,  /* VITHAR_SHADER_CORE,     Block 2 */
-                       0x500   /* VITHAR_MEMORY_SYSTEM,   Block 3 */
-               };
-
-               if (!kbcontext) {
-                       return -1;
-               }
-
-               /* Mali symbols can be called safely since a kbcontext is valid */
-               if (kbase_instr_hwcnt_dump_complete_symbol(kbcontext, &success) == MALI_TRUE) {
-                       kbase_device_busy = false;
-
-                       if (success == MALI_TRUE) {
-                               /* Cycle through hardware counters and accumulate totals */
-                               for (cnt = 0; cnt < NUMBER_OF_HARDWARE_COUNTERS; cnt++) {
-                                       const mali_counter *counter = &counters[cnt];
-                                       if (counter->enabled) {
-                                               const int block = GET_HW_BLOCK(cnt);
-                                               const int counter_offset = GET_COUNTER_OFFSET(cnt);
-
-                                               const char* block_base_address = (char*)kernel_dump_buffer + vithar_blocks[block];
-
-                                               /* If counter belongs to shader block need to take into account all cores */
-                                               if (block == SHADER_BLOCK) {
-                                                       int i = 0;
-                                                       int shader_core_count = 0;
-                                                       value = 0;
-
-                                                       for (i = 0; i < 4; i++) {
-                                                               if ((shader_present_low >> i) & 1) {
-                                                                       value += *((u32*) (block_base_address + (0x100 * i)) + counter_offset);
-                                                                       shader_core_count++;
-                                                               }
-                                                       }
-
-                                                       for (i = 0; i < 4; i++) {
-                                                               if((shader_present_low >> (i+4)) & 1) {
-                                                                       value += *((u32*)(block_base_address + (0x100 * i) + 0x800) + counter_offset);
-                                                                       shader_core_count++;
-                                                               }
-                                                       }
-
-                                                       /* Need to total by number of cores to produce an average */
-                                                       if (shader_core_count != 0) {
-                                                               value /= shader_core_count;
-                                                       }
-                                               } else {
-                                                       value = *((u32*)block_base_address + counter_offset);
-                                               }
-
-                                               counter_dump[len++] = counter->key;
-                                               counter_dump[len++] = value;
-                                       }
-                               }
-                       }
-               }
-
-               if (!kbase_device_busy) {
-                       kbase_device_busy = true;
-                       kbase_instr_hwcnt_dump_irq_symbol(kbcontext);
-               }
-       }
-
-       /* Update the buffer */
-       if (buffer) {
-               *buffer = (int *)counter_dump;
-       }
-
-       return len;
-}
-
-static int create_files(struct super_block *sb, struct dentry *root)
-{
-       unsigned int event;
-       /*
-        * Create the filesystem for all events
-        */
-       int counter_index = 0;
-       const char *mali_name = gator_mali_get_mali_name();
-
-       for (event = 0; event < NUMBER_OF_HARDWARE_COUNTERS; event++) {
-               if (gator_mali_create_file_system(mali_name, hardware_counter_names[counter_index], sb, root, &counters[event], NULL) != 0)
-                       return -1;
-               counter_index++;
-       }
-
-       return 0;
-}
-
-static struct gator_interface gator_events_mali_t6xx_interface = {
-       .create_files = create_files,
-       .start = start,
-       .stop = stop,
-       .read = read
-};
-
-int gator_events_mali_t6xx_hw_init(void)
-{
-       pr_debug("gator: Mali-T6xx: sw_counters init\n");
-
-#if GATOR_TEST
-       test_all_is_read_scheduled();
-#endif
-
-       gator_mali_initialise_counters(counters, NUMBER_OF_HARDWARE_COUNTERS);
-
-       return gator_events_install(&gator_events_mali_t6xx_interface);
-}
diff --git a/drivers/gator/gator_events_mali_t6xx_hw_test.c b/drivers/gator/gator_events_mali_t6xx_hw_test.c
deleted file mode 100644 (file)
index ba6553f..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Copyright (C) ARM Limited 2012-2014. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-/**
- * Test functions for mali_t600_hw code.
- */
-
-static int is_read_scheduled(const struct timespec *current_time, u32 *prev_time_s, s32 *next_read_time_ns);
-
-static int test_is_read_scheduled(u32 s, u32 ns, u32 prev_s, s32 next_ns, int expected_result, s32 expected_next_ns)
-{
-       struct timespec current_time;
-       u32 prev_time_s = prev_s;
-       s32 next_read_time_ns = next_ns;
-
-       current_time.tv_sec = s;
-       current_time.tv_nsec = ns;
-
-       if (is_read_scheduled(&current_time, &prev_time_s, &next_read_time_ns) != expected_result) {
-               printk("Failed do_read(%u, %u, %u, %d): expected %d\n", s, ns, prev_s, next_ns, expected_result);
-               return 0;
-       }
-
-       if (next_read_time_ns != expected_next_ns) {
-               printk("Failed: next_read_ns expected=%d, actual=%d\n", expected_next_ns, next_read_time_ns);
-               return 0;
-       }
-
-       return 1;
-}
-
-static void test_all_is_read_scheduled(void)
-{
-       const int HIGHEST_NS = 999999999;
-       int n_tests_passed = 0;
-
-       printk("gator: running tests on %s\n", __FILE__);
-
-       n_tests_passed += test_is_read_scheduled(0, 0, 0, 0, 1, READ_INTERVAL_NSEC);    /* Null time */
-       n_tests_passed += test_is_read_scheduled(100, 1000, 0, 0, 1, READ_INTERVAL_NSEC + 1000);        /* Initial values */
-
-       n_tests_passed += test_is_read_scheduled(100, HIGHEST_NS, 100, HIGHEST_NS + 500, 0, HIGHEST_NS + 500);
-       n_tests_passed += test_is_read_scheduled(101, 0001, 100, HIGHEST_NS + 500, 0, HIGHEST_NS + 500 - NSEC_PER_SEC);
-       n_tests_passed += test_is_read_scheduled(101, 600, 100, HIGHEST_NS + 500 - NSEC_PER_SEC, 1, 600 + READ_INTERVAL_NSEC);
-
-       n_tests_passed += test_is_read_scheduled(101, 600, 100, HIGHEST_NS + 500, 1, 600 + READ_INTERVAL_NSEC);
-
-       printk("gator: %d tests passed\n", n_tests_passed);
-}
index c633dfdce3069c91ac6c19bb8b2d2ab694945dbb..c625ac5af9cd37fac1951535ba4092f08e6b0e59 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/workqueue.h>
 #include <trace/events/kmem.h>
 
+#define USE_THREAD defined(CONFIG_PREEMPT_RT_FULL)
+
 enum {
        MEMINFO_MEMFREE,
        MEMINFO_MEMUSED,
@@ -48,7 +50,7 @@ static bool meminfo_global_enabled;
 static ulong meminfo_enabled[MEMINFO_TOTAL];
 static ulong meminfo_keys[MEMINFO_TOTAL];
 static long long meminfo_buffer[2 * (MEMINFO_TOTAL + 2)];
-static int meminfo_length = 0;
+static int meminfo_length;
 static bool new_data_avail;
 
 static bool proc_global_enabled;
@@ -56,22 +58,44 @@ static ulong proc_enabled[PROC_COUNT];
 static ulong proc_keys[PROC_COUNT];
 static DEFINE_PER_CPU(long long, proc_buffer[2 * (PROC_COUNT + 3)]);
 
+#if USE_THREAD
+
 static int gator_meminfo_func(void *data);
 static bool gator_meminfo_run;
-// Initialize semaphore unlocked to initialize memory values
+/* Initialize semaphore unlocked to initialize memory values */
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
 static DECLARE_MUTEX(gator_meminfo_sem);
 #else
 static DEFINE_SEMAPHORE(gator_meminfo_sem);
 #endif
 
+static void notify(void)
+{
+       up(&gator_meminfo_sem);
+}
+
+#else
+
+static unsigned int mem_event;
+static void wq_sched_handler(struct work_struct *wsptr);
+DECLARE_WORK(work, wq_sched_handler);
+static struct timer_list meminfo_wake_up_timer;
+static void meminfo_wake_up_handler(unsigned long unused_data);
+
+static void notify(void)
+{
+       mem_event++;
+}
+
+#endif
+
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
 GATOR_DEFINE_PROBE(mm_page_free_direct, TP_PROTO(struct page *page, unsigned int order))
 #else
 GATOR_DEFINE_PROBE(mm_page_free, TP_PROTO(struct page *page, unsigned int order))
 #endif
 {
-       up(&gator_meminfo_sem);
+       notify();
 }
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
@@ -80,12 +104,12 @@ GATOR_DEFINE_PROBE(mm_pagevec_free, TP_PROTO(struct page *page, int cold))
 GATOR_DEFINE_PROBE(mm_page_free_batched, TP_PROTO(struct page *page, int cold))
 #endif
 {
-       up(&gator_meminfo_sem);
+       notify();
 }
 
 GATOR_DEFINE_PROBE(mm_page_alloc, TP_PROTO(struct page *page, unsigned int order, gfp_t gfp_flags, int migratetype))
 {
-       up(&gator_meminfo_sem);
+       notify();
 }
 
 static int gator_events_meminfo_create_files(struct super_block *sb, struct dentry *root)
@@ -95,18 +119,16 @@ static int gator_events_meminfo_create_files(struct super_block *sb, struct dent
 
        for (i = 0; i < MEMINFO_TOTAL; i++) {
                dir = gatorfs_mkdir(sb, root, meminfo_names[i]);
-               if (!dir) {
+               if (!dir)
                        return -1;
-               }
                gatorfs_create_ulong(sb, dir, "enabled", &meminfo_enabled[i]);
                gatorfs_create_ro_ulong(sb, dir, "key", &meminfo_keys[i]);
        }
 
        for (i = 0; i < PROC_COUNT; ++i) {
                dir = gatorfs_mkdir(sb, root, proc_names[i]);
-               if (!dir) {
+               if (!dir)
                        return -1;
-               }
                gatorfs_create_ulong(sb, dir, "enabled", &proc_enabled[i]);
                gatorfs_create_ro_ulong(sb, dir, "key", &proc_keys[i]);
        }
@@ -134,9 +156,8 @@ static int gator_events_meminfo_start(void)
                        break;
                }
        }
-       if (meminfo_enabled[MEMINFO_MEMUSED]) {
+       if (meminfo_enabled[MEMINFO_MEMUSED])
                proc_global_enabled = 1;
-       }
 
        if (meminfo_global_enabled == 0)
                return 0;
@@ -156,16 +177,22 @@ static int gator_events_meminfo_start(void)
        if (GATOR_REGISTER_TRACE(mm_page_alloc))
                goto mm_page_alloc_exit;
 
-       // Start worker thread
+#if USE_THREAD
+       /* Start worker thread */
        gator_meminfo_run = true;
-       // Since the mutex starts unlocked, memory values will be initialized
+       /* Since the mutex starts unlocked, memory values will be initialized */
        if (IS_ERR(kthread_run(gator_meminfo_func, NULL, "gator_meminfo")))
                goto kthread_run_exit;
+#else
+       setup_timer(&meminfo_wake_up_timer, meminfo_wake_up_handler, 0);
+#endif
 
        return 0;
 
+#if USE_THREAD
 kthread_run_exit:
        GATOR_UNREGISTER_TRACE(mm_page_alloc);
+#endif
 mm_page_alloc_exit:
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
        GATOR_UNREGISTER_TRACE(mm_pagevec_free);
@@ -194,75 +221,111 @@ static void gator_events_meminfo_stop(void)
 #endif
                GATOR_UNREGISTER_TRACE(mm_page_alloc);
 
-               // Stop worker thread
+#if USE_THREAD
+               /* Stop worker thread */
                gator_meminfo_run = false;
                up(&gator_meminfo_sem);
+#else
+               del_timer_sync(&meminfo_wake_up_timer);
+#endif
        }
 }
 
-// Must be run in process context as the kernel function si_meminfo() can sleep
-static int gator_meminfo_func(void *data)
+static void do_read(void)
 {
        struct sysinfo info;
        int i, len;
        unsigned long long value;
 
-       for (;;) {
-               if (down_killable(&gator_meminfo_sem)) {
-                       break;
+       meminfo_length = len = 0;
+
+       si_meminfo(&info);
+       for (i = 0; i < MEMINFO_TOTAL; i++) {
+               if (meminfo_enabled[i]) {
+                       switch (i) {
+                       case MEMINFO_MEMFREE:
+                               value = info.freeram * PAGE_SIZE;
+                               break;
+                       case MEMINFO_MEMUSED:
+                               /* pid -1 means system wide */
+                               meminfo_buffer[len++] = 1;
+                               meminfo_buffer[len++] = -1;
+                               /* Emit value */
+                               meminfo_buffer[len++] = meminfo_keys[MEMINFO_MEMUSED];
+                               meminfo_buffer[len++] = (info.totalram - info.freeram) * PAGE_SIZE;
+                               /* Clear pid */
+                               meminfo_buffer[len++] = 1;
+                               meminfo_buffer[len++] = 0;
+                               continue;
+                       case MEMINFO_BUFFERRAM:
+                               value = info.bufferram * PAGE_SIZE;
+                               break;
+                       default:
+                               value = 0;
+                               break;
+                       }
+                       meminfo_buffer[len++] = meminfo_keys[i];
+                       meminfo_buffer[len++] = value;
                }
+       }
 
-               // Eat up any pending events
-               while (!down_trylock(&gator_meminfo_sem));
+       meminfo_length = len;
+       new_data_avail = true;
+}
 
-               if (!gator_meminfo_run) {
+#if USE_THREAD
+
+static int gator_meminfo_func(void *data)
+{
+       for (;;) {
+               if (down_killable(&gator_meminfo_sem))
                        break;
-               }
 
-               meminfo_length = len = 0;
-
-               si_meminfo(&info);
-               for (i = 0; i < MEMINFO_TOTAL; i++) {
-                       if (meminfo_enabled[i]) {
-                               switch (i) {
-                               case MEMINFO_MEMFREE:
-                                       value = info.freeram * PAGE_SIZE;
-                                       break;
-                               case MEMINFO_MEMUSED:
-                                       // pid -1 means system wide
-                                       meminfo_buffer[len++] = 1;
-                                       meminfo_buffer[len++] = -1;
-                                       // Emit value
-                                       meminfo_buffer[len++] = meminfo_keys[MEMINFO_MEMUSED];
-                                       meminfo_buffer[len++] = (info.totalram - info.freeram) * PAGE_SIZE;
-                                       // Clear pid
-                                       meminfo_buffer[len++] = 1;
-                                       meminfo_buffer[len++] = 0;
-                                       continue;
-                               case MEMINFO_BUFFERRAM:
-                                       value = info.bufferram * PAGE_SIZE;
-                                       break;
-                               default:
-                                       value = 0;
-                                       break;
-                               }
-                               meminfo_buffer[len++] = meminfo_keys[i];
-                               meminfo_buffer[len++] = value;
-                       }
-               }
+               /* Eat up any pending events */
+               while (!down_trylock(&gator_meminfo_sem))
+                       ;
+
+               if (!gator_meminfo_run)
+                       break;
 
-               meminfo_length = len;
-               new_data_avail = true;
+               do_read();
        }
 
        return 0;
 }
 
+#else
+
+/* Must be run in process context as the kernel function si_meminfo() can sleep */
+static void wq_sched_handler(struct work_struct *wsptr)
+{
+       do_read();
+}
+
+static void meminfo_wake_up_handler(unsigned long unused_data)
+{
+       /* had to delay scheduling work as attempting to schedule work during the context switch is illegal in kernel versions 3.5 and greater */
+       schedule_work(&work);
+}
+
+#endif
+
 static int gator_events_meminfo_read(long long **buffer)
 {
+#if !USE_THREAD
+       static unsigned int last_mem_event;
+#endif
+
        if (!on_primary_core() || !meminfo_global_enabled)
                return 0;
 
+#if !USE_THREAD
+       if (last_mem_event != mem_event) {
+               last_mem_event = mem_event;
+               mod_timer(&meminfo_wake_up_timer, jiffies + 1);
+       }
+#endif
+
        if (!new_data_avail)
                return 0;
 
@@ -280,6 +343,7 @@ static inline unsigned long gator_get_mm_counter(struct mm_struct *mm, int membe
 {
 #ifdef SPLIT_RSS_COUNTING
        long val = atomic_long_read(&mm->rss_stat.count[member]);
+
        if (val < 0)
                val = 0;
        return (unsigned long)val;
@@ -306,22 +370,19 @@ static int gator_events_meminfo_read_proc(long long **buffer, struct task_struct
        int cpu = get_physical_cpu();
        long long *buf = per_cpu(proc_buffer, cpu);
 
-       if (!proc_global_enabled) {
+       if (!proc_global_enabled)
                return 0;
-       }
 
-       // Collect the memory stats of the process instead of the thread
-       if (task->group_leader != NULL) {
+       /* Collect the memory stats of the process instead of the thread */
+       if (task->group_leader != NULL)
                task = task->group_leader;
-       }
 
-       // get_task_mm/mmput is not needed in this context because the task and it's mm are required as part of the sched_switch
+       /* get_task_mm/mmput is not needed in this context because the task and it's mm are required as part of the sched_switch */
        mm = task->mm;
-       if (mm == NULL) {
+       if (mm == NULL)
                return 0;
-       }
 
-       // Derived from task_statm in fs/proc/task_mmu.c
+       /* Derived from task_statm in fs/proc/task_mmu.c */
        if (meminfo_enabled[MEMINFO_MEMUSED] || proc_enabled[PROC_SHARE]) {
                share = get_mm_counter(mm,
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)
@@ -332,7 +393,7 @@ static int gator_events_meminfo_read_proc(long long **buffer, struct task_struct
                                                           );
        }
 
-       // key of 1 indicates a pid
+       /* key of 1 indicates a pid */
        buf[len++] = 1;
        buf[len++] = task->pid;
 
@@ -366,12 +427,12 @@ static int gator_events_meminfo_read_proc(long long **buffer, struct task_struct
                                                                           MM_ANONPAGES
 #endif
                                                                           );
-               // Send resident for this pid
+               /* Send resident for this pid */
                buf[len++] = meminfo_keys[MEMINFO_MEMUSED];
                buf[len++] = value * PAGE_SIZE;
        }
 
-       // Clear pid
+       /* Clear pid */
        buf[len++] = 1;
        buf[len++] = 0;
 
index 3b248ec24e6e36b0c1013178d3158c5a423062a0..6b2af995ed41e7623ca06473e1e0724819496e5c 100644 (file)
@@ -8,21 +8,25 @@
  * published by the Free Software Foundation.
  *
  * Similar entries to those below must be present in the events.xml file.
- * To add them to the events.xml, create an events-mmap.xml with the 
+ * To add them to the events.xml, create an events-mmap.xml with the
  * following contents and rebuild gatord:
  *
- * <counter_set name="mmapped_cnt" count="3"/>
- * <category name="mmapped" counter_set="mmapped_cnt" per_cpu="no">
- *   <event event="0x0" title="Simulated1" name="Sine" display="maximum" average_selection="yes" description="Sort-of-sine"/>
- *   <event event="0x1" title="Simulated2" name="Triangle" display="maximum" average_selection="yes" description="Triangular wave"/>
- *   <event event="0x2" title="Simulated3" name="PWM" display="maximum" average_selection="yes" description="PWM Signal"/>
+ * <category name="mmapped">
+ *   <event counter="mmapped_cnt0" title="Simulated1" name="Sine" display="maximum" class="absolute" description="Sort-of-sine"/>
+ *   <event counter="mmapped_cnt1" title="Simulated2" name="Triangle" display="maximum" class="absolute" description="Triangular wave"/>
+ *   <event counter="mmapped_cnt2" title="Simulated3" name="PWM" display="maximum" class="absolute" description="PWM Signal"/>
  * </category>
  *
- * When adding custom events, be sure do the following
+ * When adding custom events, be sure to do the following:
  * - add any needed .c files to the gator driver Makefile
  * - call gator_events_install in the events init function
  * - add the init function to GATOR_EVENTS_LIST in gator_main.c
  * - add a new events-*.xml file to the gator daemon and rebuild
+ *
+ * Troubleshooting:
+ * - verify the new events are part of events.xml, which is created when building the daemon
+ * - verify the new events exist at /dev/gator/events/ once gatord is launched
+ * - verify the counter name in the XML matches the name at /dev/gator/events
  */
 
 #include <linux/init.h>
@@ -37,7 +41,6 @@ static int mmapped_global_enabled;
 
 static struct {
        unsigned long enabled;
-       unsigned long event;
        unsigned long key;
 } mmapped_counters[MMAPPED_COUNTERS_NUM];
 
@@ -47,7 +50,7 @@ static s64 prev_time;
 
 /* Adds mmapped_cntX directories and enabled, event, and key files to /dev/gator/events */
 static int gator_events_mmapped_create_files(struct super_block *sb,
-                                           struct dentry *root)
+                                            struct dentry *root)
 {
        int i;
 
@@ -61,8 +64,6 @@ static int gator_events_mmapped_create_files(struct super_block *sb,
                        return -1;
                gatorfs_create_ulong(sb, dir, "enabled",
                                     &mmapped_counters[i].enabled);
-               gatorfs_create_ulong(sb, dir, "event",
-                                    &mmapped_counters[i].event);
                gatorfs_create_ro_ulong(sb, dir, "key",
                                        &mmapped_counters[i].key);
        }
@@ -102,7 +103,7 @@ static int mmapped_simulate(int counter, int delta_in_us)
        switch (counter) {
        case 0:         /* sort-of-sine */
                {
-                       static int t = 0;
+                       static int t;
                        int x;
 
                        t += delta_in_us;
@@ -139,7 +140,7 @@ static int mmapped_simulate(int counter, int delta_in_us)
                break;
        case 2:         /* PWM signal */
                {
-                       static int dc, x, t = 0;
+                       static int dc, x, t;
 
                        t += delta_in_us;
                        if (t > 1000000)
@@ -156,7 +157,7 @@ static int mmapped_simulate(int counter, int delta_in_us)
        return result;
 }
 
-static int gator_events_mmapped_read(int **buffer)
+static int gator_events_mmapped_read(int **buffer, bool sched_switch)
 {
        int i;
        int len = 0;
@@ -177,8 +178,7 @@ static int gator_events_mmapped_read(int **buffer)
                if (mmapped_counters[i].enabled) {
                        mmapped_buffer[len++] = mmapped_counters[i].key;
                        mmapped_buffer[len++] =
-                           mmapped_simulate(mmapped_counters[i].event,
-                                           delta_in_us);
+                           mmapped_simulate(i, delta_in_us);
                }
        }
 
index 11c10e3755117ae317abfcebd720ad2d2f9b12f7..d21b4db7b77cde5455ac91da54548651358d2947 100644 (file)
@@ -25,7 +25,7 @@ static int netGet[TOTALNET * 4];
 
 static struct timer_list net_wake_up_timer;
 
-// Must be run in process context as the kernel function dev_get_stats() can sleep
+/* Must be run in process context as the kernel function dev_get_stats() can sleep */
 static void get_network_stats(struct work_struct *wsptr)
 {
        int rx = 0, tx = 0;
@@ -49,7 +49,7 @@ DECLARE_WORK(wq_get_stats, get_network_stats);
 
 static void net_wake_up_handler(unsigned long unused_data)
 {
-       // had to delay scheduling work as attempting to schedule work during the context switch is illegal in kernel versions 3.5 and greater
+       /* had to delay scheduling work as attempting to schedule work during the context switch is illegal in kernel versions 3.5 and greater */
        schedule_work(&wq_get_stats);
 }
 
@@ -73,21 +73,19 @@ static void calculate_delta(int *rx, int *tx)
 
 static int gator_events_net_create_files(struct super_block *sb, struct dentry *root)
 {
-       // Network counters are not currently supported in RT-Preempt full because mod_timer is used
+       /* Network counters are not currently supported in RT-Preempt full because mod_timer is used */
 #ifndef CONFIG_PREEMPT_RT_FULL
        struct dentry *dir;
 
        dir = gatorfs_mkdir(sb, root, "Linux_net_rx");
-       if (!dir) {
+       if (!dir)
                return -1;
-       }
        gatorfs_create_ulong(sb, dir, "enabled", &netrx_enabled);
        gatorfs_create_ro_ulong(sb, dir, "key", &netrx_key);
 
        dir = gatorfs_mkdir(sb, root, "Linux_net_tx");
-       if (!dir) {
+       if (!dir)
                return -1;
-       }
        gatorfs_create_ulong(sb, dir, "enabled", &nettx_enabled);
        gatorfs_create_ro_ulong(sb, dir, "key", &nettx_key);
 #endif
@@ -115,10 +113,10 @@ static void gator_events_net_stop(void)
        nettx_enabled = 0;
 }
 
-static int gator_events_net_read(int **buffer)
+static int gator_events_net_read(int **buffer, bool sched_switch)
 {
        int len, rx_delta, tx_delta;
-       static int last_rx_delta = 0, last_tx_delta = 0;
+       static int last_rx_delta, last_tx_delta;
 
        if (!on_primary_core())
                return 0;
@@ -134,7 +132,8 @@ static int gator_events_net_read(int **buffer)
        if (netrx_enabled && last_rx_delta != rx_delta) {
                last_rx_delta = rx_delta;
                netGet[len++] = netrx_key;
-               netGet[len++] = 0;      // indicates to Streamline that rx_delta bytes were transmitted now, not since the last message
+               /* indicates to Streamline that rx_delta bytes were transmitted now, not since the last message */
+               netGet[len++] = 0;
                netGet[len++] = netrx_key;
                netGet[len++] = rx_delta;
        }
@@ -142,7 +141,8 @@ static int gator_events_net_read(int **buffer)
        if (nettx_enabled && last_tx_delta != tx_delta) {
                last_tx_delta = tx_delta;
                netGet[len++] = nettx_key;
-               netGet[len++] = 0;      // indicates to Streamline that tx_delta bytes were transmitted now, not since the last message
+               /* indicates to Streamline that tx_delta bytes were transmitted now, not since the last message */
+               netGet[len++] = 0;
                netGet[len++] = nettx_key;
                netGet[len++] = tx_delta;
        }
index 8b2d67a058b36c77d8e3cc30e1e3ca831da6794f..47cf278e508b110fa7746fb2bf8d9a7d2420eeb5 100644 (file)
@@ -8,7 +8,7 @@
 
 #include "gator.h"
 
-// gator_events_armvX.c is used for Linux 2.6.x
+/* gator_events_armvX.c is used for Linux 2.6.x */
 #if GATOR_PERF_PMU_SUPPORT
 
 #include <linux/io.h>
 
 extern bool event_based_sampling;
 
-// Maximum number of per-core counters - currently reserves enough space for two full hardware PMUs for big.LITTLE
+/* Maximum number of per-core counters - currently reserves enough space for two full hardware PMUs for big.LITTLE */
 #define CNTMAX 16
 #define CCI_400 4
-// Maximum number of uncore counters
-// + 1 for the cci-400 cycles counter
-#define UCCNT (CCI_400 + 1)
+#define CCN_5XX 8
+/* Maximum number of uncore counters */
+/* + 1 for the cci-400 cycles counter */
+/* + 1 for the CCN-5xx cycles counter */
+#define UCCNT (CCI_400 + 1 + CCN_5XX + 1)
 
-// Default to 0 if unable to probe the revision which was the previous behavior
+/* Default to 0 if unable to probe the revision which was the previous behavior */
 #define DEFAULT_CCI_REVISION 0
 
-// A gator_attr is needed for every counter
+/* A gator_attr is needed for every counter */
 struct gator_attr {
-       // Set once in gator_events_perf_pmu_*_init - the name of the event in the gatorfs
+       /* Set once in gator_events_perf_pmu_*_init - the name of the event in the gatorfs */
        char name[40];
-       // Exposed in gatorfs - set by gatord to enable this counter
+       /* Exposed in gatorfs - set by gatord to enable this counter */
        unsigned long enabled;
-       // Set once in gator_events_perf_pmu_*_init - the perf type to use, see perf_type_id in the perf_event.h header file.
+       /* Set once in gator_events_perf_pmu_*_init - the perf type to use, see perf_type_id in the perf_event.h header file. */
        unsigned long type;
-       // Exposed in gatorfs - set by gatord to select the event to collect
+       /* Exposed in gatorfs - set by gatord to select the event to collect */
        unsigned long event;
-       // Exposed in gatorfs - set by gatord with the sample period to use and enable EBS for this counter
+       /* Exposed in gatorfs - set by gatord with the sample period to use and enable EBS for this counter */
        unsigned long count;
-       // Exposed as read only in gatorfs - set once in __attr_init as the key to use in the APC data
+       /* Exposed as read only in gatorfs - set once in __attr_init as the key to use in the APC data */
        unsigned long key;
 };
 
-// Per-core counter attributes
+/* Per-core counter attributes */
 static struct gator_attr attrs[CNTMAX];
-// Number of initialized per-core counters
+/* Number of initialized per-core counters */
 static int attr_count;
-// Uncore counter attributes
+/* Uncore counter attributes */
 static struct gator_attr uc_attrs[UCCNT];
-// Number of initialized uncore counters
+/* Number of initialized uncore counters */
 static int uc_attr_count;
 
 struct gator_event {
@@ -74,13 +76,11 @@ static int __create_files(struct super_block *sb, struct dentry *root, struct ga
 {
        struct dentry *dir;
 
-       if (attr->name[0] == '\0') {
+       if (attr->name[0] == '\0')
                return 0;
-       }
        dir = gatorfs_mkdir(sb, root, attr->name);
-       if (!dir) {
+       if (!dir)
                return -1;
-       }
        gatorfs_create_ulong(sb, dir, "enabled", &attr->enabled);
        gatorfs_create_ulong(sb, dir, "count", &attr->count);
        gatorfs_create_ro_ulong(sb, dir, "key", &attr->key);
@@ -94,15 +94,13 @@ static int gator_events_perf_pmu_create_files(struct super_block *sb, struct den
        int cnt;
 
        for (cnt = 0; cnt < attr_count; cnt++) {
-               if (__create_files(sb, root, &attrs[cnt]) != 0) {
+               if (__create_files(sb, root, &attrs[cnt]) != 0)
                        return -1;
-               }
        }
 
        for (cnt = 0; cnt < uc_attr_count; cnt++) {
-               if (__create_files(sb, root, &uc_attrs[cnt]) != 0) {
+               if (__create_files(sb, root, &uc_attrs[cnt]) != 0)
                        return -1;
-               }
        }
 
        return 0;
@@ -123,14 +121,14 @@ static void dummy_handler(struct perf_event *event, int unused, struct perf_samp
 static void dummy_handler(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs)
 #endif
 {
-// Required as perf_event_create_kernel_counter() requires an overflow handler, even though all we do is poll
+       /* Required as perf_event_create_kernel_counter() requires an overflow handler, even though all we do is poll */
 }
 
-static int gator_events_perf_pmu_read(int **buffer);
+static int gator_events_perf_pmu_read(int **buffer, bool sched_switch);
 
 static int gator_events_perf_pmu_online(int **buffer, bool migrate)
 {
-       return gator_events_perf_pmu_read(buffer);
+       return gator_events_perf_pmu_read(buffer, false);
 }
 
 static void __online_dispatch(int cpu, bool migrate, struct gator_attr *const attr, struct gator_event *const event)
@@ -139,15 +137,13 @@ static void __online_dispatch(int cpu, bool migrate, struct gator_attr *const at
 
        event->zero = true;
 
-       if (event->pevent != NULL || event->pevent_attr == 0 || migrate) {
+       if (event->pevent != NULL || event->pevent_attr == 0 || migrate)
                return;
-       }
 
-       if (attr->count > 0) {
+       if (attr->count > 0)
                handler = ebs_overflow_handler;
-       } else {
+       else
                handler = dummy_handler;
-       }
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
        event->pevent = perf_event_create_kernel_counter(event->pevent_attr, cpu, 0, handler);
@@ -174,14 +170,12 @@ static void gator_events_perf_pmu_online_dispatch(int cpu, bool migrate)
 
        cpu = pcpu_to_lcpu(cpu);
 
-       for (cnt = 0; cnt < attr_count; cnt++) {
+       for (cnt = 0; cnt < attr_count; cnt++)
                __online_dispatch(cpu, migrate, &attrs[cnt], &per_cpu(events, cpu)[cnt]);
-       }
 
        if (cpu == 0) {
-               for (cnt = 0; cnt < uc_attr_count; cnt++) {
+               for (cnt = 0; cnt < uc_attr_count; cnt++)
                        __online_dispatch(cpu, migrate, &uc_attrs[cnt], &uc_events[cnt]);
-               }
        }
 }
 
@@ -194,28 +188,24 @@ static void __offline_dispatch(int cpu, struct gator_event *const event)
                event->pevent = NULL;
        }
 
-       if (pe) {
+       if (pe)
                perf_event_release_kernel(pe);
-       }
 }
 
 static void gator_events_perf_pmu_offline_dispatch(int cpu, bool migrate)
 {
        int cnt;
 
-       if (migrate) {
+       if (migrate)
                return;
-       }
        cpu = pcpu_to_lcpu(cpu);
 
-       for (cnt = 0; cnt < attr_count; cnt++) {
+       for (cnt = 0; cnt < attr_count; cnt++)
                __offline_dispatch(cpu, &per_cpu(events, cpu)[cnt]);
-       }
 
        if (cpu == 0) {
-               for (cnt = 0; cnt < uc_attr_count; cnt++) {
+               for (cnt = 0; cnt < uc_attr_count; cnt++)
                        __offline_dispatch(cpu, &uc_events[cnt]);
-               }
        }
 }
 
@@ -225,7 +215,7 @@ static int __check_ebs(struct gator_attr *const attr)
                if (!event_based_sampling) {
                        event_based_sampling = true;
                } else {
-                       printk(KERN_WARNING "gator: Only one ebs counter is allowed\n");
+                       pr_warning("gator: Only one ebs counter is allowed\n");
                        return -1;
                }
        }
@@ -238,9 +228,9 @@ static int __start(struct gator_attr *const attr, struct gator_event *const even
        u32 size = sizeof(struct perf_event_attr);
 
        event->pevent = NULL;
-       if (!attr->enabled) {   // Skip disabled counters
+       /* Skip disabled counters */
+       if (!attr->enabled)
                return 0;
-       }
 
        event->prev = 0;
        event->curr = 0;
@@ -267,29 +257,25 @@ static int gator_events_perf_pmu_start(void)
 
        event_based_sampling = false;
        for (cnt = 0; cnt < attr_count; cnt++) {
-               if (__check_ebs(&attrs[cnt]) != 0) {
+               if (__check_ebs(&attrs[cnt]) != 0)
                        return -1;
-               }
        }
 
        for (cnt = 0; cnt < uc_attr_count; cnt++) {
-               if (__check_ebs(&uc_attrs[cnt]) != 0) {
+               if (__check_ebs(&uc_attrs[cnt]) != 0)
                        return -1;
-               }
        }
 
        for_each_present_cpu(cpu) {
                for (cnt = 0; cnt < attr_count; cnt++) {
-                       if (__start(&attrs[cnt], &per_cpu(events, cpu)[cnt]) != 0) {
+                       if (__start(&attrs[cnt], &per_cpu(events, cpu)[cnt]) != 0)
                                return -1;
-                       }
                }
        }
 
        for (cnt = 0; cnt < uc_attr_count; cnt++) {
-               if (__start(&uc_attrs[cnt], &uc_events[cnt]) != 0) {
+               if (__start(&uc_attrs[cnt], &uc_events[cnt]) != 0)
                        return -1;
-               }
        }
 
        return 0;
@@ -297,10 +283,8 @@ static int gator_events_perf_pmu_start(void)
 
 static void __event_stop(struct gator_event *const event)
 {
-       if (event->pevent_attr) {
-               kfree(event->pevent_attr);
-               event->pevent_attr = NULL;
-       }
+       kfree(event->pevent_attr);
+       event->pevent_attr = NULL;
 }
 
 static void __attr_stop(struct gator_attr *const attr)
@@ -315,29 +299,25 @@ static void gator_events_perf_pmu_stop(void)
        unsigned int cnt, cpu;
 
        for_each_present_cpu(cpu) {
-               for (cnt = 0; cnt < attr_count; cnt++) {
+               for (cnt = 0; cnt < attr_count; cnt++)
                        __event_stop(&per_cpu(events, cpu)[cnt]);
-               }
        }
 
-       for (cnt = 0; cnt < uc_attr_count; cnt++) {
+       for (cnt = 0; cnt < uc_attr_count; cnt++)
                __event_stop(&uc_events[cnt]);
-       }
 
-       for (cnt = 0; cnt < attr_count; cnt++) {
+       for (cnt = 0; cnt < attr_count; cnt++)
                __attr_stop(&attrs[cnt]);
-       }
 
-       for (cnt = 0; cnt < uc_attr_count; cnt++) {
+       for (cnt = 0; cnt < uc_attr_count; cnt++)
                __attr_stop(&uc_attrs[cnt]);
-       }
 }
 
 static void __read(int *const len, int cpu, struct gator_attr *const attr, struct gator_event *const event)
 {
        int delta;
-
        struct perf_event *const ev = event->pevent;
+
        if (ev != NULL && ev->state == PERF_EVENT_STATE_ACTIVE) {
                /* After creating the perf counter in __online_dispatch, there
                 * is a race condition between gator_events_perf_pmu_online and
@@ -361,33 +341,29 @@ static void __read(int *const len, int cpu, struct gator_attr *const attr, struc
                                event->prev_delta = delta;
                                event->prev = event->curr;
                                per_cpu(perf_cnt, cpu)[(*len)++] = attr->key;
-                               if (delta < 0) {
+                               if (delta < 0)
                                        delta *= -1;
-                               }
                                per_cpu(perf_cnt, cpu)[(*len)++] = delta;
                        }
                }
        }
 }
 
-static int gator_events_perf_pmu_read(int **buffer)
+static int gator_events_perf_pmu_read(int **buffer, bool sched_switch)
 {
        int cnt, len = 0;
        const int cpu = get_logical_cpu();
 
-       for (cnt = 0; cnt < attr_count; cnt++) {
+       for (cnt = 0; cnt < attr_count; cnt++)
                __read(&len, cpu, &attrs[cnt], &per_cpu(events, cpu)[cnt]);
-       }
 
        if (cpu == 0) {
-               for (cnt = 0; cnt < uc_attr_count; cnt++) {
+               for (cnt = 0; cnt < uc_attr_count; cnt++)
                        __read(&len, cpu, &uc_attrs[cnt], &uc_events[cnt]);
-               }
        }
 
-       if (buffer) {
+       if (buffer)
                *buffer = per_cpu(perf_cnt, cpu);
-       }
 
        return len;
 }
@@ -428,23 +404,20 @@ static int probe_cci_revision(void)
        int ret = DEFAULT_CCI_REVISION;
 
        np = of_find_matching_node(NULL, arm_cci_matches);
-       if (!np) {
+       if (!np)
                return ret;
-       }
 
-       if (of_address_to_resource(np, 0, &res)) {
+       if (of_address_to_resource(np, 0, &res))
                goto node_put;
-       }
 
        cci_ctrl_base = ioremap(res.start, resource_size(&res));
 
        rev = (readl_relaxed(cci_ctrl_base + 0xfe8) >> 4) & 0xf;
 
-       if (rev <= 4) {
+       if (rev <= 4)
                ret = 0;
-       } else if (rev <= 6) {
+       else if (rev <= 6)
                ret = 1;
-       }
 
        iounmap(cci_ctrl_base);
 
@@ -463,32 +436,39 @@ static int probe_cci_revision(void)
 
 #endif
 
-static void gator_events_perf_pmu_cci_init(const int type)
+static void gator_events_perf_pmu_uncore_init(const char *const name, const int type, const int count)
 {
        int cnt;
+
+       snprintf(uc_attrs[uc_attr_count].name, sizeof(uc_attrs[uc_attr_count].name), "%s_ccnt", name);
+       uc_attrs[uc_attr_count].type = type;
+       ++uc_attr_count;
+
+       for (cnt = 0; cnt < count; ++cnt, ++uc_attr_count) {
+               struct gator_attr *const attr = &uc_attrs[uc_attr_count];
+
+               snprintf(attr->name, sizeof(attr->name), "%s_cnt%d", name, cnt);
+               attr->type = type;
+       }
+}
+
+static void gator_events_perf_pmu_cci_init(const int type)
+{
        const char *cci_name;
 
        switch (probe_cci_revision()) {
        case 0:
-               cci_name = "cci-400";
+               cci_name = "CCI_400";
                break;
        case 1:
-               cci_name = "cci-400-r1";
+               cci_name = "CCI_400-r1";
                break;
        default:
                pr_debug("gator: unrecognized cci-400 revision\n");
                return;
        }
 
-       snprintf(uc_attrs[uc_attr_count].name, sizeof(uc_attrs[uc_attr_count].name), "%s_ccnt", cci_name);
-       uc_attrs[uc_attr_count].type = type;
-       ++uc_attr_count;
-
-       for (cnt = 0; cnt < CCI_400; ++cnt, ++uc_attr_count) {
-               struct gator_attr *const attr = &uc_attrs[uc_attr_count];
-               snprintf(attr->name, sizeof(attr->name), "%s_cnt%d", cci_name, cnt);
-               attr->type = type;
-       }
+       gator_events_perf_pmu_uncore_init(cci_name, type, CCI_400);
 }
 
 static void gator_events_perf_pmu_cpu_init(const struct gator_cpu *const gator_cpu, const int type)
@@ -501,6 +481,7 @@ static void gator_events_perf_pmu_cpu_init(const struct gator_cpu *const gator_c
 
        for (cnt = 0; cnt < gator_cpu->pmnc_counters; ++cnt, ++attr_count) {
                struct gator_attr *const attr = &attrs[attr_count];
+
                snprintf(attr->name, sizeof(attr->name), "%s_cnt%d", gator_cpu->pmnc_name, cnt);
                attr->type = type;
        }
@@ -516,12 +497,10 @@ int gator_events_perf_pmu_init(void)
        int cnt;
        bool found_cpu = false;
 
-       for (cnt = 0; cnt < CNTMAX; cnt++) {
+       for (cnt = 0; cnt < CNTMAX; cnt++)
                __attr_init(&attrs[cnt]);
-       }
-       for (cnt = 0; cnt < UCCNT; cnt++) {
+       for (cnt = 0; cnt < UCCNT; cnt++)
                __attr_init(&uc_attrs[cnt]);
-       }
 
        memset(&pea, 0, sizeof(pea));
        pea.size = sizeof(pea);
@@ -531,7 +510,7 @@ int gator_events_perf_pmu_init(void)
        for (type = PERF_TYPE_MAX; type < 0x20; ++type) {
                pea.type = type;
 
-               // A particular PMU may work on some but not all cores, so try on each core
+               /* A particular PMU may work on some but not all cores, so try on each core */
                pe = NULL;
                for_each_present_cpu(cpu) {
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
@@ -539,23 +518,31 @@ int gator_events_perf_pmu_init(void)
 #else
                        pe = perf_event_create_kernel_counter(&pea, cpu, 0, dummy_handler, 0);
 #endif
-                       if (!IS_ERR(pe)) {
+                       if (!IS_ERR(pe))
                                break;
-                       }
                }
-               // Assume that valid PMUs are contiguous
+               /* Assume that valid PMUs are contiguous */
                if (IS_ERR(pe)) {
-                       break;
+                       pea.config = 0xff00;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
+                       pe = perf_event_create_kernel_counter(&pea, 0, 0, dummy_handler);
+#else
+                       pe = perf_event_create_kernel_counter(&pea, 0, 0, dummy_handler, 0);
+#endif
+                       if (IS_ERR(pe))
+                               break;
                }
 
                if (pe->pmu != NULL && type == pe->pmu->type) {
-                       if (strcmp("CCI", pe->pmu->name) == 0 || strcmp("CCI_400", pe->pmu->name) == 0) {
+                       if (strcmp("CCI", pe->pmu->name) == 0 || strcmp("CCI_400", pe->pmu->name) == 0 || strcmp("CCI_400-r1", pe->pmu->name) == 0) {
                                gator_events_perf_pmu_cci_init(type);
+                       } else if (strcmp("ccn", pe->pmu->name) == 0) {
+                               gator_events_perf_pmu_uncore_init("ARM_CCN_5XX", type, CCN_5XX);
                        } else if ((gator_cpu = gator_find_cpu_by_pmu_name(pe->pmu->name)) != NULL) {
                                found_cpu = true;
                                gator_events_perf_pmu_cpu_init(gator_cpu, type);
                        }
-                       // Initialize gator_attrs for dynamic PMUs here
+                       /* Initialize gator_attrs for dynamic PMUs here */
                }
 
                perf_event_release_kernel(pe);
@@ -563,21 +550,21 @@ int gator_events_perf_pmu_init(void)
 
        if (!found_cpu) {
                const struct gator_cpu *const gator_cpu = gator_find_cpu_by_cpuid(gator_cpuid());
-               if (gator_cpu == NULL) {
+
+               if (gator_cpu == NULL)
                        return -1;
-               }
                gator_events_perf_pmu_cpu_init(gator_cpu, PERF_TYPE_RAW);
        }
 
-       // Initialize gator_attrs for non-dynamic PMUs here
+       /* Initialize gator_attrs for non-dynamic PMUs here */
 
        if (attr_count > CNTMAX) {
-               printk(KERN_ERR "gator: Too many perf counters\n");
+               pr_err("gator: Too many perf counters\n");
                return -1;
        }
 
        if (uc_attr_count > UCCNT) {
-               printk(KERN_ERR "gator: Too many perf uncore counters\n");
+               pr_err("gator: Too many perf uncore counters\n");
                return -1;
        }
 
index 9e39158301820e0b33bd80d204650f59710ff2c0..637107d6af1d6c8b4c8048d0f352380d17ee0d3f 100644 (file)
@@ -26,8 +26,9 @@ GATOR_DEFINE_PROBE(sched_switch, TP_PROTO(struct task_struct *prev, struct task_
 {
        unsigned long flags;
 
-       // disable interrupts to synchronize with gator_events_sched_read()
-       // spinlocks not needed since percpu buffers are used
+       /* disable interrupts to synchronize with gator_events_sched_read()
+        * spinlocks not needed since percpu buffers are used
+        */
        local_irq_save(flags);
        per_cpu(schedCnt, get_physical_cpu())[SCHED_SWITCH]++;
        local_irq_restore(flags);
@@ -39,9 +40,8 @@ static int gator_events_sched_create_files(struct super_block *sb, struct dentry
 
        /* switch */
        dir = gatorfs_mkdir(sb, root, "Linux_sched_switch");
-       if (!dir) {
+       if (!dir)
                return -1;
-       }
        gatorfs_create_ulong(sb, dir, "enabled", &sched_switch_enabled);
        gatorfs_create_ro_ulong(sb, dir, "key", &sched_switch_key);
 
@@ -50,7 +50,7 @@ static int gator_events_sched_create_files(struct super_block *sb, struct dentry
 
 static int gator_events_sched_start(void)
 {
-       // register tracepoints
+       /* register tracepoints */
        if (sched_switch_enabled)
                if (GATOR_REGISTER_TRACE(sched_switch))
                        goto sched_switch_exit;
@@ -58,7 +58,7 @@ static int gator_events_sched_start(void)
 
        return 0;
 
-       // unregister tracepoints on error
+       /* unregister tracepoints on error */
 sched_switch_exit:
        pr_err("gator: scheduler event tracepoints failed to activate, please verify that tracepoints are enabled in the linux kernel\n");
 
@@ -74,7 +74,7 @@ static void gator_events_sched_stop(void)
        sched_switch_enabled = 0;
 }
 
-static int gator_events_sched_read(int **buffer)
+static int gator_events_sched_read(int **buffer, bool sched_switch)
 {
        unsigned long flags;
        int len, value;
index 8ca251af0e2672e4216f19b085d4b1f7cf3b3102..49219362db092515ee590c002dbc66cff870d977 100644 (file)
@@ -8,13 +8,13 @@
 
 #include "gator.h"
 
-// gator_events_perf_pmu.c is used if perf is supported
+/* gator_events_perf_pmu.c is used if perf is supported */
 #if GATOR_NO_PERF_SUPPORT
 
 static const char *pmnc_name;
 static int pmnc_counters;
 
-// Per-CPU PMNC: config reg
+/* Per-CPU PMNC: config reg */
 #define PMNC_E         (1 << 0)        /* Enable all counters */
 #define PMNC_P         (1 << 1)        /* Reset all counters */
 #define PMNC_C         (1 << 2)        /* Cycle counter reset */
@@ -23,12 +23,12 @@ static int pmnc_counters;
 #define PMNC_DP                (1 << 5)        /* Disable CCNT if non-invasive debug */
 #define        PMNC_MASK       0x3f    /* Mask for writable bits */
 
-// ccnt reg
+/* ccnt reg */
 #define CCNT_REG       (1 << 31)
 
-#define CCNT           0
+#define CCNT           0
 #define CNT0           1
-#define CNTMAX                 (4+1)
+#define CNTMAX         (4+1)
 
 static unsigned long pmnc_enabled[CNTMAX];
 static unsigned long pmnc_event[CNTMAX];
@@ -243,6 +243,7 @@ static inline void scorpion_pmnc_write(u32 val)
 static inline u32 scorpion_pmnc_read(void)
 {
        u32 val;
+
        asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
        return val;
 }
@@ -250,6 +251,7 @@ static inline u32 scorpion_pmnc_read(void)
 static inline u32 scorpion_ccnt_read(void)
 {
        u32 val;
+
        asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
        return val;
 }
@@ -257,6 +259,7 @@ static inline u32 scorpion_ccnt_read(void)
 static inline u32 scorpion_cntn_read(void)
 {
        u32 val;
+
        asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
        return val;
 }
@@ -317,6 +320,7 @@ static inline int scorpion_pmnc_select_counter(unsigned int cnt)
 static u32 scorpion_read_lpm0(void)
 {
        u32 val;
+
        asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
        return val;
 }
@@ -329,6 +333,7 @@ static void scorpion_write_lpm0(u32 val)
 static u32 scorpion_read_lpm1(void)
 {
        u32 val;
+
        asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
        return val;
 }
@@ -341,6 +346,7 @@ static void scorpion_write_lpm1(u32 val)
 static u32 scorpion_read_lpm2(void)
 {
        u32 val;
+
        asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
        return val;
 }
@@ -353,6 +359,7 @@ static void scorpion_write_lpm2(u32 val)
 static u32 scorpion_read_l2lpm(void)
 {
        u32 val;
+
        asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
        return val;
 }
@@ -365,6 +372,7 @@ static void scorpion_write_l2lpm(u32 val)
 static u32 scorpion_read_vlpm(void)
 {
        u32 val;
+
        asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
        return val;
 }
@@ -375,7 +383,7 @@ static void scorpion_write_vlpm(u32 val)
 }
 
 struct scorpion_access_funcs {
-       u32(*read)(void);
+       u32 (*read)(void);
        void (*write)(u32);
 };
 
@@ -420,17 +428,17 @@ static u32 scorpion_get_columnmask(u32 setval)
 {
        if (setval & COLMN0MASK)
                return 0xffffff00;
-       else if (setval & COLMN1MASK)
+       if (setval & COLMN1MASK)
                return 0xffff00ff;
-       else if (setval & COLMN2MASK)
+       if (setval & COLMN2MASK)
                return 0xff00ffff;
-       else
-               return 0x80ffffff;
+       return 0x80ffffff;
 }
 
 static void scorpion_evt_setup(u32 gr, u32 setval)
 {
        u32 val;
+
        if (gr == 4)
                scorpion_pre_vlpm();
        val = scorpion_get_columnmask(setval) & scor_func[gr].read();
@@ -443,6 +451,7 @@ static void scorpion_evt_setup(u32 gr, u32 setval)
 static int get_scorpion_evtinfo(unsigned int evt_type, struct scorp_evt *evtinfo)
 {
        u32 idx;
+
        if ((evt_type < 0x4c) || (evt_type >= MSM_MAX_EVT))
                return 0;
        idx = evt_type - 0x4c;
@@ -463,7 +472,7 @@ static inline void scorpion_pmnc_write_evtsel(unsigned int cnt, u32 val)
                } else {
                        u32 zero = 0;
                        struct scorp_evt evtinfo;
-                       // extract evtinfo.grp and evtinfo.tevt_type_act from val
+                       /* extract evtinfo.grp and evtinfo.tevt_type_act from val */
                        if (get_scorpion_evtinfo(val, &evtinfo) == 0)
                                return;
                        asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (evtinfo.evt_type_act));
@@ -505,20 +514,18 @@ static int gator_events_scorpion_create_files(struct super_block *sb, struct den
 
        for (i = 0; i < pmnc_counters; i++) {
                char buf[40];
-               if (i == 0) {
-                       snprintf(buf, sizeof buf, "%s_ccnt", pmnc_name);
-               } else {
-                       snprintf(buf, sizeof buf, "%s_cnt%d", pmnc_name, i - 1);
-               }
+
+               if (i == 0)
+                       snprintf(buf, sizeof(buf), "%s_ccnt", pmnc_name);
+               else
+                       snprintf(buf, sizeof(buf), "%s_cnt%d", pmnc_name, i - 1);
                dir = gatorfs_mkdir(sb, root, buf);
-               if (!dir) {
+               if (!dir)
                        return -1;
-               }
                gatorfs_create_ulong(sb, dir, "enabled", &pmnc_enabled[i]);
                gatorfs_create_ro_ulong(sb, dir, "key", &pmnc_key[i]);
-               if (i > 0) {
+               if (i > 0)
                        gatorfs_create_ulong(sb, dir, "event", &pmnc_event[i]);
-               }
        }
 
        return 0;
@@ -528,9 +535,8 @@ static int gator_events_scorpion_online(int **buffer, bool migrate)
 {
        unsigned int cnt, len = 0, cpu = smp_processor_id();
 
-       if (scorpion_pmnc_read() & PMNC_E) {
+       if (scorpion_pmnc_read() & PMNC_E)
                scorpion_pmnc_write(scorpion_pmnc_read() & ~PMNC_E);
-       }
 
        /* Initialize & Reset PMNC: C bit and P bit */
        scorpion_pmnc_write(PMNC_P | PMNC_C);
@@ -541,33 +547,32 @@ static int gator_events_scorpion_online(int **buffer, bool migrate)
                if (!pmnc_enabled[cnt])
                        continue;
 
-               // disable counter
+               /* disable counter */
                scorpion_pmnc_disable_counter(cnt);
 
                event = pmnc_event[cnt] & 255;
 
-               // Set event (if destined for PMNx counters), We don't need to set the event if it's a cycle count
+               /* Set event (if destined for PMNx counters), We don't need to set the event if it's a cycle count */
                if (cnt != CCNT)
                        scorpion_pmnc_write_evtsel(cnt, event);
 
-               // reset counter
+               /* reset counter */
                scorpion_pmnc_reset_counter(cnt);
 
-               // Enable counter, do not enable interrupt for this counter
+               /* Enable counter, do not enable interrupt for this counter */
                scorpion_pmnc_enable_counter(cnt);
        }
 
-       // enable
+       /* enable */
        scorpion_pmnc_write(scorpion_pmnc_read() | PMNC_E);
 
-       // read the counters and toss the invalid data, return zero instead
+       /* read the counters and toss the invalid data, return zero instead */
        for (cnt = 0; cnt < pmnc_counters; cnt++) {
                if (pmnc_enabled[cnt]) {
-                       if (cnt == CCNT) {
+                       if (cnt == CCNT)
                                scorpion_ccnt_read();
-                       } else if (scorpion_pmnc_select_counter(cnt) == cnt) {
+                       else if (scorpion_pmnc_select_counter(cnt) == cnt)
                                scorpion_cntn_read();
-                       }
                        scorpion_pmnc_reset_counter(cnt);
 
                        per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
@@ -597,26 +602,25 @@ static void gator_events_scorpion_stop(void)
        }
 }
 
-static int gator_events_scorpion_read(int **buffer)
+static int gator_events_scorpion_read(int **buffer, bool sched_switch)
 {
        int cnt, len = 0;
        int cpu = smp_processor_id();
 
-       // a context switch may occur before the online hotplug event, thus need to check that the pmu is enabled
-       if (!(scorpion_pmnc_read() & PMNC_E)) {
+       /* a context switch may occur before the online hotplug event, thus need to check that the pmu is enabled */
+       if (!(scorpion_pmnc_read() & PMNC_E))
                return 0;
-       }
 
        for (cnt = 0; cnt < pmnc_counters; cnt++) {
                if (pmnc_enabled[cnt]) {
                        int value;
-                       if (cnt == CCNT) {
+
+                       if (cnt == CCNT)
                                value = scorpion_ccnt_read();
-                       } else if (scorpion_pmnc_select_counter(cnt) == cnt) {
+                       else if (scorpion_pmnc_select_counter(cnt) == cnt)
                                value = scorpion_cntn_read();
-                       } else {
+                       else
                                value = 0;
-                       }
                        scorpion_pmnc_reset_counter(cnt);
 
                        per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
@@ -655,7 +659,8 @@ int gator_events_scorpion_init(void)
                return -1;
        }
 
-       pmnc_counters++;        // CNT[n] + CCNT
+       /* CNT[n] + CCNT */
+       pmnc_counters++;
 
        for (cnt = CCNT; cnt < CNTMAX; cnt++) {
                pmnc_enabled[cnt] = 0;
index 166cfe7d681de292396163897aa4023d623b81d0..d8fb357b9edac1d61c9420e97a0dc4f0b55925f8 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #define gatorfs_MAGIC 0x24051020
 #define TMPBUFSIZE 50
@@ -43,6 +43,7 @@ static ssize_t gatorfs_ulong_to_user(unsigned long val, char __user *buf, size_t
 {
        char tmpbuf[TMPBUFSIZE];
        size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
+
        if (maxlen > TMPBUFSIZE)
                maxlen = TMPBUFSIZE;
        return simple_read_from_buffer(buf, count, offset, tmpbuf, maxlen);
@@ -52,6 +53,7 @@ static ssize_t gatorfs_u64_to_user(u64 val, char __user *buf, size_t count, loff
 {
        char tmpbuf[TMPBUFSIZE];
        size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%llu\n", val);
+
        if (maxlen > TMPBUFSIZE)
                maxlen = TMPBUFSIZE;
        return simple_read_from_buffer(buf, count, offset, tmpbuf, maxlen);
@@ -104,12 +106,14 @@ static int gatorfs_u64_from_user(u64 *val, char const __user *buf, size_t count)
 static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
        unsigned long *val = file->private_data;
+
        return gatorfs_ulong_to_user(*val, buf, count, offset);
 }
 
 static ssize_t u64_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
        u64 *val = file->private_data;
+
        return gatorfs_u64_to_user(*val, buf, count, offset);
 }
 
@@ -231,7 +235,7 @@ int gatorfs_create_ro_ulong(struct super_block *sb, struct dentry *root,
 }
 
 static int gatorfs_create_ro_u64(struct super_block *sb, struct dentry *root,
-                                char const *name, u64 * val)
+                                char const *name, u64 *val)
 {
        struct dentry *d =
            __gatorfs_create_file(sb, root, name, &u64_ro_fops, 0444);
@@ -245,6 +249,7 @@ static int gatorfs_create_ro_u64(struct super_block *sb, struct dentry *root,
 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
        atomic_t *val = file->private_data;
+
        return gatorfs_ulong_to_user(atomic_read(val), buf, count, offset);
 }
 
index 76584554b00fd1c0ae08a85af1ea1f21351488db..c1525e10a8da37cd67c91a5a41ab947c78d6e612 100644 (file)
@@ -18,6 +18,7 @@ static void gator_hrtimer_offline(void);
 static enum hrtimer_restart gator_hrtimer_notify(struct hrtimer *hrtimer)
 {
        int cpu = get_logical_cpu();
+
        hrtimer_forward(hrtimer, per_cpu(hrtimer_expire, cpu), profiling_interval);
        per_cpu(hrtimer_expire, cpu) = ktime_add(per_cpu(hrtimer_expire, cpu), profiling_interval);
        (*callback)();
@@ -64,12 +65,11 @@ static int gator_hrtimer_init(int interval, void (*func)(void))
                per_cpu(hrtimer_is_active, cpu) = 0;
        }
 
-       // calculate profiling interval
-       if (interval > 0) {
+       /* calculate profiling interval */
+       if (interval > 0)
                profiling_interval = ns_to_ktime(1000000000UL / interval);
-       } else {
+       else
                profiling_interval.tv64 = 0;
-       }
 
        return 0;
 }
index e90dfcce93810258999e5aac782937c5b2ac969f..fb78c10fd987458f7bcdb99cacc55b3e3ab0e22c 100644 (file)
@@ -16,7 +16,7 @@
 
 static bool map_cpuids;
 static int mpidr_cpuids[NR_CPUS];
-static const struct gator_cpu * mpidr_cpus[NR_CPUS];
+static const struct gator_cpu *mpidr_cpus[NR_CPUS];
 static int __lcpu_to_pcpu[NR_CPUS];
 
 static const struct gator_cpu *gator_find_cpu_by_dt_name(const char *const name)
@@ -25,9 +25,9 @@ static const struct gator_cpu *gator_find_cpu_by_dt_name(const char *const name)
 
        for (i = 0; gator_cpus[i].cpuid != 0; ++i) {
                const struct gator_cpu *const gator_cpu = &gator_cpus[i];
-               if (gator_cpu->dt_name != NULL && strcmp(gator_cpu->dt_name, name) == 0) {
+
+               if (gator_cpu->dt_name != NULL && strcmp(gator_cpu->dt_name, name) == 0)
                        return gator_cpu;
-               }
        }
 
        return NULL;
@@ -41,7 +41,7 @@ static void calc_first_cluster_size(void)
        struct device_node *cn = NULL;
        int mpidr_cpuids_count = 0;
 
-       // Zero is a valid cpuid, so initialize the array to 0xff's
+       /* Zero is a valid cpuid, so initialize the array to 0xff's */
        memset(&mpidr_cpuids, 0xff, sizeof(mpidr_cpuids));
        memset(&mpidr_cpus, 0, sizeof(mpidr_cpus));
 
@@ -70,10 +70,10 @@ static void calc_first_cluster_size(void)
 static int linearize_mpidr(int mpidr)
 {
        int i;
+
        for (i = 0; i < nr_cpu_ids; ++i) {
-               if (mpidr_cpuids[i] == mpidr) {
+               if (mpidr_cpuids[i] == mpidr)
                        return i;
-               }
        }
 
        BUG();
@@ -113,6 +113,7 @@ static void gator_update_cpu_mapping(u32 cpu_hwid)
 {
        int lcpu = smp_processor_id();
        int pcpu = linearize_mpidr(cpu_hwid & MPIDR_HWID_BITMASK);
+
        BUG_ON(lcpu >= nr_cpu_ids || lcpu < 0);
        BUG_ON(pcpu >= nr_cpu_ids || pcpu < 0);
        __lcpu_to_pcpu[lcpu] = pcpu;
@@ -132,7 +133,7 @@ GATOR_DEFINE_PROBE(cpu_migrate_finish, TP_PROTO(u64 timestamp, u32 cpu_hwid))
 
        gator_update_cpu_mapping(cpu_hwid);
 
-       // get_physical_cpu must be called after gator_update_cpu_mapping
+       /* get_physical_cpu must be called after gator_update_cpu_mapping */
        cpu = get_physical_cpu();
        gator_timer_online_dispatch(cpu, true);
        gator_timer_online((void *)1);
@@ -146,12 +147,11 @@ GATOR_DEFINE_PROBE(cpu_migrate_current, TP_PROTO(u64 timestamp, u32 cpu_hwid))
 static void gator_send_iks_core_names(void)
 {
        int cpu;
-       // Send the cpu names
+       /* Send the cpu names */
        preempt_disable();
        for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
-               if (mpidr_cpus[cpu] != NULL) {
-                       gator_send_core_name(cpu, mpidr_cpus[cpu]->cpuid, mpidr_cpus[cpu]);
-               }
+               if (mpidr_cpus[cpu] != NULL)
+                       gator_send_core_name(cpu, mpidr_cpus[cpu]->cpuid);
        }
        preempt_enable();
 }
@@ -170,7 +170,7 @@ static int gator_migrate_start(void)
        if (retval == 0)
                retval = GATOR_REGISTER_TRACE(cpu_migrate_current);
        if (retval == 0) {
-               // Initialize the logical to physical cpu mapping
+               /* Initialize the logical to physical cpu mapping */
                memset(&__lcpu_to_pcpu, 0xff, sizeof(__lcpu_to_pcpu));
                bL_switcher_trace_trigger();
        }
index e67f7c5cc61d02553bcf63d476b19e91fd6320f8..30bf60d952869d83280c16c0bb6390405954b3d1 100644 (file)
@@ -7,8 +7,8 @@
  *
  */
 
-// This version must match the gator daemon version
-#define PROTOCOL_VERSION 18
+/* This version must match the gator daemon version */
+#define PROTOCOL_VERSION 20
 static unsigned long gator_protocol_version = PROTOCOL_VERSION;
 
 #include <linux/slab.h>
@@ -25,7 +25,7 @@ static unsigned long gator_protocol_version = PROTOCOL_VERSION;
 #include <linux/utsname.h>
 #include <linux/kthread.h>
 #include <asm/stacktrace.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #include "gator.h"
 
@@ -67,12 +67,12 @@ static unsigned long gator_protocol_version = PROTOCOL_VERSION;
 #define SUMMARY_BUFFER_SIZE       (1*1024)
 #define BACKTRACE_BUFFER_SIZE     (128*1024)
 #define NAME_BUFFER_SIZE          (64*1024)
-#define COUNTER_BUFFER_SIZE       (64*1024)    // counters have the core as part of the data and the core value in the frame header may be discarded
+#define COUNTER_BUFFER_SIZE       (64*1024)    /* counters have the core as part of the data and the core value in the frame header may be discarded */
 #define BLOCK_COUNTER_BUFFER_SIZE (128*1024)
-#define ANNOTATE_BUFFER_SIZE      (128*1024)   // annotate counters have the core as part of the data and the core value in the frame header may be discarded
+#define ANNOTATE_BUFFER_SIZE      (128*1024)   /* annotate counters have the core as part of the data and the core value in the frame header may be discarded */
 #define SCHED_TRACE_BUFFER_SIZE   (128*1024)
-#define GPU_TRACE_BUFFER_SIZE     (64*1024)    // gpu trace counters have the core as part of the data and the core value in the frame header may be discarded
-#define IDLE_BUFFER_SIZE          (32*1024)    // idle counters have the core as part of the data and the core value in the frame header may be discarded
+#define IDLE_BUFFER_SIZE          (32*1024)    /* idle counters have the core as part of the data and the core value in the frame header may be discarded */
+#define ACTIVITY_BUFFER_SIZE      (128*1024)
 
 #define NO_COOKIE      0U
 #define UNRESOLVED_COOKIE ~0U
@@ -84,33 +84,32 @@ static unsigned long gator_protocol_version = PROTOCOL_VERSION;
 #define FRAME_BLOCK_COUNTER 5
 #define FRAME_ANNOTATE      6
 #define FRAME_SCHED_TRACE   7
-#define FRAME_GPU_TRACE     8
 #define FRAME_IDLE          9
+#define FRAME_ACTIVITY     13
 
 #define MESSAGE_END_BACKTRACE 1
 
-// Name Frame Messages
+/* Name Frame Messages */
 #define MESSAGE_COOKIE      1
 #define MESSAGE_THREAD_NAME 2
 #define MESSAGE_LINK        4
 
-// GPU Trace Frame Messages
-#define MESSAGE_GPU_START 1
-#define MESSAGE_GPU_STOP  2
-
-// Scheduler Trace Frame Messages
+/* Scheduler Trace Frame Messages */
 #define MESSAGE_SCHED_SWITCH 1
 #define MESSAGE_SCHED_EXIT   2
-#define MESSAGE_SCHED_START  3
 
-// Idle Frame Messages
+/* Idle Frame Messages */
 #define MESSAGE_IDLE_ENTER 1
 #define MESSAGE_IDLE_EXIT  2
 
-// Summary Frame Messages
+/* Summary Frame Messages */
 #define MESSAGE_SUMMARY   1
 #define MESSAGE_CORE_NAME 3
 
+/* Activity Frame Messages */
+#define MESSAGE_SWITCH 2
+#define MESSAGE_EXIT   3
+
 #define MAXSIZE_PACK32     5
 #define MAXSIZE_PACK64    10
 
@@ -132,8 +131,8 @@ enum {
        BLOCK_COUNTER_BUF,
        ANNOTATE_BUF,
        SCHED_TRACE_BUF,
-       GPU_TRACE_BUF,
        IDLE_BUF,
+       ACTIVITY_BUF,
        NUM_GATOR_BUFS
 };
 
@@ -141,14 +140,15 @@ enum {
  * Globals
  ******************************************************************************/
 static unsigned long gator_cpu_cores;
-// Size of the largest buffer. Effectively constant, set in gator_op_create_files
+/* Size of the largest buffer. Effectively constant, set in gator_op_create_files */
 static unsigned long userspace_buffer_size;
 static unsigned long gator_backtrace_depth;
-// How often to commit the buffers for live in nanoseconds
+/* How often to commit the buffers for live in nanoseconds */
 static u64 gator_live_rate;
 
 static unsigned long gator_started;
 static u64 gator_monotonic_started;
+static u64 gator_sync_time;
 static u64 gator_hibernate_time;
 static unsigned long gator_buffer_opened;
 static unsigned long gator_timer_count;
@@ -162,7 +162,7 @@ static DECLARE_WAIT_QUEUE_HEAD(gator_buffer_wait);
 static DECLARE_WAIT_QUEUE_HEAD(gator_annotate_wait);
 static struct timer_list gator_buffer_wake_up_timer;
 static bool gator_buffer_wake_run;
-// Initialize semaphore unlocked to initialize memory values
+/* Initialize semaphore unlocked to initialize memory values */
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
 static DECLARE_MUTEX(gator_buffer_wake_sem);
 #else
@@ -175,6 +175,7 @@ static DEFINE_PER_CPU(u64, last_timestamp);
 
 static bool printed_monotonic_warning;
 
+static u32 gator_cpuids[NR_CPUS];
 static bool sent_core_name[NR_CPUS];
 
 static DEFINE_PER_CPU(bool, in_scheduler_context);
@@ -183,33 +184,43 @@ static DEFINE_PER_CPU(bool, in_scheduler_context);
  * Prototypes
  ******************************************************************************/
 static u64 gator_get_time(void);
+static void gator_emit_perf_time(u64 time);
 static void gator_op_create_files(struct super_block *sb, struct dentry *root);
 
-// gator_buffer is protected by being per_cpu and by having IRQs disabled when writing to it.
-// Most marshal_* calls take care of this except for marshal_cookie*, marshal_backtrace* and marshal_frame where the caller is responsible for doing so.
-// No synchronization is needed with the backtrace buffer as it is per cpu and is only used from the hrtimer.
-// The annotate_lock must be held when using the annotation buffer as it is not per cpu.
-// collect_counters which is the sole writer to the block counter frame is additionally protected by the per cpu collecting flag
+/* gator_buffer is protected by being per_cpu and by having IRQs
+ * disabled when writing to it. Most marshal_* calls take care of this
+ * except for marshal_cookie*, marshal_backtrace* and marshal_frame
+ * where the caller is responsible for doing so. No synchronization is
+ * needed with the backtrace buffer as it is per cpu and is only used
+ * from the hrtimer. The annotate_lock must be held when using the
+ * annotation buffer as it is not per cpu. collect_counters which is
+ * the sole writer to the block counter frame is additionally
+ * protected by the per cpu collecting flag.
+ */
 
-// Size of the buffer, must be a power of 2. Effectively constant, set in gator_op_setup.
+/* Size of the buffer, must be a power of 2. Effectively constant, set in gator_op_setup. */
 static uint32_t gator_buffer_size[NUM_GATOR_BUFS];
-// gator_buffer_size - 1, bitwise and with pos to get offset into the array. Effectively constant, set in gator_op_setup.
+/* gator_buffer_size - 1, bitwise and with pos to get offset into the array. Effectively constant, set in gator_op_setup. */
 static uint32_t gator_buffer_mask[NUM_GATOR_BUFS];
-// Read position in the buffer. Initialized to zero in gator_op_setup and incremented after bytes are read by userspace in userspace_buffer_read
+/* Read position in the buffer. Initialized to zero in gator_op_setup and incremented after bytes are read by userspace in userspace_buffer_read */
 static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], gator_buffer_read);
-// Write position in the buffer. Initialized to zero in gator_op_setup and incremented after bytes are written to the buffer
+/* Write position in the buffer. Initialized to zero in gator_op_setup and incremented after bytes are written to the buffer */
 static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], gator_buffer_write);
-// Commit position in the buffer. Initialized to zero in gator_op_setup and incremented after a frame is ready to be read by userspace
+/* Commit position in the buffer. Initialized to zero in gator_op_setup and incremented after a frame is ready to be read by userspace */
 static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], gator_buffer_commit);
-// If set to false, decreases the number of bytes returned by buffer_bytes_available. Set in buffer_check_space if no space is remaining. Initialized to true in gator_op_setup
-// This means that if we run out of space, continue to report that no space is available until bytes are read by userspace
+/* If set to false, decreases the number of bytes returned by
+ * buffer_bytes_available. Set in buffer_check_space if no space is
+ * remaining. Initialized to true in gator_op_setup. This means that
+ * if we run out of space, continue to report that no space is
+ * available until bytes are read by userspace
+ */
 static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], buffer_space_available);
-// The buffer. Allocated in gator_op_setup
+/* The buffer. Allocated in gator_op_setup */
 static DEFINE_PER_CPU(char *[NUM_GATOR_BUFS], gator_buffer);
-// The time after which the buffer should be committed for live display
+/* The time after which the buffer should be committed for live display */
 static DEFINE_PER_CPU(u64, gator_buffer_commit_time);
 
-// List of all gator events - new events must be added to this list
+/* List of all gator events - new events must be added to this list */
 #define GATOR_EVENTS_LIST \
        GATOR_EVENT(gator_events_armv6_init) \
        GATOR_EVENT(gator_events_armv7_init) \
@@ -218,8 +229,8 @@ static DEFINE_PER_CPU(u64, gator_buffer_commit_time);
        GATOR_EVENT(gator_events_irq_init) \
        GATOR_EVENT(gator_events_l2c310_init) \
        GATOR_EVENT(gator_events_mali_init) \
-       GATOR_EVENT(gator_events_mali_t6xx_hw_init) \
-       GATOR_EVENT(gator_events_mali_t6xx_init) \
+       GATOR_EVENT(gator_events_mali_midgard_hw_init) \
+       GATOR_EVENT(gator_events_mali_midgard_init) \
        GATOR_EVENT(gator_events_meminfo_init) \
        GATOR_EVENT(gator_events_mmapped_init) \
        GATOR_EVENT(gator_events_net_init) \
@@ -313,13 +324,6 @@ static const struct gator_cpu gator_cpus[] = {
                .dt_name = "arm,cortex-a9",
                .pmnc_counters = 6,
        },
-       {
-               .cpuid = CORTEX_A12,
-               .core_name = "Cortex-A12",
-               .pmnc_name = "ARMv7_Cortex_A12",
-               .dt_name = "arm,cortex-a12",
-               .pmnc_counters = 6,
-       },
        {
                .cpuid = CORTEX_A15,
                .core_name = "Cortex-A15",
@@ -399,23 +403,32 @@ const struct gator_cpu *gator_find_cpu_by_cpuid(const u32 cpuid)
 
        for (i = 0; gator_cpus[i].cpuid != 0; ++i) {
                const struct gator_cpu *const gator_cpu = &gator_cpus[i];
-               if (gator_cpu->cpuid == cpuid) {
+
+               if (gator_cpu->cpuid == cpuid)
                        return gator_cpu;
-               }
        }
 
        return NULL;
 }
 
+static const char OLD_PMU_PREFIX[] = "ARMv7 Cortex-";
+static const char NEW_PMU_PREFIX[] = "ARMv7_Cortex_";
+
 const struct gator_cpu *gator_find_cpu_by_pmu_name(const char *const name)
 {
        int i;
 
        for (i = 0; gator_cpus[i].cpuid != 0; ++i) {
                const struct gator_cpu *const gator_cpu = &gator_cpus[i];
-               if (gator_cpu->pmnc_name != NULL && strcmp(gator_cpu->pmnc_name, name) == 0) {
+
+               if (gator_cpu->pmnc_name != NULL &&
+                   /* Do the names match exactly? */
+                   (strcasecmp(gator_cpu->pmnc_name, name) == 0 ||
+                    /* Do these names match but have the old vs new prefix? */
+                    ((strncasecmp(name, OLD_PMU_PREFIX, sizeof(OLD_PMU_PREFIX) - 1) == 0 &&
+                      strncasecmp(gator_cpu->pmnc_name, NEW_PMU_PREFIX, sizeof(NEW_PMU_PREFIX) - 1) == 0 &&
+                      strcasecmp(name + sizeof(OLD_PMU_PREFIX) - 1, gator_cpu->pmnc_name + sizeof(NEW_PMU_PREFIX) - 1) == 0))))
                        return gator_cpu;
-               }
        }
 
        return NULL;
@@ -444,16 +457,15 @@ static void gator_buffer_wake_up(unsigned long data)
 static int gator_buffer_wake_func(void *data)
 {
        for (;;) {
-               if (down_killable(&gator_buffer_wake_sem)) {
+               if (down_killable(&gator_buffer_wake_sem))
                        break;
-               }
 
-               // Eat up any pending events
-               while (!down_trylock(&gator_buffer_wake_sem));
+               /* Eat up any pending events */
+               while (!down_trylock(&gator_buffer_wake_sem))
+                       ;
 
-               if (!gator_buffer_wake_run) {
+               if (!gator_buffer_wake_run)
                        break;
-               }
 
                gator_buffer_wake_up(0);
        }
@@ -467,6 +479,7 @@ static int gator_buffer_wake_func(void *data)
 static bool buffer_commit_ready(int *cpu, int *buftype)
 {
        int cpu_x, x;
+
        for_each_present_cpu(cpu_x) {
                for (x = 0; x < NUM_GATOR_BUFS; x++)
                        if (per_cpu(gator_buffer_commit, cpu_x)[x] != per_cpu(gator_buffer_read, cpu_x)[x]) {
@@ -486,6 +499,7 @@ static bool buffer_commit_ready(int *cpu, int *buftype)
 static void gator_timer_interrupt(void)
 {
        struct pt_regs *const regs = get_irq_regs();
+
        gator_backtrace_handler(regs);
 }
 
@@ -494,15 +508,14 @@ void gator_backtrace_handler(struct pt_regs *const regs)
        u64 time = gator_get_time();
        int cpu = get_physical_cpu();
 
-       // Output backtrace
+       /* Output backtrace */
        gator_add_sample(cpu, regs, time);
 
-       // Collect counters
-       if (!per_cpu(collecting, cpu)) {
-               collect_counters(time, NULL);
-       }
+       /* Collect counters */
+       if (!per_cpu(collecting, cpu))
+               collect_counters(time, current, false);
 
-       // No buffer flushing occurs during sched switch for RT-Preempt full. The block counter frame will be flushed by collect_counters, but the sched buffer needs to be explicitly flushed
+       /* No buffer flushing occurs during sched switch for RT-Preempt full. The block counter frame will be flushed by collect_counters, but the sched buffer needs to be explicitly flushed */
 #ifdef CONFIG_PREEMPT_RT_FULL
        buffer_check(cpu, SCHED_TRACE_BUF, time);
 #endif
@@ -510,7 +523,7 @@ void gator_backtrace_handler(struct pt_regs *const regs)
 
 static int gator_running;
 
-// This function runs in interrupt context and on the appropriate core
+/* This function runs in interrupt context and on the appropriate core */
 static void gator_timer_offline(void *migrate)
 {
        struct gator_interface *gi;
@@ -521,11 +534,10 @@ static void gator_timer_offline(void *migrate)
        gator_trace_sched_offline();
        gator_trace_power_offline();
 
-       if (!migrate) {
+       if (!migrate)
                gator_hrtimer_offline();
-       }
 
-       // Offline any events and output counters
+       /* Offline any events and output counters */
        time = gator_get_time();
        if (marshal_event_header(time)) {
                list_for_each_entry(gi, &gator_events, list) {
@@ -534,24 +546,23 @@ static void gator_timer_offline(void *migrate)
                                marshal_event(len, buffer);
                        }
                }
-               // Only check after writing all counters so that time and corresponding counters appear in the same frame
+               /* Only check after writing all counters so that time and corresponding counters appear in the same frame */
                buffer_check(cpu, BLOCK_COUNTER_BUF, time);
        }
 
-       // Flush all buffers on this core
+       /* Flush all buffers on this core */
        for (i = 0; i < NUM_GATOR_BUFS; i++)
                gator_commit_buffer(cpu, i, time);
 }
 
-// This function runs in interrupt context and may be running on a core other than core 'cpu'
+/* This function runs in interrupt context and may be running on a core other than core 'cpu' */
 static void gator_timer_offline_dispatch(int cpu, bool migrate)
 {
        struct gator_interface *gi;
 
        list_for_each_entry(gi, &gator_events, list) {
-               if (gi->offline_dispatch) {
+               if (gi->offline_dispatch)
                        gi->offline_dispatch(cpu, migrate);
-               }
        }
 }
 
@@ -570,27 +581,38 @@ static void gator_timer_stop(void)
        }
 }
 
-#if defined(__arm__) || defined(__aarch64__)
-static void gator_send_core_name(int cpu, const u32 cpuid, const struct gator_cpu *const gator_cpu)
+static void gator_send_core_name(const int cpu, const u32 cpuid)
 {
-       const char *core_name = NULL;
-       char core_name_buf[32];
+#if defined(__arm__) || defined(__aarch64__)
+       if (!sent_core_name[cpu] || (cpuid != gator_cpuids[cpu])) {
+               const struct gator_cpu *const gator_cpu = gator_find_cpu_by_cpuid(cpuid);
+               const char *core_name = NULL;
+               char core_name_buf[32];
 
-       if (!sent_core_name[cpu]) {
+               /* Save off this cpuid */
+               gator_cpuids[cpu] = cpuid;
                if (gator_cpu != NULL) {
                        core_name = gator_cpu->core_name;
                } else {
-                       snprintf(core_name_buf, sizeof(core_name_buf), "Unknown (0x%.3x)", cpuid);
+                       if (cpuid == -1)
+                               snprintf(core_name_buf, sizeof(core_name_buf), "Unknown");
+                       else
+                               snprintf(core_name_buf, sizeof(core_name_buf), "Unknown (0x%.3x)", cpuid);
                        core_name = core_name_buf;
                }
 
                marshal_core_name(cpu, cpuid, core_name);
                sent_core_name[cpu] = true;
        }
-}
 #endif
+}
+
+static void gator_read_cpuid(void *arg)
+{
+       gator_cpuids[get_physical_cpu()] = gator_cpuid();
+}
 
-// This function runs in interrupt context and on the appropriate core
+/* This function runs in interrupt context and on the appropriate core */
 static void gator_timer_online(void *migrate)
 {
        struct gator_interface *gi;
@@ -598,9 +620,12 @@ static void gator_timer_online(void *migrate)
        int *buffer;
        u64 time;
 
+       /* Send what is currently running on this core */
+       marshal_sched_trace_switch(current->pid, 0);
+
        gator_trace_power_online();
 
-       // online any events and output counters
+       /* online any events and output counters */
        time = gator_get_time();
        if (marshal_event_header(time)) {
                list_for_each_entry(gi, &gator_events, list) {
@@ -609,31 +634,24 @@ static void gator_timer_online(void *migrate)
                                marshal_event(len, buffer);
                        }
                }
-               // Only check after writing all counters so that time and corresponding counters appear in the same frame
+               /* Only check after writing all counters so that time and corresponding counters appear in the same frame */
                buffer_check(cpu, BLOCK_COUNTER_BUF, time);
        }
 
-       if (!migrate) {
+       if (!migrate)
                gator_hrtimer_online();
-       }
 
-#if defined(__arm__) || defined(__aarch64__)
-       if (!sent_core_name[cpu]) {
-               const u32 cpuid = gator_cpuid();
-               gator_send_core_name(cpu, cpuid, gator_find_cpu_by_cpuid(cpuid));
-       }
-#endif
+       gator_send_core_name(cpu, gator_cpuid());
 }
 
-// This function runs in interrupt context and may be running on a core other than core 'cpu'
+/* This function runs in interrupt context and may be running on a core other than core 'cpu' */
 static void gator_timer_online_dispatch(int cpu, bool migrate)
 {
        struct gator_interface *gi;
 
        list_for_each_entry(gi, &gator_events, list) {
-               if (gi->online_dispatch) {
+               if (gi->online_dispatch)
                        gi->online_dispatch(cpu, migrate);
-               }
        }
 }
 
@@ -650,14 +668,20 @@ static int gator_timer_start(unsigned long sample_rate)
 
        gator_running = 1;
 
-       // event based sampling trumps hr timer based sampling
-       if (event_based_sampling) {
+       /* event based sampling trumps hr timer based sampling */
+       if (event_based_sampling)
                sample_rate = 0;
-       }
 
        if (gator_hrtimer_init(sample_rate, gator_timer_interrupt) == -1)
                return -1;
 
+       /* Send off the previously saved cpuids */
+       for_each_present_cpu(cpu) {
+               preempt_disable();
+               gator_send_core_name(cpu, gator_cpuids[cpu]);
+               preempt_enable();
+       }
+
        gator_send_iks_core_names();
        for_each_online_cpu(cpu) {
                gator_timer_online_dispatch(lcpu_to_pcpu(cpu), false);
@@ -675,21 +699,24 @@ static u64 gator_get_time(void)
        u64 delta;
        int cpu = smp_processor_id();
 
-       // Match clock_gettime(CLOCK_MONOTONIC_RAW, &ts) from userspace
+       /* Match clock_gettime(CLOCK_MONOTONIC_RAW, &ts) from userspace */
        getrawmonotonic(&ts);
        timestamp = timespec_to_ns(&ts);
 
-       // getrawmonotonic is not monotonic on all systems. Detect and attempt to correct these cases.
-       // up to 0.5ms delta has been seen on some systems, which can skew Streamline data when viewing at high resolution.
-       // This doesn't work well with interrupts, but that it's OK - the real concern is to catch big jumps in time
+       /* getrawmonotonic is not monotonic on all systems. Detect and
+        * attempt to correct these cases. up to 0.5ms delta has been seen
+        * on some systems, which can skew Streamline data when viewing at
+        * high resolution. This doesn't work well with interrupts, but that
+        * it's OK - the real concern is to catch big jumps in time
+        */
        prev_timestamp = per_cpu(last_timestamp, cpu);
        if (prev_timestamp <= timestamp) {
                per_cpu(last_timestamp, cpu) = timestamp;
        } else {
                delta = prev_timestamp - timestamp;
-               // Log the error once
+               /* Log the error once */
                if (!printed_monotonic_warning && delta > 500000) {
-                       printk(KERN_ERR "%s: getrawmonotonic is not monotonic  cpu: %i  delta: %lli\nSkew in Streamline data may be present at the fine zoom levels\n", __FUNCTION__, cpu, delta);
+                       pr_err("%s: getrawmonotonic is not monotonic  cpu: %i  delta: %lli\nSkew in Streamline data may be present at the fine zoom levels\n", __func__, cpu, delta);
                        printed_monotonic_warning = true;
                }
                timestamp = prev_timestamp;
@@ -698,6 +725,19 @@ static u64 gator_get_time(void)
        return timestamp - gator_monotonic_started;
 }
 
+static void gator_emit_perf_time(u64 time)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+       if (time >= gator_sync_time) {
+               int cpu = get_physical_cpu();
+
+               marshal_event_single64(0, -1, local_clock());
+               gator_sync_time += NSEC_PER_SEC;
+               gator_commit_buffer(cpu, COUNTER_BUF, time);
+       }
+#endif
+}
+
 /******************************************************************************
  * cpu hotplug and pm notifiers
  ******************************************************************************/
@@ -725,8 +765,10 @@ static struct notifier_block __refdata gator_hotcpu_notifier = {
        .notifier_call = gator_hotcpu_notify,
 };
 
-// n.b. calling "on_each_cpu" only runs on those that are online
-// Registered linux events are not disabled, so their counters will continue to collect
+/* n.b. calling "on_each_cpu" only runs on those that are online.
+ * Registered linux events are not disabled, so their counters will
+ * continue to collect
+ */
 static int gator_pm_notify(struct notifier_block *nb, unsigned long event, void *dummy)
 {
        int cpu;
@@ -742,13 +784,13 @@ static int gator_pm_notify(struct notifier_block *nb, unsigned long event, void
                        gator_timer_offline_dispatch(lcpu_to_pcpu(cpu), false);
                }
 
-               // Record the wallclock hibernate time
+               /* Record the wallclock hibernate time */
                getnstimeofday(&ts);
                gator_hibernate_time = timespec_to_ns(&ts) - gator_get_time();
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
-               // Adjust gator_monotonic_started for the time spent sleeping, as gator_get_time does not account for it
+               /* Adjust gator_monotonic_started for the time spent sleeping, as gator_get_time does not account for it */
                if (gator_hibernate_time > 0) {
                        getnstimeofday(&ts);
                        gator_monotonic_started += gator_hibernate_time + gator_get_time() - timespec_to_ns(&ts);
@@ -774,6 +816,7 @@ static struct notifier_block gator_pm_notifier = {
 static int gator_notifier_start(void)
 {
        int retval;
+
        retval = register_hotcpu_notifier(&gator_hotcpu_notifier);
        if (retval == 0)
                retval = register_pm_notifier(&gator_pm_notifier);
@@ -794,28 +837,37 @@ static void gator_summary(void)
        u64 timestamp, uptime;
        struct timespec ts;
        char uname_buf[512];
-       void (*m2b)(struct timespec *ts);
 
        snprintf(uname_buf, sizeof(uname_buf), "%s %s %s %s %s GNU/Linux", utsname()->sysname, utsname()->nodename, utsname()->release, utsname()->version, utsname()->machine);
 
        getnstimeofday(&ts);
        timestamp = timespec_to_ns(&ts);
 
-       do_posix_clock_monotonic_gettime(&ts);
-       // monotonic_to_bootbased is not defined for some versions of Android
-       m2b = symbol_get(monotonic_to_bootbased);
-       if (m2b) {
-               m2b(&ts);
+       /* Similar to reading /proc/uptime from fs/proc/uptime.c, calculate uptime */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
+       {
+               void (*m2b)(struct timespec *ts);
+
+               do_posix_clock_monotonic_gettime(&ts);
+               /* monotonic_to_bootbased is not defined for some versions of Android */
+               m2b = symbol_get(monotonic_to_bootbased);
+               if (m2b)
+                       m2b(&ts);
        }
+#else
+       get_monotonic_boottime(&ts);
+#endif
        uptime = timespec_to_ns(&ts);
 
-       // Disable preemption as gator_get_time calls smp_processor_id to verify time is monotonic
+       /* Disable preemption as gator_get_time calls smp_processor_id to verify time is monotonic */
        preempt_disable();
-       // Set monotonic_started to zero as gator_get_time is uptime minus monotonic_started
+       /* Set monotonic_started to zero as gator_get_time is uptime minus monotonic_started */
        gator_monotonic_started = 0;
        gator_monotonic_started = gator_get_time();
 
        marshal_summary(timestamp, uptime, gator_monotonic_started, uname_buf);
+       gator_sync_time = 0;
+       gator_emit_perf_time(gator_monotonic_started);  
        preempt_enable();
 }
 
@@ -828,12 +880,14 @@ int gator_events_install(struct gator_interface *interface)
 
 int gator_events_get_key(void)
 {
-       // key 0 is reserved as a timestamp
-       // key 1 is reserved as the marker for thread specific counters
-       // Odd keys are assigned by the driver, even keys by the daemon
+       /* key 0 is reserved as a timestamp. key 1 is reserved as the marker
+        * for thread specific counters. key 2 is reserved as the marker for
+        * core. Odd keys are assigned by the driver, even keys by the
+        * daemon.
+        */
        static int key = 3;
-
        const int ret = key;
+
        key += 2;
        return ret;
 }
@@ -844,7 +898,7 @@ static int gator_init(void)
 
        calc_first_cluster_size();
 
-       // events sources
+       /* events sources */
        for (i = 0; i < ARRAY_SIZE(gator_events_list); i++)
                if (gator_events_list[i])
                        gator_events_list[i]();
@@ -870,26 +924,25 @@ static int gator_start(void)
        struct gator_interface *gi;
 
        gator_buffer_wake_run = true;
-       if (IS_ERR(gator_buffer_wake_thread = kthread_run(gator_buffer_wake_func, NULL, "gator_bwake"))) {
+       gator_buffer_wake_thread = kthread_run(gator_buffer_wake_func, NULL, "gator_bwake");
+       if (IS_ERR(gator_buffer_wake_thread))
                goto bwake_failure;
-       }
 
        if (gator_migrate_start())
                goto migrate_failure;
 
-       // Initialize the buffer with the frame type and core
+       /* Initialize the buffer with the frame type and core */
        for_each_present_cpu(cpu) {
-               for (i = 0; i < NUM_GATOR_BUFS; i++) {
+               for (i = 0; i < NUM_GATOR_BUFS; i++)
                        marshal_frame(cpu, i);
-               }
                per_cpu(last_timestamp, cpu) = 0;
        }
        printed_monotonic_warning = false;
 
-       // Capture the start time
+       /* Capture the start time */
        gator_summary();
 
-       // start all events
+       /* start all events */
        list_for_each_entry(gi, &gator_events, list) {
                if (gi->start && gi->start() != 0) {
                        struct list_head *ptr = gi->list.prev;
@@ -906,7 +959,7 @@ static int gator_start(void)
                }
        }
 
-       // cookies shall be initialized before trace_sched_start() and gator_timer_start()
+       /* cookies shall be initialized before trace_sched_start() and gator_timer_start() */
        if (cookies_initialize())
                goto cookies_failure;
        if (gator_annotate_start())
@@ -937,7 +990,7 @@ sched_failure:
 annotate_failure:
        cookies_release();
 cookies_failure:
-       // stop all events
+       /* stop all events */
        list_for_each_entry(gi, &gator_events, list)
                if (gi->stop)
                        gi->stop();
@@ -961,11 +1014,11 @@ static void gator_stop(void)
        gator_trace_power_stop();
        gator_trace_gpu_stop();
 
-       // stop all interrupt callback reads before tearing down other interfaces
-       gator_notifier_stop();  // should be called before gator_timer_stop to avoid re-enabling the hrtimer after it has been offlined
+       /* stop all interrupt callback reads before tearing down other interfaces */
+       gator_notifier_stop();  /* should be called before gator_timer_stop to avoid re-enabling the hrtimer after it has been offlined */
        gator_timer_stop();
 
-       // stop all events
+       /* stop all events */
        list_for_each_entry(gi, &gator_events, list)
                if (gi->stop)
                        gi->stop();
@@ -1009,15 +1062,15 @@ static int gator_op_setup(void)
        gator_buffer_size[SCHED_TRACE_BUF] = SCHED_TRACE_BUFFER_SIZE;
        gator_buffer_mask[SCHED_TRACE_BUF] = SCHED_TRACE_BUFFER_SIZE - 1;
 
-       gator_buffer_size[GPU_TRACE_BUF] = GPU_TRACE_BUFFER_SIZE;
-       gator_buffer_mask[GPU_TRACE_BUF] = GPU_TRACE_BUFFER_SIZE - 1;
-
        gator_buffer_size[IDLE_BUF] = IDLE_BUFFER_SIZE;
        gator_buffer_mask[IDLE_BUF] = IDLE_BUFFER_SIZE - 1;
 
-       // Initialize percpu per buffer variables
+       gator_buffer_size[ACTIVITY_BUF] = ACTIVITY_BUFFER_SIZE;
+       gator_buffer_mask[ACTIVITY_BUF] = ACTIVITY_BUFFER_SIZE - 1;
+
+       /* Initialize percpu per buffer variables */
        for (i = 0; i < NUM_GATOR_BUFS; i++) {
-               // Verify buffers are a power of 2
+               /* Verify buffers are a power of 2 */
                if (gator_buffer_size[i] & (gator_buffer_size[i] - 1)) {
                        err = -ENOEXEC;
                        goto setup_error;
@@ -1030,7 +1083,7 @@ static int gator_op_setup(void)
                        per_cpu(buffer_space_available, cpu)[i] = true;
                        per_cpu(gator_buffer_commit_time, cpu) = gator_live_rate;
 
-                       // Annotation is a special case that only uses a single buffer
+                       /* Annotation is a special case that only uses a single buffer */
                        if (cpu > 0 && i == ANNOTATE_BUF) {
                                per_cpu(gator_buffer, cpu)[i] = NULL;
                                continue;
@@ -1170,7 +1223,8 @@ static int userspace_buffer_open(struct inode *inode, struct file *file)
        if (test_and_set_bit_lock(0, &gator_buffer_opened))
                return -EBUSY;
 
-       if ((err = gator_op_setup()))
+       err = gator_op_setup();
+       if (err)
                goto fail;
 
        /* NB: the actual start happens from userspace
@@ -1200,22 +1254,20 @@ static ssize_t userspace_buffer_read(struct file *file, char __user *buf, size_t
        int cpu, buftype;
        int written = 0;
 
-       // ensure there is enough space for a whole frame
-       if (count < userspace_buffer_size || *offset) {
+       /* ensure there is enough space for a whole frame */
+       if (count < userspace_buffer_size || *offset)
                return -EINVAL;
-       }
 
-       // sleep until the condition is true or a signal is received
-       // the condition is checked each time gator_buffer_wait is woken up
+       /* sleep until the condition is true or a signal is received; the
+        * condition is checked each time gator_buffer_wait is woken up
+        */
        wait_event_interruptible(gator_buffer_wait, buffer_commit_ready(&cpu, &buftype) || !gator_started);
 
-       if (signal_pending(current)) {
+       if (signal_pending(current))
                return -EINTR;
-       }
 
-       if (buftype == -1 || cpu == -1) {
+       if (buftype == -1 || cpu == -1)
                return 0;
-       }
 
        mutex_lock(&gator_buffer_mutex);
 
@@ -1223,12 +1275,11 @@ static ssize_t userspace_buffer_read(struct file *file, char __user *buf, size_t
                read = per_cpu(gator_buffer_read, cpu)[buftype];
                commit = per_cpu(gator_buffer_commit, cpu)[buftype];
 
-               // May happen if the buffer is freed during pending reads.
-               if (!per_cpu(gator_buffer, cpu)[buftype]) {
+               /* May happen if the buffer is freed during pending reads. */
+               if (!per_cpu(gator_buffer, cpu)[buftype])
                        break;
-               }
 
-               // determine the size of two halves
+               /* determine the size of two halves */
                length1 = commit - read;
                length2 = 0;
                buffer1 = &(per_cpu(gator_buffer, cpu)[buftype][read]);
@@ -1238,32 +1289,28 @@ static ssize_t userspace_buffer_read(struct file *file, char __user *buf, size_t
                        length2 = commit;
                }
 
-               if (length1 + length2 > count - written) {
+               if (length1 + length2 > count - written)
                        break;
-               }
 
-               // start, middle or end
-               if (length1 > 0 && copy_to_user(&buf[written], buffer1, length1)) {
+               /* start, middle or end */
+               if (length1 > 0 && copy_to_user(&buf[written], buffer1, length1))
                        break;
-               }
 
-               // possible wrap around
-               if (length2 > 0 && copy_to_user(&buf[written + length1], buffer2, length2)) {
+               /* possible wrap around */
+               if (length2 > 0 && copy_to_user(&buf[written + length1], buffer2, length2))
                        break;
-               }
 
                per_cpu(gator_buffer_read, cpu)[buftype] = commit;
                written += length1 + length2;
 
-               // Wake up annotate_write if more space is available
-               if (buftype == ANNOTATE_BUF) {
+               /* Wake up annotate_write if more space is available */
+               if (buftype == ANNOTATE_BUF)
                        wake_up(&gator_annotate_wait);
-               }
        } while (buffer_commit_ready(&cpu, &buftype));
 
        mutex_unlock(&gator_buffer_mutex);
 
-       // kick just in case we've lost an SMP event
+       /* kick just in case we've lost an SMP event */
        wake_up(&gator_buffer_wait);
 
        return written > 0 ? written : -EFAULT;
@@ -1330,30 +1377,86 @@ static void gator_op_create_files(struct super_block *sb, struct dentry *root)
        gatorfs_create_ro_u64(sb, root, "started", &gator_monotonic_started);
        gatorfs_create_u64(sb, root, "live_rate", &gator_live_rate);
 
-       // Annotate interface
+       /* Annotate interface */
        gator_annotate_create_files(sb, root);
 
-       // Linux Events
+       /* Linux Events */
        dir = gatorfs_mkdir(sb, root, "events");
        list_for_each_entry(gi, &gator_events, list)
                if (gi->create_files)
                        gi->create_files(sb, dir);
 
-       // Sched Events
+       /* Sched Events */
        sched_trace_create_files(sb, dir);
 
-       // Power interface
+       /* Power interface */
        gator_trace_power_create_files(sb, dir);
 }
 
 /******************************************************************************
  * Module
  ******************************************************************************/
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+
+#define GATOR_TRACEPOINTS \
+       GATOR_HANDLE_TRACEPOINT(block_rq_complete); \
+       GATOR_HANDLE_TRACEPOINT(cpu_frequency); \
+       GATOR_HANDLE_TRACEPOINT(cpu_idle); \
+       GATOR_HANDLE_TRACEPOINT(cpu_migrate_begin); \
+       GATOR_HANDLE_TRACEPOINT(cpu_migrate_current); \
+       GATOR_HANDLE_TRACEPOINT(cpu_migrate_finish); \
+       GATOR_HANDLE_TRACEPOINT(irq_handler_exit); \
+       GATOR_HANDLE_TRACEPOINT(mali_hw_counter); \
+       GATOR_HANDLE_TRACEPOINT(mali_job_slots_event); \
+       GATOR_HANDLE_TRACEPOINT(mali_mmu_as_in_use); \
+       GATOR_HANDLE_TRACEPOINT(mali_mmu_as_released); \
+       GATOR_HANDLE_TRACEPOINT(mali_page_fault_insert_pages); \
+       GATOR_HANDLE_TRACEPOINT(mali_pm_status); \
+       GATOR_HANDLE_TRACEPOINT(mali_sw_counter); \
+       GATOR_HANDLE_TRACEPOINT(mali_sw_counters); \
+       GATOR_HANDLE_TRACEPOINT(mali_timeline_event); \
+       GATOR_HANDLE_TRACEPOINT(mali_total_alloc_pages_change); \
+       GATOR_HANDLE_TRACEPOINT(mm_page_alloc); \
+       GATOR_HANDLE_TRACEPOINT(mm_page_free); \
+       GATOR_HANDLE_TRACEPOINT(mm_page_free_batched); \
+       GATOR_HANDLE_TRACEPOINT(sched_process_exec); \
+       GATOR_HANDLE_TRACEPOINT(sched_process_fork); \
+       GATOR_HANDLE_TRACEPOINT(sched_process_free); \
+       GATOR_HANDLE_TRACEPOINT(sched_switch); \
+       GATOR_HANDLE_TRACEPOINT(softirq_exit); \
+       GATOR_HANDLE_TRACEPOINT(task_rename);
+
+#define GATOR_HANDLE_TRACEPOINT(probe_name) \
+       struct tracepoint *gator_tracepoint_##probe_name
+GATOR_TRACEPOINTS;
+#undef GATOR_HANDLE_TRACEPOINT
+
+static void gator_save_tracepoint(struct tracepoint *tp, void *priv)
+{
+#define GATOR_HANDLE_TRACEPOINT(probe_name) \
+       do { \
+               if (strcmp(tp->name, #probe_name) == 0) { \
+                       gator_tracepoint_##probe_name = tp; \
+                       return; \
+               } \
+       } while (0)
+GATOR_TRACEPOINTS;
+#undef GATOR_HANDLE_TRACEPOINT
+}
+
+#else
+
+#define for_each_kernel_tracepoint(fct, priv)
+
+#endif
+
 static int __init gator_module_init(void)
 {
-       if (gatorfs_register()) {
+       for_each_kernel_tracepoint(gator_save_tracepoint, NULL);
+
+       if (gatorfs_register())
                return -1;
-       }
 
        if (gator_init()) {
                gatorfs_unregister();
@@ -1362,6 +1465,10 @@ static int __init gator_module_init(void)
 
        setup_timer(&gator_buffer_wake_up_timer, gator_buffer_wake_up, 0);
 
+       /* Initialize the list of cpuids */
+       memset(gator_cpuids, -1, sizeof(gator_cpuids));
+       on_each_cpu(gator_read_cpuid, NULL, 1);
+
        return 0;
 }
 
index fd413ad1331c03984a469bfe301da5ca5c0076ee..0d11676436429640d91b41fec7f0ce79c5a40321 100644 (file)
@@ -23,7 +23,7 @@
 #include "gator_events_mali_common.h"
 #endif
 
-static void marshal_summary(long long timestamp, long long uptime, long long monotonic_delta, const char * uname)
+static void marshal_summary(long long timestamp, long long uptime, long long monotonic_delta, const char *uname)
 {
        unsigned long flags;
        int cpu = 0;
@@ -40,19 +40,27 @@ static void marshal_summary(long long timestamp, long long uptime, long long mon
        gator_buffer_write_string(cpu, SUMMARY_BUF, "iks");
        gator_buffer_write_string(cpu, SUMMARY_BUF, "");
 #endif
-       // Let Streamline know which GPU is used so that it can label the GPU Activity appropriately. This is a temporary fix, to be improved in a future release.
+#ifdef CONFIG_PREEMPT_RTB
+       gator_buffer_write_string(cpu, SUMMARY_BUF, "preempt_rtb");
+       gator_buffer_write_string(cpu, SUMMARY_BUF, "");
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+       gator_buffer_write_string(cpu, SUMMARY_BUF, "preempt_rt_full");
+       gator_buffer_write_string(cpu, SUMMARY_BUF, "");
+#endif
+       /* Let Streamline know which GPU is used so that it can label the GPU Activity appropriately. This is a temporary fix, to be improved in a future release. */
 #ifdef MALI_SUPPORT
        gator_buffer_write_string(cpu, SUMMARY_BUF, "mali_type");
 #if (MALI_SUPPORT == MALI_4xx)
        gator_buffer_write_string(cpu, SUMMARY_BUF, "4xx");
-#elif (MALI_SUPPORT == MALI_T6xx)
+#elif (MALI_SUPPORT == MALI_MIDGARD)
        gator_buffer_write_string(cpu, SUMMARY_BUF, "6xx");
 #else
        gator_buffer_write_string(cpu, SUMMARY_BUF, "unknown");
 #endif
 #endif
        gator_buffer_write_string(cpu, SUMMARY_BUF, "");
-       // Commit the buffer now so it can be one of the first frames read by Streamline
+       /* Commit the buffer now so it can be one of the first frames read by Streamline */
        local_irq_restore(flags);
        gator_commit_buffer(cpu, SUMMARY_BUF, gator_get_time());
 }
@@ -60,13 +68,14 @@ static void marshal_summary(long long timestamp, long long uptime, long long mon
 static bool marshal_cookie_header(const char *text)
 {
        int cpu = get_physical_cpu();
+
        return buffer_check_space(cpu, NAME_BUF, strlen(text) + 3 * MAXSIZE_PACK32);
 }
 
 static void marshal_cookie(int cookie, const char *text)
 {
        int cpu = get_physical_cpu();
-       // buffer_check_space already called by marshal_cookie_header
+       /* buffer_check_space already called by marshal_cookie_header */
        gator_buffer_write_packed_int(cpu, NAME_BUF, MESSAGE_COOKIE);
        gator_buffer_write_packed_int(cpu, NAME_BUF, cookie);
        gator_buffer_write_string(cpu, NAME_BUF, text);
@@ -77,6 +86,7 @@ static void marshal_thread_name(int pid, char *name)
 {
        unsigned long flags, cpu;
        u64 time;
+
        local_irq_save(flags);
        cpu = get_physical_cpu();
        time = gator_get_time();
@@ -105,15 +115,16 @@ static void marshal_link(int cookie, int tgid, int pid)
                gator_buffer_write_packed_int(cpu, NAME_BUF, pid);
        }
        local_irq_restore(flags);
-       // Check and commit; commit is set to occur once buffer is 3/4 full
+       /* Check and commit; commit is set to occur once buffer is 3/4 full */
        buffer_check(cpu, NAME_BUF, time);
 }
 
 static bool marshal_backtrace_header(int exec_cookie, int tgid, int pid, u64 time)
 {
        int cpu = get_physical_cpu();
+
        if (!buffer_check_space(cpu, BACKTRACE_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32 + gator_backtrace_depth * 2 * MAXSIZE_PACK32)) {
-               // Check and commit; commit is set to occur once buffer is 3/4 full
+               /* Check and commit; commit is set to occur once buffer is 3/4 full */
                buffer_check(cpu, BACKTRACE_BUF, time);
 
                return false;
@@ -130,9 +141,9 @@ static bool marshal_backtrace_header(int exec_cookie, int tgid, int pid, u64 tim
 static void marshal_backtrace(unsigned long address, int cookie, int in_kernel)
 {
        int cpu = get_physical_cpu();
-       if (cookie == 0 && !in_kernel) {
+
+       if (cookie == 0 && !in_kernel)
                cookie = UNRESOLVED_COOKIE;
-       }
        gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, cookie);
        gator_buffer_write_packed_int64(cpu, BACKTRACE_BUF, address);
 }
@@ -140,9 +151,10 @@ static void marshal_backtrace(unsigned long address, int cookie, int in_kernel)
 static void marshal_backtrace_footer(u64 time)
 {
        int cpu = get_physical_cpu();
+
        gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, MESSAGE_END_BACKTRACE);
 
-       // Check and commit; commit is set to occur once buffer is 3/4 full
+       /* Check and commit; commit is set to occur once buffer is 3/4 full */
        buffer_check(cpu, BACKTRACE_BUF, time);
 }
 
@@ -153,7 +165,7 @@ static bool marshal_event_header(u64 time)
 
        local_irq_save(flags);
        if (buffer_check_space(cpu, BLOCK_COUNTER_BUF, MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
-               gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, 0);       // key of zero indicates a timestamp
+               gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, 0);       /* key of zero indicates a timestamp */
                gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, time);
                retval = true;
        }
@@ -169,18 +181,17 @@ static void marshal_event(int len, int *buffer)
        if (len <= 0)
                return;
 
-       // length must be even since all data is a (key, value) pair
+       /* length must be even since all data is a (key, value) pair */
        if (len & 0x1) {
-               pr_err("gator: invalid counter data detected and discarded");
+               pr_err("gator: invalid counter data detected and discarded\n");
                return;
        }
 
-       // events must be written in key,value pairs
+       /* events must be written in key,value pairs */
        local_irq_save(flags);
        for (i = 0; i < len; i += 2) {
-               if (!buffer_check_space(cpu, BLOCK_COUNTER_BUF, 2 * MAXSIZE_PACK32)) {
+               if (!buffer_check_space(cpu, BLOCK_COUNTER_BUF, 2 * MAXSIZE_PACK32))
                        break;
-               }
                gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, buffer[i]);
                gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, buffer[i + 1]);
        }
@@ -194,26 +205,24 @@ static void marshal_event64(int len, long long *buffer64)
        if (len <= 0)
                return;
 
-       // length must be even since all data is a (key, value) pair
+       /* length must be even since all data is a (key, value) pair */
        if (len & 0x1) {
-               pr_err("gator: invalid counter data detected and discarded");
+               pr_err("gator: invalid counter data detected and discarded\n");
                return;
        }
 
-       // events must be written in key,value pairs
+       /* events must be written in key,value pairs */
        local_irq_save(flags);
        for (i = 0; i < len; i += 2) {
-               if (!buffer_check_space(cpu, BLOCK_COUNTER_BUF, 2 * MAXSIZE_PACK64)) {
+               if (!buffer_check_space(cpu, BLOCK_COUNTER_BUF, 2 * MAXSIZE_PACK64))
                        break;
-               }
                gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, buffer64[i]);
                gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, buffer64[i + 1]);
        }
        local_irq_restore(flags);
 }
 
-#if GATOR_CPU_FREQ_SUPPORT
-static void marshal_event_single(int core, int key, int value)
+static void __maybe_unused marshal_event_single(int core, int key, int value)
 {
        unsigned long flags, cpu;
        u64 time;
@@ -228,78 +237,30 @@ static void marshal_event_single(int core, int key, int value)
                gator_buffer_write_packed_int(cpu, COUNTER_BUF, value);
        }
        local_irq_restore(flags);
-       // Check and commit; commit is set to occur once buffer is 3/4 full
+       /* Check and commit; commit is set to occur once buffer is 3/4 full */
        buffer_check(cpu, COUNTER_BUF, time);
 }
-#endif
-
-static void marshal_sched_gpu_start(int unit, int core, int tgid, int pid)
-{
-       unsigned long cpu = get_physical_cpu(), flags;
-       u64 time;
-
-       if (!per_cpu(gator_buffer, cpu)[GPU_TRACE_BUF])
-               return;
-
-       local_irq_save(flags);
-       time = gator_get_time();
-       if (buffer_check_space(cpu, GPU_TRACE_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
-               gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, MESSAGE_GPU_START);
-               gator_buffer_write_packed_int64(cpu, GPU_TRACE_BUF, time);
-               gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, unit);
-               gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, core);
-               gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, tgid);
-               gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, pid);
-       }
-       local_irq_restore(flags);
-       // Check and commit; commit is set to occur once buffer is 3/4 full
-       buffer_check(cpu, GPU_TRACE_BUF, time);
-}
-
-static void marshal_sched_gpu_stop(int unit, int core)
-{
-       unsigned long cpu = get_physical_cpu(), flags;
-       u64 time;
-
-       if (!per_cpu(gator_buffer, cpu)[GPU_TRACE_BUF])
-               return;
-
-       local_irq_save(flags);
-       time = gator_get_time();
-       if (buffer_check_space(cpu, GPU_TRACE_BUF, MAXSIZE_PACK64 + 3 * MAXSIZE_PACK32)) {
-               gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, MESSAGE_GPU_STOP);
-               gator_buffer_write_packed_int64(cpu, GPU_TRACE_BUF, time);
-               gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, unit);
-               gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, core);
-       }
-       local_irq_restore(flags);
-       // Check and commit; commit is set to occur once buffer is 3/4 full
-       buffer_check(cpu, GPU_TRACE_BUF, time);
-}
 
-static void marshal_sched_trace_start(int tgid, int pid, int cookie)
+static void __maybe_unused marshal_event_single64(int core, int key, long long value)
 {
-       unsigned long cpu = get_physical_cpu(), flags;
+       unsigned long flags, cpu;
        u64 time;
 
-       if (!per_cpu(gator_buffer, cpu)[SCHED_TRACE_BUF])
-               return;
-
        local_irq_save(flags);
+       cpu = get_physical_cpu();
        time = gator_get_time();
-       if (buffer_check_space(cpu, SCHED_TRACE_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
-               gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, MESSAGE_SCHED_START);
-               gator_buffer_write_packed_int64(cpu, SCHED_TRACE_BUF, time);
-               gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, tgid);
-               gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, pid);
-               gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, cookie);
+       if (buffer_check_space(cpu, COUNTER_BUF, 2 * MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
+               gator_buffer_write_packed_int64(cpu, COUNTER_BUF, time);
+               gator_buffer_write_packed_int(cpu, COUNTER_BUF, core);
+               gator_buffer_write_packed_int(cpu, COUNTER_BUF, key);
+               gator_buffer_write_packed_int64(cpu, COUNTER_BUF, value);
        }
        local_irq_restore(flags);
-       // Check and commit; commit is set to occur once buffer is 3/4 full
-       buffer_check(cpu, SCHED_TRACE_BUF, time);
+       /* Check and commit; commit is set to occur once buffer is 3/4 full */
+       buffer_check(cpu, COUNTER_BUF, time);
 }
 
-static void marshal_sched_trace_switch(int tgid, int pid, int cookie, int state)
+static void marshal_sched_trace_switch(int pid, int state)
 {
        unsigned long cpu = get_physical_cpu(), flags;
        u64 time;
@@ -312,13 +273,11 @@ static void marshal_sched_trace_switch(int tgid, int pid, int cookie, int state)
        if (buffer_check_space(cpu, SCHED_TRACE_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
                gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, MESSAGE_SCHED_SWITCH);
                gator_buffer_write_packed_int64(cpu, SCHED_TRACE_BUF, time);
-               gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, tgid);
                gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, pid);
-               gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, cookie);
                gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, state);
        }
        local_irq_restore(flags);
-       // Check and commit; commit is set to occur once buffer is 3/4 full
+       /* Check and commit; commit is set to occur once buffer is 3/4 full */
        buffer_check(cpu, SCHED_TRACE_BUF, time);
 }
 
@@ -338,7 +297,7 @@ static void marshal_sched_trace_exit(int tgid, int pid)
                gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, pid);
        }
        local_irq_restore(flags);
-       // Check and commit; commit is set to occur once buffer is 3/4 full
+       /* Check and commit; commit is set to occur once buffer is 3/4 full */
        buffer_check(cpu, SCHED_TRACE_BUF, time);
 }
 
@@ -357,7 +316,7 @@ static void marshal_idle(int core, int state)
                gator_buffer_write_packed_int(cpu, IDLE_BUF, core);
        }
        local_irq_restore(flags);
-       // Check and commit; commit is set to occur once buffer is 3/4 full
+       /* Check and commit; commit is set to occur once buffer is 3/4 full */
        buffer_check(cpu, IDLE_BUF, time);
 }
 #endif
@@ -367,6 +326,7 @@ static void marshal_core_name(const int core, const int cpuid, const char *name)
 {
        int cpu = get_physical_cpu();
        unsigned long flags;
+
        local_irq_save(flags);
        if (buffer_check_space(cpu, SUMMARY_BUF, MAXSIZE_PACK32 + MAXSIZE_CORE_NAME)) {
                gator_buffer_write_packed_int(cpu, SUMMARY_BUF, MESSAGE_CORE_NAME);
@@ -374,8 +334,38 @@ static void marshal_core_name(const int core, const int cpuid, const char *name)
                gator_buffer_write_packed_int(cpu, SUMMARY_BUF, cpuid);
                gator_buffer_write_string(cpu, SUMMARY_BUF, name);
        }
-       // Commit core names now so that they can show up in live
+       /* Commit core names now so that they can show up in live */
        local_irq_restore(flags);
        gator_commit_buffer(cpu, SUMMARY_BUF, gator_get_time());
 }
 #endif
+
+static void marshal_activity_switch(int core, int key, int activity, int pid, int state)
+{
+       unsigned long cpu = get_physical_cpu(), flags;
+       u64 time;
+
+       if (!per_cpu(gator_buffer, cpu)[ACTIVITY_BUF])
+               return;
+
+       local_irq_save(flags);
+       time = gator_get_time();
+       if (buffer_check_space(cpu, ACTIVITY_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
+               gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, MESSAGE_SWITCH);
+               gator_buffer_write_packed_int64(cpu, ACTIVITY_BUF, time);
+               gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, core);
+               gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, key);
+               gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, activity);
+               gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, pid);
+               gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, state);
+       }
+       local_irq_restore(flags);
+       /* Check and commit; commit is set to occur once buffer is 3/4 full */
+       buffer_check(cpu, ACTIVITY_BUF, time);
+}
+
+void gator_marshal_activity_switch(int core, int key, int activity, int pid)
+{
+       /* state is reserved for cpu use only */
+       marshal_activity_switch(core, key, activity, pid, 0);
+}
index 6332098e595840d9ffe1797b22addf39d006fa60..5de9152e365aa4cdf4229c515fa9144df1efe07c 100644 (file)
@@ -23,8 +23,6 @@
 #endif
 #endif
 
-#include "gator_trace_gpu.h"
-
 /*
  * Taken from MALI_PROFILING_EVENT_TYPE_* items in Mali DDK.
  */
@@ -37,7 +35,6 @@
 /* Note whether tracepoints have been registered */
 static int mali_timeline_trace_registered;
 static int mali_job_slots_trace_registered;
-static int gpu_trace_registered;
 
 enum {
        GPU_UNIT_NONE = 0,
@@ -47,74 +44,117 @@ enum {
        NUMBER_OF_GPU_UNITS
 };
 
-#define MALI_4xx     (0x0b07)
-#define MALI_T6xx    (0x0056)
+#if defined(MALI_SUPPORT)
 
-struct mali_gpu_job {
+struct mali_activity {
+       int core;
+       int key;
        int count;
-       int last_tgid;
+       int last_activity;
        int last_pid;
-       int last_job_id;
 };
 
 #define NUMBER_OF_GPU_CORES 16
-static struct mali_gpu_job mali_gpu_jobs[NUMBER_OF_GPU_UNITS][NUMBER_OF_GPU_CORES];
-static DEFINE_SPINLOCK(mali_gpu_jobs_lock);
-
-/* Only one event should be running on a unit and core at a time (ie, a start
- * event can only be followed by a stop and vice versa), but because the kernel
- * only knows when a job is enqueued and not started, it is possible for a
- * start1, start2, stop1, stop2. Change it back into start1, stop1, start2,
- * stop2 by queueing up start2 and releasing it when stop1 is received.
+static struct mali_activity mali_activities[NUMBER_OF_GPU_UNITS*NUMBER_OF_GPU_CORES];
+static DEFINE_SPINLOCK(mali_activities_lock);
+
+/* Only one event should be running on a unit and core at a time (ie,
+ * a start event can only be followed by a stop and vice versa), but
+ * because the kernel only knows when a job is enqueued and not
+ * started, it is possible for a start1, start2, stop1, stop2. Change
+ * it back into start1, stop1, start2, stop2 by queueing up start2 and
+ * releasing it when stop1 is received.
  */
-static void mali_gpu_enqueue(int unit, int core, int tgid, int pid, int job_id)
+
+static int mali_activity_index(int core, int key)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(mali_activities); ++i) {
+               if ((mali_activities[i].core == core) && (mali_activities[i].key == key))
+                       break;
+               if ((mali_activities[i].core == 0) && (mali_activities[i].key == 0)) {
+                       mali_activities[i].core = core;
+                       mali_activities[i].key = key;
+                       break;
+               }
+       }
+       BUG_ON(i >= ARRAY_SIZE(mali_activities));
+
+       return i;
+}
+
+static void mali_activity_enqueue(int core, int key, int activity, int pid)
 {
+       int i;
        int count;
 
-       spin_lock(&mali_gpu_jobs_lock);
-       count = mali_gpu_jobs[unit][core].count;
+       spin_lock(&mali_activities_lock);
+       i = mali_activity_index(core, key);
+
+       count = mali_activities[i].count;
        BUG_ON(count < 0);
-       ++mali_gpu_jobs[unit][core].count;
+       ++mali_activities[i].count;
        if (count) {
-               mali_gpu_jobs[unit][core].last_tgid = tgid;
-               mali_gpu_jobs[unit][core].last_pid = pid;
-               mali_gpu_jobs[unit][core].last_job_id = job_id;
+               mali_activities[i].last_activity = activity;
+               mali_activities[i].last_pid = pid;
        }
-       spin_unlock(&mali_gpu_jobs_lock);
+       spin_unlock(&mali_activities_lock);
 
-       if (!count) {
-               marshal_sched_gpu_start(unit, core, tgid, pid/*, job_id*/);
-       }
+       if (!count)
+               gator_marshal_activity_switch(core, key, activity, pid);
 }
 
-static void mali_gpu_stop(int unit, int core)
+static void mali_activity_stop(int core, int key)
 {
+       int i;
        int count;
-       int last_tgid = 0;
+       int last_activity = 0;
        int last_pid = 0;
-       //int last_job_id = 0;
 
-       spin_lock(&mali_gpu_jobs_lock);
-       if (mali_gpu_jobs[unit][core].count == 0) {
-               spin_unlock(&mali_gpu_jobs_lock);
+       spin_lock(&mali_activities_lock);
+       i = mali_activity_index(core, key);
+
+       if (mali_activities[i].count == 0) {
+               spin_unlock(&mali_activities_lock);
                return;
        }
-       --mali_gpu_jobs[unit][core].count;
-       count = mali_gpu_jobs[unit][core].count;
+       --mali_activities[i].count;
+       count = mali_activities[i].count;
        if (count) {
-               last_tgid = mali_gpu_jobs[unit][core].last_tgid;
-               last_pid = mali_gpu_jobs[unit][core].last_pid;
-               //last_job_id = mali_gpu_jobs[unit][core].last_job_id;
+               last_activity = mali_activities[i].last_activity;
+               last_pid = mali_activities[i].last_pid;
        }
-       spin_unlock(&mali_gpu_jobs_lock);
+       spin_unlock(&mali_activities_lock);
 
-       marshal_sched_gpu_stop(unit, core);
-       if (count) {
-               marshal_sched_gpu_start(unit, core, last_tgid, last_pid/*, last_job_id*/);
+       gator_marshal_activity_switch(core, key, 0, 0);
+       if (count)
+               gator_marshal_activity_switch(core, key, last_activity, last_pid);
+}
+
+void mali_activity_clear(struct mali_counter mali_activity[], size_t mali_activity_size)
+{
+       int activity;
+       int cores;
+       int core;
+
+       for (activity = 0; activity < mali_activity_size; ++activity) {
+               cores = mali_activity[activity].cores;
+               if (cores < 0)
+                       cores = 1;
+               for (core = 0; core < cores; ++core) {
+                       if (mali_activity[activity].enabled) {
+                               preempt_disable();
+                               gator_marshal_activity_switch(core, mali_activity[activity].key, 0, 0);
+                               preempt_enable();
+                       }
+               }
        }
 }
 
-#if defined(MALI_SUPPORT) && (MALI_SUPPORT != MALI_T6xx)
+#endif
+
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT != MALI_MIDGARD)
 #include "gator_events_mali_4xx.h"
 
 /*
@@ -142,30 +182,36 @@ enum {
        EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE = 1,
 };
 
+struct mali_counter mali_activity[2];
+
 GATOR_DEFINE_PROBE(mali_timeline_event, TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1, unsigned int d2, unsigned int d3, unsigned int d4))
 {
        unsigned int component, state;
 
-       // do as much work as possible before disabling interrupts
-       component = (event_id >> 16) & 0xFF;    // component is an 8-bit field
-       state = (event_id >> 24) & 0xF; // state is a 4-bit field
+       /* do as much work as possible before disabling interrupts */
+       component = (event_id >> 16) & 0xFF;    /* component is an 8-bit field */
+       state = (event_id >> 24) & 0xF; /* state is a 4-bit field */
 
        switch (state) {
        case EVENT_TYPE_START:
                if (component == EVENT_CHANNEL_VP0) {
                        /* tgid = d0; pid = d1; */
-                       mali_gpu_enqueue(GPU_UNIT_VP, 0, d0, d1, 0);
+                       if (mali_activity[1].enabled)
+                               mali_activity_enqueue(0, mali_activity[1].key, 1, d1);
                } else if (component >= EVENT_CHANNEL_FP0 && component <= EVENT_CHANNEL_FP7) {
                        /* tgid = d0; pid = d1; */
-                       mali_gpu_enqueue(GPU_UNIT_FP, component - EVENT_CHANNEL_FP0, d0, d1, 0);
+                       if (mali_activity[0].enabled)
+                               mali_activity_enqueue(component - EVENT_CHANNEL_FP0, mali_activity[0].key, 1, d1);
                }
                break;
 
        case EVENT_TYPE_STOP:
                if (component == EVENT_CHANNEL_VP0) {
-                       mali_gpu_stop(GPU_UNIT_VP, 0);
+                       if (mali_activity[1].enabled)
+                               mali_activity_stop(0, mali_activity[1].key);
                } else if (component >= EVENT_CHANNEL_FP0 && component <= EVENT_CHANNEL_FP7) {
-                       mali_gpu_stop(GPU_UNIT_FP, component - EVENT_CHANNEL_FP0);
+                       if (mali_activity[0].enabled)
+                               mali_activity_stop(component - EVENT_CHANNEL_FP0, mali_activity[0].key);
                }
                break;
 
@@ -173,9 +219,8 @@ GATOR_DEFINE_PROBE(mali_timeline_event, TP_PROTO(unsigned int event_id, unsigned
                if (component == EVENT_CHANNEL_GPU) {
                        unsigned int reason = (event_id & 0xffff);
 
-                       if (reason == EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE) {
+                       if (reason == EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE)
                                gator_events_mali_log_dvfs_event(d0, d1);
-                       }
                }
                break;
 
@@ -185,7 +230,10 @@ GATOR_DEFINE_PROBE(mali_timeline_event, TP_PROTO(unsigned int event_id, unsigned
 }
 #endif
 
-#if defined(MALI_SUPPORT) && (MALI_SUPPORT == MALI_T6xx)
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT == MALI_MIDGARD)
+
+struct mali_counter mali_activity[3];
+
 #if defined(MALI_JOB_SLOTS_EVENT_CHANGED)
 GATOR_DEFINE_PROBE(mali_job_slots_event, TP_PROTO(unsigned int event_id, unsigned int tgid, unsigned int pid, unsigned char job_id))
 #else
@@ -197,8 +245,8 @@ GATOR_DEFINE_PROBE(mali_job_slots_event, TP_PROTO(unsigned int event_id, unsigne
        unsigned char job_id = 0;
 #endif
 
-       component = (event_id >> 16) & 0xFF;    // component is an 8-bit field
-       state = (event_id >> 24) & 0xF; // state is a 4-bit field
+       component = (event_id >> 16) & 0xFF;    /* component is an 8-bit field */
+       state = (event_id >> 24) & 0xF; /* state is a 4-bit field */
 
        switch (component) {
        case 0:
@@ -217,31 +265,19 @@ GATOR_DEFINE_PROBE(mali_job_slots_event, TP_PROTO(unsigned int event_id, unsigne
        if (unit != GPU_UNIT_NONE) {
                switch (state) {
                case EVENT_TYPE_START:
-                       mali_gpu_enqueue(unit, 0, tgid, (pid != 0 ? pid : tgid), job_id);
+                       if (mali_activity[component].enabled)
+                               mali_activity_enqueue(0, mali_activity[component].key, 1, (pid != 0 ? pid : tgid));
                        break;
                case EVENT_TYPE_STOP:
-                       mali_gpu_stop(unit, 0);
+               default: /* Some jobs can be soft-stopped, so ensure that this terminates the activity trace. */
+                       if (mali_activity[component].enabled)
+                               mali_activity_stop(0, mali_activity[component].key);
                        break;
-               default:
-                       /*
-                        * Some jobs can be soft-stopped, so ensure that this terminates the activity trace.
-                        */
-                       mali_gpu_stop(unit, 0);
                }
        }
 }
 #endif
 
-GATOR_DEFINE_PROBE(gpu_activity_start, TP_PROTO(int gpu_unit, int gpu_core, struct task_struct *p))
-{
-       mali_gpu_enqueue(gpu_unit, gpu_core, (int)p->tgid, (int)p->pid, 0);
-}
-
-GATOR_DEFINE_PROBE(gpu_activity_stop, TP_PROTO(int gpu_unit, int gpu_core))
-{
-       mali_gpu_stop(gpu_unit, gpu_core);
-}
-
 static int gator_trace_gpu_start(void)
 {
        /*
@@ -249,53 +285,37 @@ static int gator_trace_gpu_start(void)
         * Absence of gpu trace points is not an error
         */
 
-       memset(&mali_gpu_jobs, 0, sizeof(mali_gpu_jobs));
-       gpu_trace_registered = mali_timeline_trace_registered = mali_job_slots_trace_registered = 0;
+#if defined(MALI_SUPPORT)
+       memset(&mali_activities, 0, sizeof(mali_activities));
+#endif
+       mali_timeline_trace_registered = mali_job_slots_trace_registered = 0;
 
-#if defined(MALI_SUPPORT) && (MALI_SUPPORT != MALI_T6xx)
-       if (!GATOR_REGISTER_TRACE(mali_timeline_event)) {
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT != MALI_MIDGARD)
+       mali_activity_clear(mali_activity, ARRAY_SIZE(mali_activity));
+       if (!GATOR_REGISTER_TRACE(mali_timeline_event))
                mali_timeline_trace_registered = 1;
-       }
 #endif
 
-#if defined(MALI_SUPPORT) && (MALI_SUPPORT == MALI_T6xx)
-       if (!GATOR_REGISTER_TRACE(mali_job_slots_event)) {
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT == MALI_MIDGARD)
+       mali_activity_clear(mali_activity, ARRAY_SIZE(mali_activity));
+       if (!GATOR_REGISTER_TRACE(mali_job_slots_event))
                mali_job_slots_trace_registered = 1;
-       }
 #endif
 
-       if (!mali_timeline_trace_registered) {
-               if (GATOR_REGISTER_TRACE(gpu_activity_start)) {
-                       return 0;
-               }
-               if (GATOR_REGISTER_TRACE(gpu_activity_stop)) {
-                       GATOR_UNREGISTER_TRACE(gpu_activity_start);
-                       return 0;
-               }
-               gpu_trace_registered = 1;
-       }
-
        return 0;
 }
 
 static void gator_trace_gpu_stop(void)
 {
-#if defined(MALI_SUPPORT) && (MALI_SUPPORT != MALI_T6xx)
-       if (mali_timeline_trace_registered) {
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT != MALI_MIDGARD)
+       if (mali_timeline_trace_registered)
                GATOR_UNREGISTER_TRACE(mali_timeline_event);
-       }
 #endif
 
-#if defined(MALI_SUPPORT) && (MALI_SUPPORT == MALI_T6xx)
-       if (mali_job_slots_trace_registered) {
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT == MALI_MIDGARD)
+       if (mali_job_slots_trace_registered)
                GATOR_UNREGISTER_TRACE(mali_job_slots_event);
-       }
 #endif
 
-       if (gpu_trace_registered) {
-               GATOR_UNREGISTER_TRACE(gpu_activity_stop);
-               GATOR_UNREGISTER_TRACE(gpu_activity_start);
-       }
-
-       gpu_trace_registered = mali_timeline_trace_registered = mali_job_slots_trace_registered = 0;
+       mali_timeline_trace_registered = mali_job_slots_trace_registered = 0;
 }
diff --git a/drivers/gator/gator_trace_gpu.h b/drivers/gator/gator_trace_gpu.h
deleted file mode 100644 (file)
index 5113d45..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Copyright (C) ARM Limited 2010-2014. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#undef TRACE_GPU
-#define TRACE_GPU gpu
-
-#if !defined(_TRACE_GPU_H)
-#define _TRACE_GPU_H
-
-#include <linux/tracepoint.h>
-
-/*
- * UNIT - the GPU processor type
- *  1 = Vertex Processor
- *  2 = Fragment Processor
- *
- * CORE - the GPU processor core number
- *  this is not the CPU core number
- */
-
-/*
- * Tracepoint for calling GPU unit start activity on core
- */
-TRACE_EVENT(gpu_activity_start,
-
-           TP_PROTO(int gpu_unit, int gpu_core, struct task_struct *p),
-
-           TP_ARGS(gpu_unit, gpu_core, p),
-
-           TP_STRUCT__entry(
-                            __field(int, gpu_unit)
-                            __field(int, gpu_core)
-                            __array(char, comm, TASK_COMM_LEN)
-                            __field(pid_t, pid)
-           ),
-
-           TP_fast_assign(
-                          __entry->gpu_unit = gpu_unit;
-                          __entry->gpu_core = gpu_core;
-                          memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-                          __entry->pid = p->pid;
-           ),
-
-           TP_printk("unit=%d core=%d comm=%s pid=%d",
-                     __entry->gpu_unit, __entry->gpu_core, __entry->comm,
-                     __entry->pid)
-    );
-
-/*
- * Tracepoint for calling GPU unit stop activity on core
- */
-TRACE_EVENT(gpu_activity_stop,
-
-           TP_PROTO(int gpu_unit, int gpu_core),
-
-           TP_ARGS(gpu_unit, gpu_core),
-
-           TP_STRUCT__entry(
-                            __field(int, gpu_unit)
-                            __field(int, gpu_core)
-           ),
-
-           TP_fast_assign(
-                          __entry->gpu_unit = gpu_unit;
-                          __entry->gpu_core = gpu_core;
-           ),
-
-           TP_printk("unit=%d core=%d", __entry->gpu_unit, __entry->gpu_core)
-    );
-
-#endif /* _TRACE_GPU_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
index 1895bb988c9fcb4559920fe214647dc72e2a8d10..46e04b29a18748c3058819850fbd6664d4329f7c 100644 (file)
 
 #endif
 
-// cpu_frequency and cpu_idle trace points were introduced in Linux kernel v2.6.38
-// the now deprecated power_frequency trace point was available prior to 2.6.38, but only for x86
+/* cpu_frequency and cpu_idle trace points were introduced in Linux
+ * kernel v2.6.38 the now deprecated power_frequency trace point was
+ * available prior to 2.6.38, but only for x86
+ */
 #if GATOR_CPU_FREQ_SUPPORT
 enum {
        POWER_CPU_FREQ,
-       POWER_CPU_IDLE,
        POWER_TOTAL
 };
 
 static DEFINE_PER_CPU(ulong, idle_prev_state);
 static ulong power_cpu_enabled[POWER_TOTAL];
 static ulong power_cpu_key[POWER_TOTAL];
+static ulong power_cpu_cores;
 
 static int gator_trace_power_create_files(struct super_block *sb, struct dentry *root)
 {
@@ -41,8 +43,9 @@ static int gator_trace_power_create_files(struct super_block *sb, struct dentry
        int cpu;
        bool found_nonzero_freq = false;
 
-       // Even if CONFIG_CPU_FREQ is defined, it still may not be used. Check
-       // for non-zero values from cpufreq_quick_get
+       /* Even if CONFIG_CPU_FREQ is defined, it still may not be
+        * used. Check for non-zero values from cpufreq_quick_get
+        */
        for_each_online_cpu(cpu) {
                if (cpufreq_quick_get(cpu) > 0) {
                        found_nonzero_freq = true;
@@ -51,87 +54,72 @@ static int gator_trace_power_create_files(struct super_block *sb, struct dentry
        }
 
        if (found_nonzero_freq) {
-               // cpu_frequency
+               /* cpu_frequency */
                dir = gatorfs_mkdir(sb, root, "Linux_power_cpu_freq");
-               if (!dir) {
+               if (!dir)
                        return -1;
-               }
                gatorfs_create_ulong(sb, dir, "enabled", &power_cpu_enabled[POWER_CPU_FREQ]);
                gatorfs_create_ro_ulong(sb, dir, "key", &power_cpu_key[POWER_CPU_FREQ]);
        }
 
-       // cpu_idle
-       dir = gatorfs_mkdir(sb, root, "Linux_power_cpu_idle");
-       if (!dir) {
-               return -1;
-       }
-       gatorfs_create_ulong(sb, dir, "enabled", &power_cpu_enabled[POWER_CPU_IDLE]);
-       gatorfs_create_ro_ulong(sb, dir, "key", &power_cpu_key[POWER_CPU_IDLE]);
-
        return 0;
 }
 
-// 'cpu' may not equal smp_processor_id(), i.e. may not be running on the core that is having the freq/idle state change
+/* 'cpu' may not equal smp_processor_id(), i.e. may not be running on the core that is having the freq/idle state change */
 GATOR_DEFINE_PROBE(cpu_frequency, TP_PROTO(unsigned int frequency, unsigned int cpu))
 {
        cpu = lcpu_to_pcpu(cpu);
-       marshal_event_single(cpu, power_cpu_key[POWER_CPU_FREQ], frequency * 1000);
+       marshal_event_single64(cpu, power_cpu_key[POWER_CPU_FREQ], frequency * 1000L);
 }
 
 GATOR_DEFINE_PROBE(cpu_idle, TP_PROTO(unsigned int state, unsigned int cpu))
 {
        cpu = lcpu_to_pcpu(cpu);
 
-       if (state == per_cpu(idle_prev_state, cpu)) {
+       if (state == per_cpu(idle_prev_state, cpu))
                return;
-       }
 
        if (implements_wfi()) {
                if (state == PWR_EVENT_EXIT) {
-                       // transition from wfi to non-wfi
+                       /* transition from wfi to non-wfi */
                        marshal_idle(cpu, MESSAGE_IDLE_EXIT);
                } else {
-                       // transition from non-wfi to wfi
+                       /* transition from non-wfi to wfi */
                        marshal_idle(cpu, MESSAGE_IDLE_ENTER);
                }
        }
 
        per_cpu(idle_prev_state, cpu) = state;
-
-       if (power_cpu_enabled[POWER_CPU_IDLE]) {
-               // Increment state so that no negative numbers are sent
-               marshal_event_single(cpu, power_cpu_key[POWER_CPU_IDLE], state + 1);
-       }
 }
 
 static void gator_trace_power_online(void)
 {
        int pcpu = get_physical_cpu();
        int lcpu = get_logical_cpu();
-       if (power_cpu_enabled[POWER_CPU_FREQ]) {
-               marshal_event_single(pcpu, power_cpu_key[POWER_CPU_FREQ], cpufreq_quick_get(lcpu) * 1000);
-       }
+
+       if (power_cpu_enabled[POWER_CPU_FREQ])
+               marshal_event_single64(pcpu, power_cpu_key[POWER_CPU_FREQ], cpufreq_quick_get(lcpu) * 1000L);
 }
 
 static void gator_trace_power_offline(void)
 {
-       // Set frequency to zero on an offline
+       /* Set frequency to zero on an offline */
        int cpu = get_physical_cpu();
-       if (power_cpu_enabled[POWER_CPU_FREQ]) {
+
+       if (power_cpu_enabled[POWER_CPU_FREQ])
                marshal_event_single(cpu, power_cpu_key[POWER_CPU_FREQ], 0);
-       }
 }
 
 static int gator_trace_power_start(void)
 {
        int cpu;
 
-       // register tracepoints
+       /* register tracepoints */
        if (power_cpu_enabled[POWER_CPU_FREQ])
                if (GATOR_REGISTER_TRACE(cpu_frequency))
                        goto fail_cpu_frequency_exit;
 
-       // Always register for cpu:idle for detecting WFI, independent of power_cpu_enabled[POWER_CPU_IDLE]
+       /* Always register for cpu_idle for detecting WFI */
        if (GATOR_REGISTER_TRACE(cpu_idle))
                goto fail_cpu_idle_exit;
        pr_debug("gator: registered power event tracepoints\n");
@@ -142,7 +130,7 @@ static int gator_trace_power_start(void)
 
        return 0;
 
-       // unregister tracepoints on error
+       /* unregister tracepoints on error */
 fail_cpu_idle_exit:
        if (power_cpu_enabled[POWER_CPU_FREQ])
                GATOR_UNREGISTER_TRACE(cpu_frequency);
@@ -161,14 +149,15 @@ static void gator_trace_power_stop(void)
        GATOR_UNREGISTER_TRACE(cpu_idle);
        pr_debug("gator: unregistered power event tracepoints\n");
 
-       for (i = 0; i < POWER_TOTAL; i++) {
+       for (i = 0; i < POWER_TOTAL; i++)
                power_cpu_enabled[i] = 0;
-       }
 }
 
 static void gator_trace_power_init(void)
 {
        int i;
+
+       power_cpu_cores = nr_cpu_ids;
        for (i = 0; i < POWER_TOTAL; i++) {
                power_cpu_enabled[i] = 0;
                power_cpu_key[i] = gator_events_get_key();
index 52990e9d48117b868b5391ca2dbd4840514d2f68..6d7cbd7348e151d43fe992b521caf89311fe5be6 100644 (file)
@@ -8,6 +8,10 @@
  */
 
 #include <trace/events/sched.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+#include <trace/events/task.h>
+#endif
+
 #include "gator.h"
 
 #define TASK_MAP_ENTRIES               1024    /* must be power of 2 */
@@ -23,8 +27,10 @@ enum {
 static DEFINE_PER_CPU(uint64_t *, taskname_keys);
 static DEFINE_PER_CPU(int, collecting);
 
-// this array is never read as the cpu wait charts are derived counters
-// the files are needed, nonetheless, to show that these counters are available
+/* this array is never read as the cpu wait charts are derived
+ * counters the files are needed, nonetheless, to show that these
+ * counters are available
+ */
 static ulong cpu_wait_enabled[CPU_WAIT_TOTAL];
 static ulong sched_cpu_key[CPU_WAIT_TOTAL];
 
@@ -32,26 +38,24 @@ static int sched_trace_create_files(struct super_block *sb, struct dentry *root)
 {
        struct dentry *dir;
 
-       // CPU Wait - Contention
+       /* CPU Wait - Contention */
        dir = gatorfs_mkdir(sb, root, "Linux_cpu_wait_contention");
-       if (!dir) {
+       if (!dir)
                return -1;
-       }
        gatorfs_create_ulong(sb, dir, "enabled", &cpu_wait_enabled[STATE_CONTENTION]);
        gatorfs_create_ro_ulong(sb, dir, "key", &sched_cpu_key[STATE_CONTENTION]);
 
-       // CPU Wait - I/O
+       /* CPU Wait - I/O */
        dir = gatorfs_mkdir(sb, root, "Linux_cpu_wait_io");
-       if (!dir) {
+       if (!dir)
                return -1;
-       }
        gatorfs_create_ulong(sb, dir, "enabled", &cpu_wait_enabled[STATE_WAIT_ON_IO]);
        gatorfs_create_ro_ulong(sb, dir, "key", &sched_cpu_key[STATE_WAIT_ON_IO]);
 
        return 0;
 }
 
-static void emit_pid_name(struct task_struct *task)
+static void emit_pid_name(const char *comm, struct task_struct *task)
 {
        bool found = false;
        char taskcomm[TASK_COMM_LEN + 3];
@@ -59,10 +63,10 @@ static void emit_pid_name(struct task_struct *task)
        uint64_t *keys = &(per_cpu(taskname_keys, cpu)[(task->pid & 0xFF) * TASK_MAX_COLLISIONS]);
        uint64_t value;
 
-       value = gator_chksum_crc32(task->comm);
+       value = gator_chksum_crc32(comm);
        value = (value << 32) | (uint32_t)task->pid;
 
-       // determine if the thread name was emitted already
+       /* determine if the thread name was emitted already */
        for (x = 0; x < TASK_MAX_COLLISIONS; x++) {
                if (keys[x] == value) {
                        found = true;
@@ -71,17 +75,18 @@ static void emit_pid_name(struct task_struct *task)
        }
 
        if (!found) {
-               // shift values, new value always in front
+               /* shift values, new value always in front */
                uint64_t oldv, newv = value;
+
                for (x = 0; x < TASK_MAX_COLLISIONS; x++) {
                        oldv = keys[x];
                        keys[x] = newv;
                        newv = oldv;
                }
 
-               // emit pid names, cannot use get_task_comm, as it's not exported on all kernel versions
-               if (strlcpy(taskcomm, task->comm, TASK_COMM_LEN) == TASK_COMM_LEN - 1) {
-                       // append ellipses if task->comm has length of TASK_COMM_LEN - 1
+               /* emit pid names, cannot use get_task_comm, as it's not exported on all kernel versions */
+               if (strlcpy(taskcomm, comm, TASK_COMM_LEN) == TASK_COMM_LEN - 1) {
+                       /* append ellipses if comm has length of TASK_COMM_LEN - 1 */
                        strcat(taskcomm, "...");
                }
 
@@ -89,7 +94,7 @@ static void emit_pid_name(struct task_struct *task)
        }
 }
 
-static void collect_counters(u64 time, struct task_struct *task)
+static void collect_counters(u64 time, struct task_struct *task, bool sched_switch)
 {
        int *buffer, len, cpu = get_physical_cpu();
        long long *buffer64;
@@ -98,7 +103,7 @@ static void collect_counters(u64 time, struct task_struct *task)
        if (marshal_event_header(time)) {
                list_for_each_entry(gi, &gator_events, list) {
                        if (gi->read) {
-                               len = gi->read(&buffer);
+                               len = gi->read(&buffer, sched_switch);
                                marshal_event(len, buffer);
                        } else if (gi->read64) {
                                len = gi->read64(&buffer64);
@@ -109,22 +114,26 @@ static void collect_counters(u64 time, struct task_struct *task)
                                marshal_event64(len, buffer64);
                        }
                }
-               // Only check after writing all counters so that time and corresponding counters appear in the same frame
+               if (cpu == 0)
+                       gator_emit_perf_time(time);
+               /* Only check after writing all counters so that time and corresponding counters appear in the same frame */
                buffer_check(cpu, BLOCK_COUNTER_BUF, time);
 
-               // Commit buffers on timeout
+               /* Commit buffers on timeout */
                if (gator_live_rate > 0 && time >= per_cpu(gator_buffer_commit_time, cpu)) {
-                       static const int buftypes[] = { NAME_BUF, COUNTER_BUF, BLOCK_COUNTER_BUF, SCHED_TRACE_BUF };
+                       static const int buftypes[] = { NAME_BUF, COUNTER_BUF, BLOCK_COUNTER_BUF, SCHED_TRACE_BUF, ACTIVITY_BUF };
                        int i;
 
-                       for (i = 0; i < ARRAY_SIZE(buftypes); ++i) {
+                       for (i = 0; i < ARRAY_SIZE(buftypes); ++i)
                                gator_commit_buffer(cpu, buftypes[i], time);
-                       }
 
-                       // spinlocks are noops on uniprocessor machines and mutexes do not work in sched_switch context in
-                       // RT-Preempt full, so disable proactive flushing of the annotate frame on uniprocessor machines.
+                       /* spinlocks are noops on uniprocessor machines and mutexes do
+                        * not work in sched_switch context in RT-Preempt full, so
+                        * disable proactive flushing of the annotate frame on
+                        * uniprocessor machines.
+                        */
 #ifdef CONFIG_SMP
-                       // Try to preemptively flush the annotate buffer to reduce the chance of the buffer being full
+                       /* Try to preemptively flush the annotate buffer to reduce the chance of the buffer being full */
                        if (on_primary_core() && spin_trylock(&annotate_lock)) {
                                gator_commit_buffer(0, ANNOTATE_BUF, time);
                                spin_unlock(&annotate_lock);
@@ -134,51 +143,71 @@ static void collect_counters(u64 time, struct task_struct *task)
        }
 }
 
-// special case used during a suspend of the system
+/* special case used during a suspend of the system */
 static void trace_sched_insert_idle(void)
 {
-       marshal_sched_trace_switch(0, 0, 0, 0);
+       marshal_sched_trace_switch(0, 0);
 }
 
-GATOR_DEFINE_PROBE(sched_process_fork, TP_PROTO(struct task_struct *parent, struct task_struct *child))
+static void gator_trace_emit_link(struct task_struct *p)
 {
        int cookie;
        int cpu = get_physical_cpu();
 
-       cookie = get_exec_cookie(cpu, child);
-       emit_pid_name(child);
+       cookie = get_exec_cookie(cpu, p);
+       emit_pid_name(p->comm, p);
 
-       marshal_sched_trace_start(child->tgid, child->pid, cookie);
+       marshal_link(cookie, p->tgid, p->pid);
 }
 
+GATOR_DEFINE_PROBE(sched_process_fork, TP_PROTO(struct task_struct *parent, struct task_struct *child))
+{
+       gator_trace_emit_link(child);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+GATOR_DEFINE_PROBE(sched_process_exec, TP_PROTO(struct task_struct *p, pid_t old_pid, struct linux_binprm *bprm))
+{
+       gator_trace_emit_link(p);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
+GATOR_DEFINE_PROBE(task_rename, TP_PROTO(struct task_struct *task, char *comm))
+#else
+GATOR_DEFINE_PROBE(task_rename, TP_PROTO(struct task_struct *task, const char *comm))
+#endif
+{
+       emit_pid_name(comm, task);
+}
+#endif
+
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
 GATOR_DEFINE_PROBE(sched_switch, TP_PROTO(struct rq *rq, struct task_struct *prev, struct task_struct *next))
 #else
 GATOR_DEFINE_PROBE(sched_switch, TP_PROTO(struct task_struct *prev, struct task_struct *next))
 #endif
 {
-       int cookie;
        int state;
        int cpu = get_physical_cpu();
 
        per_cpu(in_scheduler_context, cpu) = true;
 
-       // do as much work as possible before disabling interrupts
-       cookie = get_exec_cookie(cpu, next);
-       emit_pid_name(next);
-       if (prev->state == TASK_RUNNING) {
+       /* do as much work as possible before disabling interrupts */
+       if (prev->state == TASK_RUNNING)
                state = STATE_CONTENTION;
-       } else if (prev->in_iowait) {
+       else if (prev->in_iowait)
                state = STATE_WAIT_ON_IO;
-       } else {
+       else
                state = STATE_WAIT_ON_OTHER;
-       }
 
        per_cpu(collecting, cpu) = 1;
-       collect_counters(gator_get_time(), prev);
+       collect_counters(gator_get_time(), prev, true);
        per_cpu(collecting, cpu) = 0;
 
-       marshal_sched_trace_switch(next->tgid, next->pid, cookie, state);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
+       gator_trace_emit_link(next);
+#endif
+       marshal_sched_trace_switch(next->pid, state);
 
        per_cpu(in_scheduler_context, cpu) = false;
 }
@@ -190,31 +219,44 @@ GATOR_DEFINE_PROBE(sched_process_free, TP_PROTO(struct task_struct *p))
 
 static void do_nothing(void *info)
 {
-       // Intentionally do nothing
+       /* Intentionally do nothing */
        (void)info;
 }
 
 static int register_scheduler_tracepoints(void)
 {
-       // register tracepoints
+       /* register tracepoints */
        if (GATOR_REGISTER_TRACE(sched_process_fork))
                goto fail_sched_process_fork;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+       if (GATOR_REGISTER_TRACE(sched_process_exec))
+               goto fail_sched_process_exec;
+       if (GATOR_REGISTER_TRACE(task_rename))
+               goto fail_task_rename;
+#endif
        if (GATOR_REGISTER_TRACE(sched_switch))
                goto fail_sched_switch;
        if (GATOR_REGISTER_TRACE(sched_process_free))
                goto fail_sched_process_free;
        pr_debug("gator: registered tracepoints\n");
 
-       // Now that the scheduler tracepoint is registered, force a context switch
-       // on all cpus to capture what is currently running.
+       /* Now that the scheduler tracepoint is registered, force a context
+        * switch on all cpus to capture what is currently running.
+        */
        on_each_cpu(do_nothing, NULL, 0);
 
        return 0;
 
-       // unregister tracepoints on error
+       /* unregister tracepoints on error */
 fail_sched_process_free:
        GATOR_UNREGISTER_TRACE(sched_switch);
 fail_sched_switch:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+       GATOR_UNREGISTER_TRACE(task_rename);
+fail_task_rename:
+       GATOR_UNREGISTER_TRACE(sched_process_exec);
+fail_sched_process_exec:
+#endif
        GATOR_UNREGISTER_TRACE(sched_process_fork);
 fail_sched_process_fork:
        pr_err("gator: tracepoints failed to activate, please verify that tracepoints are enabled in the linux kernel\n");
@@ -222,29 +264,13 @@ fail_sched_process_fork:
        return -1;
 }
 
-static int gator_trace_sched_start(void)
-{
-       int cpu, size;
-
-       for_each_present_cpu(cpu) {
-               size = TASK_MAP_ENTRIES * TASK_MAX_COLLISIONS * sizeof(uint64_t);
-               per_cpu(taskname_keys, cpu) = (uint64_t *)kmalloc(size, GFP_KERNEL);
-               if (!per_cpu(taskname_keys, cpu))
-                       return -1;
-               memset(per_cpu(taskname_keys, cpu), 0, size);
-       }
-
-       return register_scheduler_tracepoints();
-}
-
-static void gator_trace_sched_offline(void)
-{
-       trace_sched_insert_idle();
-}
-
 static void unregister_scheduler_tracepoints(void)
 {
        GATOR_UNREGISTER_TRACE(sched_process_fork);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+       GATOR_UNREGISTER_TRACE(sched_process_exec);
+       GATOR_UNREGISTER_TRACE(task_rename);
+#endif
        GATOR_UNREGISTER_TRACE(sched_switch);
        GATOR_UNREGISTER_TRACE(sched_process_free);
        pr_debug("gator: unregistered tracepoints\n");
@@ -253,6 +279,7 @@ static void unregister_scheduler_tracepoints(void)
 static void gator_trace_sched_stop(void)
 {
        int cpu;
+
        unregister_scheduler_tracepoints();
 
        for_each_present_cpu(cpu) {
@@ -260,9 +287,33 @@ static void gator_trace_sched_stop(void)
        }
 }
 
+static int gator_trace_sched_start(void)
+{
+       int cpu, size;
+       int ret;
+
+       for_each_present_cpu(cpu) {
+               size = TASK_MAP_ENTRIES * TASK_MAX_COLLISIONS * sizeof(uint64_t);
+               per_cpu(taskname_keys, cpu) = kmalloc(size, GFP_KERNEL);
+               if (!per_cpu(taskname_keys, cpu))
+                       return -1;
+               memset(per_cpu(taskname_keys, cpu), 0, size);
+       }
+
+       ret = register_scheduler_tracepoints();
+
+       return ret;
+}
+
+static void gator_trace_sched_offline(void)
+{
+       trace_sched_insert_idle();
+}
+
 static void gator_trace_sched_init(void)
 {
        int i;
+
        for (i = 0; i < CPU_WAIT_TOTAL; i++) {
                cpu_wait_enabled[i] = 0;
                sched_cpu_key[i] = gator_events_get_key();
diff --git a/drivers/gator/mali/mali_kbase_gator_api.h b/drivers/gator/mali/mali_kbase_gator_api.h
new file mode 100644 (file)
index 0000000..5ed0697
--- /dev/null
@@ -0,0 +1,219 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _KBASE_GATOR_API_H_
+#define _KBASE_GATOR_API_H_
+
+/**
+ * @brief This file describes the API used by Gator to collect hardware counters data from a Mali device.
+ */
+
+/* This define is used by the gator kernel module compile to select which DDK
+ * API calling convention to use. If not defined (legacy DDK) gator assumes
+ * version 1. The version to DDK release mapping is:
+ *     Version 1 API: DDK versions r1px, r2px
+ *     Version 2 API: DDK versions r3px, r4px
+ *     Version 3 API: DDK version r5p0 and newer
+ *
+ * API Usage
+ * =========
+ *
+ * 1] Call kbase_gator_hwcnt_init_names() to return the list of short counter
+ * names for the GPU present in this device.
+ *
+ * 2] Create a kbase_gator_hwcnt_info structure and set the counter enables for
+ * the counters you want enabled. The enables can all be set for simplicity in
+ * most use cases, but disabling some will let you minimize bandwidth impact.
+ *
+ * 3] Call kbase_gator_hwcnt_init() using the above structure, to create a
+ * counter context. On successful return the DDK will have populated the
+ * structure with a variety of useful information.
+ *
+ * 4] Call kbase_gator_hwcnt_dump_irq() to queue a non-blocking request for a
+ * counter dump. If this returns a non-zero value the request has been queued,
+ * otherwise the driver has been unable to do so (typically because another
+ * user of the instrumentation exists concurrently).
+ *
+ * 5] Call kbase_gator_hwcnt_dump_complete() to test whether the previously
+ * requested dump has been successful. If this returns non-zero the counter dump
+ * has resolved, but the value of *success must also be tested as the dump
+ * may have not been successful. If it returns zero the counter dump was
+ * abandoned due to the device being busy (typically because another
+ * user of the instrumentation exists concurrently).
+ *
+ * 6] Process the counters stored in the buffer pointed to by ...
+ *
+ *        kbase_gator_hwcnt_info->kernel_dump_buffer
+ *
+ *    In pseudo code you can find all of the counters via this approach:
+ *
+ *
+ *        hwcnt_info # pointer to kbase_gator_hwcnt_info structure
+ *        hwcnt_name # pointer to name list
+ *
+ *        u32 * hwcnt_data = (u32*)hwcnt_info->kernel_dump_buffer
+ *
+ *        # Iterate over each 64-counter block in this GPU configuration
+ *        for( i = 0; i < hwcnt_info->nr_hwc_blocks; i++) {
+ *            hwc_type type = hwcnt_info->hwc_layout[i];
+ *
+ *            # Skip reserved type blocks - they contain no counters at all
+ *            if( type == RESERVED_BLOCK ) {
+ *                continue;
+ *            }
+ *
+ *            size_t name_offset = type * 64;
+ *            size_t data_offset = i * 64;
+ *
+ *            # Iterate over the names of the counters in this block type
+ *            for( j = 0; j < 64; j++) {
+ *                const char * name = hwcnt_name[name_offset+j];
+ *
+ *                # Skip empty name strings - there is no counter here
+ *                if( name[0] == '\0' ) {
+ *                    continue;
+ *                }
+ *
+ *                u32 data = hwcnt_data[data_offset+j];
+ *
+ *                printk( "COUNTER: %s DATA: %u\n", name, data );
+ *            }
+ *        }
+ *
+ *
+ *     Note that in most implementations you typically want to either SUM or
+ *     AVERAGE multiple instances of the same counter if, for example, you have
+ *     multiple shader cores or multiple L2 caches. The most sensible view for
+ *     analysis is to AVERAGE shader core counters, but SUM L2 cache and MMU
+ *     counters.
+ *
+ * 7] Goto 4, repeating until you want to stop collecting counters.
+ *
+ * 8] Release the dump resources by calling kbase_gator_hwcnt_term().
+ *
+ * 9] Release the name table resources by calling kbase_gator_hwcnt_term_names().
+ *    This function must only be called if init_names() returned a non-NULL value.
+ **/
+
+#define MALI_DDK_GATOR_API_VERSION 3
+
+#if !defined(MALI_TRUE)
+       #define MALI_TRUE                ((uint32_t)1)
+#endif
+
+#if !defined(MALI_FALSE)
+       #define MALI_FALSE               ((uint32_t)0)
+#endif
+
+enum hwc_type {
+       JM_BLOCK = 0,
+       TILER_BLOCK,
+       SHADER_BLOCK,
+       MMU_L2_BLOCK,
+       RESERVED_BLOCK
+};
+
+struct kbase_gator_hwcnt_info {
+
+       /* Passed from Gator to kbase */
+
+       /* the bitmask of enabled hardware counters for each counter block */
+       uint16_t bitmask[4];
+
+       /* Passed from kbase to Gator */
+
+       /* ptr to counter dump memory */
+       void *kernel_dump_buffer;
+
+       /* size of counter dump memory */
+       uint32_t size;
+
+       /* the ID of the Mali device */
+       uint32_t gpu_id;
+
+       /* the number of shader cores in the GPU */
+       uint32_t nr_cores;
+
+       /* the number of core groups */
+       uint32_t nr_core_groups;
+
+       /* the memory layout of the performance counters */
+       enum hwc_type *hwc_layout;
+
+       /* the total number of hardware counter blocks */
+       uint32_t nr_hwc_blocks;
+};
+
+/**
+ * @brief Opaque block of Mali data which Gator needs to return to the API later.
+ */
+struct kbase_gator_hwcnt_handles;
+
+/**
+ * @brief Initialize the resources Gator needs for performance profiling.
+ *
+ * @param in_out_info   A pointer to a structure containing the enabled counters passed from Gator and all the Mali
+ *                      specific information that will be returned to Gator. On entry Gator must have populated the
+ *                      'bitmask' field with the counters it wishes to enable for each class of counter block.
+ *                      Each entry in the array corresponds to a single counter class based on the "hwc_type"
+ *                      enumeration, and each bit corresponds to an enable for 4 sequential counters (LSB enables
+ *                      the first 4 counters in the block, and so on). See the GPU counter array as returned by
+ *                      kbase_gator_hwcnt_get_names() for the index values of each counter for the current GPU.
+ *
+ * @return              Pointer to an opaque handle block on success, NULL on error.
+ */
+extern struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info);
+
+/**
+ * @brief Free all resources once Gator has finished using performance counters.
+ *
+ * @param in_out_info       A pointer to a structure containing the enabled counters passed from Gator and all the
+ *                          Mali specific information that will be returned to Gator.
+ * @param opaque_handles    A wrapper structure for kbase structures.
+ */
+extern void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles);
+
+/**
+ * @brief Poll whether a counter dump is successful.
+ *
+ * @param opaque_handles    A wrapper structure for kbase structures.
+ * @param[out] success      Non-zero on success, zero on failure.
+ *
+ * @return                  Zero if the dump is still pending, non-zero if the dump has completed. Note that a
+ *                          completed dump may not have dumped successfully, so the caller must test for both
+ *                          a completed and successful dump before processing counters.
+ */
+extern uint32_t kbase_gator_instr_hwcnt_dump_complete(struct kbase_gator_hwcnt_handles *opaque_handles, uint32_t * const success);
+
+/**
+ * @brief Request the generation of a new counter dump.
+ *
+ * @param opaque_handles    A wrapper structure for kbase structures.
+ *
+ * @return                  Zero if the hardware device is busy and cannot handle the request, non-zero otherwise.
+ */
+extern uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles);
+
+/**
+ * @brief This function is used to fetch the names table based on the Mali device in use.
+ *
+ * @param[out] total_number_of_counters The total number of counters short names in the Mali devices' list.
+ *
+ * @return                              Pointer to an array of strings of length *total_number_of_counters.
+ */
+extern const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_number_of_counters);
+
+/**
+ * @brief This function is used to terminate the use of the names table.
+ *
+ * This function must only be called if the initial call to kbase_gator_hwcnt_init_names returned a non-NULL value.
+ */
+extern void kbase_gator_hwcnt_term_names(void);
+
+#endif
index ff00d90cee78e79c1dff955ccfcff6ecbbc85a10..2bc0b037eee6c668e5948a4751678324dafb92a9 100644 (file)
@@ -23,83 +23,82 @@ extern "C"
 #define MAX_NUM_VP_CORES            (1)
 #define MAX_NUM_L2_CACHE_CORES      (1)
 
-enum counters
-{
-    /* Timeline activity */
-    ACTIVITY_VP_0 = 0,
-    ACTIVITY_FP_0,
-    ACTIVITY_FP_1,
-    ACTIVITY_FP_2,
-    ACTIVITY_FP_3,
-
-    /* L2 cache counters */
-    COUNTER_L2_0_C0,
-    COUNTER_L2_0_C1,
-
-    /* Vertex processor counters */
-    COUNTER_VP_0_C0,
-    COUNTER_VP_0_C1,
-
-    /* Fragment processor counters */
-    COUNTER_FP_0_C0,
-    COUNTER_FP_0_C1,
-    COUNTER_FP_1_C0,
-    COUNTER_FP_1_C1,
-    COUNTER_FP_2_C0,
-    COUNTER_FP_2_C1,
-    COUNTER_FP_3_C0,
-    COUNTER_FP_3_C1,
-
-    /* EGL Software Counters */
-    COUNTER_EGL_BLIT_TIME,
-
-    /* GLES Software Counters */
-    COUNTER_GLES_DRAW_ELEMENTS_CALLS,
-    COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
-    COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
-    COUNTER_GLES_DRAW_ARRAYS_CALLS,
-    COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
-    COUNTER_GLES_DRAW_POINTS,
-    COUNTER_GLES_DRAW_LINES,
-    COUNTER_GLES_DRAW_LINE_LOOP,
-    COUNTER_GLES_DRAW_LINE_STRIP,
-    COUNTER_GLES_DRAW_TRIANGLES,
-    COUNTER_GLES_DRAW_TRIANGLE_STRIP,
-    COUNTER_GLES_DRAW_TRIANGLE_FAN,
-    COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
-    COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
-    COUNTER_GLES_UPLOAD_TEXTURE_TIME,
-    COUNTER_GLES_UPLOAD_VBO_TIME,
-    COUNTER_GLES_NUM_FLUSHES,
-    COUNTER_GLES_NUM_VSHADERS_GENERATED,
-    COUNTER_GLES_NUM_FSHADERS_GENERATED,
-    COUNTER_GLES_VSHADER_GEN_TIME,
-    COUNTER_GLES_FSHADER_GEN_TIME,
-    COUNTER_GLES_INPUT_TRIANGLES,
-    COUNTER_GLES_VXCACHE_HIT,
-    COUNTER_GLES_VXCACHE_MISS,
-    COUNTER_GLES_VXCACHE_COLLISION,
-    COUNTER_GLES_CULLED_TRIANGLES,
-    COUNTER_GLES_CULLED_LINES,
-    COUNTER_GLES_BACKFACE_TRIANGLES,
-    COUNTER_GLES_GBCLIP_TRIANGLES,
-    COUNTER_GLES_GBCLIP_LINES,
-    COUNTER_GLES_TRIANGLES_DRAWN,
-    COUNTER_GLES_DRAWCALL_TIME,
-    COUNTER_GLES_TRIANGLES_COUNT,
-    COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
-    COUNTER_GLES_STRIP_TRIANGLES_COUNT,
-    COUNTER_GLES_FAN_TRIANGLES_COUNT,
-    COUNTER_GLES_LINES_COUNT,
-    COUNTER_GLES_INDEPENDENT_LINES_COUNT,
-    COUNTER_GLES_STRIP_LINES_COUNT,
-    COUNTER_GLES_LOOP_LINES_COUNT,
-
-    COUNTER_FILMSTRIP,
-    COUNTER_FREQUENCY,
-    COUNTER_VOLTAGE,
-
-    NUMBER_OF_EVENTS
+enum counters {
+       /* Timeline activity */
+       ACTIVITY_VP_0 = 0,
+       ACTIVITY_FP_0,
+       ACTIVITY_FP_1,
+       ACTIVITY_FP_2,
+       ACTIVITY_FP_3,
+
+       /* L2 cache counters */
+       COUNTER_L2_0_C0,
+       COUNTER_L2_0_C1,
+
+       /* Vertex processor counters */
+       COUNTER_VP_0_C0,
+       COUNTER_VP_0_C1,
+
+       /* Fragment processor counters */
+       COUNTER_FP_0_C0,
+       COUNTER_FP_0_C1,
+       COUNTER_FP_1_C0,
+       COUNTER_FP_1_C1,
+       COUNTER_FP_2_C0,
+       COUNTER_FP_2_C1,
+       COUNTER_FP_3_C0,
+       COUNTER_FP_3_C1,
+
+       /* EGL Software Counters */
+       COUNTER_EGL_BLIT_TIME,
+
+       /* GLES Software Counters */
+       COUNTER_GLES_DRAW_ELEMENTS_CALLS,
+       COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
+       COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
+       COUNTER_GLES_DRAW_ARRAYS_CALLS,
+       COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
+       COUNTER_GLES_DRAW_POINTS,
+       COUNTER_GLES_DRAW_LINES,
+       COUNTER_GLES_DRAW_LINE_LOOP,
+       COUNTER_GLES_DRAW_LINE_STRIP,
+       COUNTER_GLES_DRAW_TRIANGLES,
+       COUNTER_GLES_DRAW_TRIANGLE_STRIP,
+       COUNTER_GLES_DRAW_TRIANGLE_FAN,
+       COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
+       COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
+       COUNTER_GLES_UPLOAD_TEXTURE_TIME,
+       COUNTER_GLES_UPLOAD_VBO_TIME,
+       COUNTER_GLES_NUM_FLUSHES,
+       COUNTER_GLES_NUM_VSHADERS_GENERATED,
+       COUNTER_GLES_NUM_FSHADERS_GENERATED,
+       COUNTER_GLES_VSHADER_GEN_TIME,
+       COUNTER_GLES_FSHADER_GEN_TIME,
+       COUNTER_GLES_INPUT_TRIANGLES,
+       COUNTER_GLES_VXCACHE_HIT,
+       COUNTER_GLES_VXCACHE_MISS,
+       COUNTER_GLES_VXCACHE_COLLISION,
+       COUNTER_GLES_CULLED_TRIANGLES,
+       COUNTER_GLES_CULLED_LINES,
+       COUNTER_GLES_BACKFACE_TRIANGLES,
+       COUNTER_GLES_GBCLIP_TRIANGLES,
+       COUNTER_GLES_GBCLIP_LINES,
+       COUNTER_GLES_TRIANGLES_DRAWN,
+       COUNTER_GLES_DRAWCALL_TIME,
+       COUNTER_GLES_TRIANGLES_COUNT,
+       COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
+       COUNTER_GLES_STRIP_TRIANGLES_COUNT,
+       COUNTER_GLES_FAN_TRIANGLES_COUNT,
+       COUNTER_GLES_LINES_COUNT,
+       COUNTER_GLES_INDEPENDENT_LINES_COUNT,
+       COUNTER_GLES_STRIP_LINES_COUNT,
+       COUNTER_GLES_LOOP_LINES_COUNT,
+
+       COUNTER_FILMSTRIP,
+       COUNTER_FREQUENCY,
+       COUNTER_VOLTAGE,
+
+       NUMBER_OF_EVENTS
 };
 
 #define FIRST_ACTIVITY_EVENT    ACTIVITY_VP_0
@@ -117,34 +116,31 @@ enum counters
 /**
  * Structure to pass performance counter data of a Mali core
  */
-typedef struct _mali_profiling_core_counters
-{
-    u32 source0;
-    u32 value0;
-    u32 source1;
-    u32 value1;
-} _mali_profiling_core_counters;
+struct _mali_profiling_core_counters {
+       u32 source0;
+       u32 value0;
+       u32 source1;
+       u32 value1;
+};
 
 /*
  * For compatibility with utgard.
  */
-typedef struct _mali_profiling_l2_counter_values
-{
-    struct _mali_profiling_core_counters cores[MAX_NUM_L2_CACHE_CORES];
-} _mali_profiling_l2_counter_values;
+struct _mali_profiling_l2_counter_values {
+       struct _mali_profiling_core_counters cores[MAX_NUM_L2_CACHE_CORES];
+};
 
-typedef struct _mali_profiling_mali_version
-{
-    u32 mali_product_id;
-    u32 mali_version_major;
-    u32 mali_version_minor;
-    u32 num_of_l2_cores;
-    u32 num_of_fp_cores;
-    u32 num_of_vp_cores;
-} _mali_profiling_mali_version;
+struct _mali_profiling_mali_version {
+       u32 mali_product_id;
+       u32 mali_version_major;
+       u32 mali_version_minor;
+       u32 num_of_l2_cores;
+       u32 num_of_fp_cores;
+       u32 num_of_vp_cores;
+};
 
 extern void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values);
-extern u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values);
+extern u32 _mali_profiling_get_l2_counters(struct _mali_profiling_l2_counter_values *values);
 
 /*
  * List of possible actions allowing DDK to be controlled by Streamline.
index 43c57604288037e4d3f453285b79a1f2e087a25b..d6465312628ed1a5479fff8611271201dc997749 100644 (file)
@@ -22,105 +22,104 @@ extern "C"
 #define MAX_NUM_VP_CORES 1
 
 /** The list of events supported by the Mali DDK. */
-typedef enum
-{
-    /* Vertex processor activity */
-    ACTIVITY_VP_0 = 0,
-
-    /* Fragment processor activity */
-    ACTIVITY_FP_0, /* 1 */
-    ACTIVITY_FP_1,
-    ACTIVITY_FP_2,
-    ACTIVITY_FP_3,
-    ACTIVITY_FP_4,
-    ACTIVITY_FP_5,
-    ACTIVITY_FP_6,
-    ACTIVITY_FP_7,
-
-    /* L2 cache counters */
-    COUNTER_L2_0_C0,
-    COUNTER_L2_0_C1,
-    COUNTER_L2_1_C0,
-    COUNTER_L2_1_C1,
-    COUNTER_L2_2_C0,
-    COUNTER_L2_2_C1,
-
-    /* Vertex processor counters */
-    COUNTER_VP_0_C0, /*15*/
-    COUNTER_VP_0_C1,
-
-    /* Fragment processor counters */
-    COUNTER_FP_0_C0,
-    COUNTER_FP_0_C1,
-    COUNTER_FP_1_C0,
-    COUNTER_FP_1_C1,
-    COUNTER_FP_2_C0,
-    COUNTER_FP_2_C1,
-    COUNTER_FP_3_C0,
-    COUNTER_FP_3_C1,
-    COUNTER_FP_4_C0,
-    COUNTER_FP_4_C1,
-    COUNTER_FP_5_C0,
-    COUNTER_FP_5_C1,
-    COUNTER_FP_6_C0,
-    COUNTER_FP_6_C1,
-    COUNTER_FP_7_C0,
-    COUNTER_FP_7_C1, /* 32 */
-
-    /*
-     * If more hardware counters are added, the _mali_osk_hw_counter_table
-     * below should also be updated.
-     */
-
-    /* EGL software counters */
-    COUNTER_EGL_BLIT_TIME,
-
-    /* GLES software counters */
-    COUNTER_GLES_DRAW_ELEMENTS_CALLS,
-    COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
-    COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
-    COUNTER_GLES_DRAW_ARRAYS_CALLS,
-    COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
-    COUNTER_GLES_DRAW_POINTS,
-    COUNTER_GLES_DRAW_LINES,
-    COUNTER_GLES_DRAW_LINE_LOOP,
-    COUNTER_GLES_DRAW_LINE_STRIP,
-    COUNTER_GLES_DRAW_TRIANGLES,
-    COUNTER_GLES_DRAW_TRIANGLE_STRIP,
-    COUNTER_GLES_DRAW_TRIANGLE_FAN,
-    COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
-    COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
-    COUNTER_GLES_UPLOAD_TEXTURE_TIME,
-    COUNTER_GLES_UPLOAD_VBO_TIME,
-    COUNTER_GLES_NUM_FLUSHES,
-    COUNTER_GLES_NUM_VSHADERS_GENERATED,
-    COUNTER_GLES_NUM_FSHADERS_GENERATED,
-    COUNTER_GLES_VSHADER_GEN_TIME,
-    COUNTER_GLES_FSHADER_GEN_TIME,
-    COUNTER_GLES_INPUT_TRIANGLES,
-    COUNTER_GLES_VXCACHE_HIT,
-    COUNTER_GLES_VXCACHE_MISS,
-    COUNTER_GLES_VXCACHE_COLLISION,
-    COUNTER_GLES_CULLED_TRIANGLES,
-    COUNTER_GLES_CULLED_LINES,
-    COUNTER_GLES_BACKFACE_TRIANGLES,
-    COUNTER_GLES_GBCLIP_TRIANGLES,
-    COUNTER_GLES_GBCLIP_LINES,
-    COUNTER_GLES_TRIANGLES_DRAWN,
-    COUNTER_GLES_DRAWCALL_TIME,
-    COUNTER_GLES_TRIANGLES_COUNT,
-    COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
-    COUNTER_GLES_STRIP_TRIANGLES_COUNT,
-    COUNTER_GLES_FAN_TRIANGLES_COUNT,
-    COUNTER_GLES_LINES_COUNT,
-    COUNTER_GLES_INDEPENDENT_LINES_COUNT,
-    COUNTER_GLES_STRIP_LINES_COUNT,
-    COUNTER_GLES_LOOP_LINES_COUNT,
-
-    /* Framebuffer capture pseudo-counter */
-    COUNTER_FILMSTRIP,
-
-    NUMBER_OF_EVENTS
+enum {
+       /* Vertex processor activity */
+       ACTIVITY_VP_0 = 0,
+
+       /* Fragment processor activity */
+       ACTIVITY_FP_0, /* 1 */
+       ACTIVITY_FP_1,
+       ACTIVITY_FP_2,
+       ACTIVITY_FP_3,
+       ACTIVITY_FP_4,
+       ACTIVITY_FP_5,
+       ACTIVITY_FP_6,
+       ACTIVITY_FP_7,
+
+       /* L2 cache counters */
+       COUNTER_L2_0_C0,
+       COUNTER_L2_0_C1,
+       COUNTER_L2_1_C0,
+       COUNTER_L2_1_C1,
+       COUNTER_L2_2_C0,
+       COUNTER_L2_2_C1,
+
+       /* Vertex processor counters */
+       COUNTER_VP_0_C0, /*15*/
+       COUNTER_VP_0_C1,
+
+       /* Fragment processor counters */
+       COUNTER_FP_0_C0,
+       COUNTER_FP_0_C1,
+       COUNTER_FP_1_C0,
+       COUNTER_FP_1_C1,
+       COUNTER_FP_2_C0,
+       COUNTER_FP_2_C1,
+       COUNTER_FP_3_C0,
+       COUNTER_FP_3_C1,
+       COUNTER_FP_4_C0,
+       COUNTER_FP_4_C1,
+       COUNTER_FP_5_C0,
+       COUNTER_FP_5_C1,
+       COUNTER_FP_6_C0,
+       COUNTER_FP_6_C1,
+       COUNTER_FP_7_C0,
+       COUNTER_FP_7_C1, /* 32 */
+
+       /*
+        * If more hardware counters are added, the _mali_osk_hw_counter_table
+        * below should also be updated.
+        */
+
+       /* EGL software counters */
+       COUNTER_EGL_BLIT_TIME,
+
+       /* GLES software counters */
+       COUNTER_GLES_DRAW_ELEMENTS_CALLS,
+       COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
+       COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
+       COUNTER_GLES_DRAW_ARRAYS_CALLS,
+       COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
+       COUNTER_GLES_DRAW_POINTS,
+       COUNTER_GLES_DRAW_LINES,
+       COUNTER_GLES_DRAW_LINE_LOOP,
+       COUNTER_GLES_DRAW_LINE_STRIP,
+       COUNTER_GLES_DRAW_TRIANGLES,
+       COUNTER_GLES_DRAW_TRIANGLE_STRIP,
+       COUNTER_GLES_DRAW_TRIANGLE_FAN,
+       COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
+       COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
+       COUNTER_GLES_UPLOAD_TEXTURE_TIME,
+       COUNTER_GLES_UPLOAD_VBO_TIME,
+       COUNTER_GLES_NUM_FLUSHES,
+       COUNTER_GLES_NUM_VSHADERS_GENERATED,
+       COUNTER_GLES_NUM_FSHADERS_GENERATED,
+       COUNTER_GLES_VSHADER_GEN_TIME,
+       COUNTER_GLES_FSHADER_GEN_TIME,
+       COUNTER_GLES_INPUT_TRIANGLES,
+       COUNTER_GLES_VXCACHE_HIT,
+       COUNTER_GLES_VXCACHE_MISS,
+       COUNTER_GLES_VXCACHE_COLLISION,
+       COUNTER_GLES_CULLED_TRIANGLES,
+       COUNTER_GLES_CULLED_LINES,
+       COUNTER_GLES_BACKFACE_TRIANGLES,
+       COUNTER_GLES_GBCLIP_TRIANGLES,
+       COUNTER_GLES_GBCLIP_LINES,
+       COUNTER_GLES_TRIANGLES_DRAWN,
+       COUNTER_GLES_DRAWCALL_TIME,
+       COUNTER_GLES_TRIANGLES_COUNT,
+       COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
+       COUNTER_GLES_STRIP_TRIANGLES_COUNT,
+       COUNTER_GLES_FAN_TRIANGLES_COUNT,
+       COUNTER_GLES_LINES_COUNT,
+       COUNTER_GLES_INDEPENDENT_LINES_COUNT,
+       COUNTER_GLES_STRIP_LINES_COUNT,
+       COUNTER_GLES_LOOP_LINES_COUNT,
+
+       /* Framebuffer capture pseudo-counter */
+       COUNTER_FILMSTRIP,
+
+       NUMBER_OF_EVENTS
 } _mali_osk_counter_id;
 
 #define FIRST_ACTIVITY_EVENT    ACTIVITY_VP_0
@@ -138,21 +137,19 @@ typedef enum
 /**
  * Structure to pass performance counter data of a Mali core
  */
-typedef struct _mali_profiling_core_counters
-{
+struct _mali_profiling_core_counters {
        u32 source0;
        u32 value0;
        u32 source1;
        u32 value1;
-} _mali_profiling_core_counters;
+};
 
 /**
  * Structure to pass performance counter data of Mali L2 cache cores
  */
-typedef struct _mali_profiling_l2_counter_values
-{
+struct _mali_profiling_l2_counter_values {
        struct _mali_profiling_core_counters cores[MAX_NUM_L2_CACHE_CORES];
-} _mali_profiling_l2_counter_values;
+};
 
 /**
  * Structure to pass data defining Mali instance in use:
@@ -164,15 +161,14 @@ typedef struct _mali_profiling_l2_counter_values
  * num_of_fp_cores - number of fragment processor cores
  * num_of_vp_cores - number of vertex processor cores
  */
-typedef struct _mali_profiling_mali_version
-{
+struct _mali_profiling_mali_version {
        u32 mali_product_id;
        u32 mali_version_major;
        u32 mali_version_minor;
        u32 num_of_l2_cores;
        u32 num_of_fp_cores;
        u32 num_of_vp_cores;
-} _mali_profiling_mali_version;
+};
 
 /*
  * List of possible actions to be controlled by Streamline.
@@ -186,7 +182,7 @@ typedef struct _mali_profiling_mali_version
 
 void _mali_profiling_control(u32 action, u32 value);
 
-u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values);
+u32 _mali_profiling_get_l2_counters(struct _mali_profiling_l2_counter_values *values);
 
 int _mali_profiling_set_event(u32 counter_id, s32 event_id);
 
diff --git a/drivers/gator/mali_midgard.mk b/drivers/gator/mali_midgard.mk
new file mode 100644 (file)
index 0000000..1b784d5
--- /dev/null
@@ -0,0 +1,39 @@
+# Defines for Mali-Midgard driver
+EXTRA_CFLAGS += -DMALI_USE_UMP=1 \
+                -DMALI_LICENSE_IS_GPL=1 \
+                -DMALI_BASE_TRACK_MEMLEAK=0 \
+                -DMALI_DEBUG=0 \
+                -DMALI_ERROR_INJECT_ON=0 \
+                -DMALI_CUSTOMER_RELEASE=1 \
+                -DMALI_UNIT_TEST=0 \
+                -DMALI_BACKEND_KERNEL=1 \
+                -DMALI_NO_MALI=0
+
+DDK_DIR ?= .
+ifneq ($(wildcard $(DDK_DIR)/drivers/gpu/arm/t6xx),)
+KBASE_DIR = $(DDK_DIR)/drivers/gpu/arm/t6xx/kbase
+OSK_DIR = $(DDK_DIR)/drivers/gpu/arm/t6xx/kbase/osk
+endif
+
+ifneq ($(wildcard $(DDK_DIR)/drivers/gpu/arm/midgard),)
+KBASE_DIR = $(DDK_DIR)/drivers/gpu/arm/midgard
+OSK_DIR = $(DDK_DIR)/drivers/gpu/arm/midgard/osk
+EXTRA_CFLAGS += -DMALI_DIR_MIDGARD=1
+endif
+
+ifneq ($(wildcard $(DDK_DIR)/drivers/gpu/arm/midgard/mali_kbase_gator_api.h),)
+EXTRA_CFLAGS += -DMALI_SIMPLE_API=1
+endif
+
+UMP_DIR = $(DDK_DIR)/include/linux
+
+# Include directories in the DDK
+EXTRA_CFLAGS += -I$(KBASE_DIR)/ \
+                -I$(KBASE_DIR)/.. \
+                -I$(OSK_DIR)/.. \
+                -I$(UMP_DIR)/.. \
+                -I$(DDK_DIR)/include \
+                -I$(KBASE_DIR)/osk/src/linux/include \
+                -I$(KBASE_DIR)/platform_dummy \
+                -I$(KBASE_DIR)/src
+
diff --git a/drivers/gator/mali_t6xx.mk b/drivers/gator/mali_t6xx.mk
deleted file mode 100644 (file)
index 059d47a..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-# Defines for Mali-T6xx driver
-EXTRA_CFLAGS += -DMALI_USE_UMP=1 \
-                -DMALI_LICENSE_IS_GPL=1 \
-                -DMALI_BASE_TRACK_MEMLEAK=0 \
-                -DMALI_DEBUG=0 \
-                -DMALI_ERROR_INJECT_ON=0 \
-                -DMALI_CUSTOMER_RELEASE=1 \
-                -DMALI_UNIT_TEST=0 \
-                -DMALI_BACKEND_KERNEL=1 \
-                -DMALI_NO_MALI=0
-
-DDK_DIR ?= .
-ifneq ($(wildcard $(DDK_DIR)/drivers/gpu/arm/t6xx),)
-KBASE_DIR = $(DDK_DIR)/drivers/gpu/arm/t6xx/kbase
-OSK_DIR = $(DDK_DIR)/drivers/gpu/arm/t6xx/kbase/osk
-endif
-
-ifneq ($(wildcard $(DDK_DIR)/drivers/gpu/arm/midgard),)
-KBASE_DIR = $(DDK_DIR)/drivers/gpu/arm/midgard
-OSK_DIR = $(DDK_DIR)/drivers/gpu/arm/midgard/osk
-EXTRA_CFLAGS += -DMALI_DIR_MIDGARD=1
-endif
-
-UMP_DIR = $(DDK_DIR)/include/linux
-
-# Include directories in the DDK
-EXTRA_CFLAGS += -I$(KBASE_DIR)/ \
-                -I$(KBASE_DIR)/.. \
-                -I$(OSK_DIR)/.. \
-                -I$(UMP_DIR)/.. \
-                -I$(DDK_DIR)/include \
-                -I$(KBASE_DIR)/osk/src/linux/include \
-                -I$(KBASE_DIR)/platform_dummy \
-                -I$(KBASE_DIR)/src
-
index f60fd7bd11839f0511bf2a436f1ebc79330f27c4..96f874a508e26f68f895572e607eea2c57d09f24 100644 (file)
@@ -100,7 +100,7 @@ static int ast_detect_chip(struct drm_device *dev)
                        }
                        ast->vga2_clone = false;
                } else {
-                       ast->chip = 2000;
+                       ast->chip = AST2000;
                        DRM_INFO("AST 2000 detected\n");
                }
        }
index 7fc9f7272b56e7e9ebe846b650e9e2f18f28cc5f..e8f6418b6dec9984f33eb9e9350d53255d3ff2ef 100644 (file)
@@ -1012,8 +1012,8 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
                        srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
                        data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
                        data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
-                       data32.b[2] = srcdata32[0].b[1] | (srcdata32[1].b[0] >> 4);
-                       data32.b[3] = srcdata32[0].b[3] | (srcdata32[1].b[2] >> 4);
+                       data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
+                       data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
 
                        writel(data32.ul, dstxor);
                        csum += data32.ul;
index 95070b2124c6b8ccd0db297d990e7c5958561c0b..49acec1550460e4da1ed687990281b07dacca4ae 100644 (file)
@@ -657,7 +657,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
        DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
 }
 
-static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
 {
        DRM_DEBUG_KMS("Falling back to manually reading VBT from "
                      "VBIOS ROM for %s\n",
index 4a809969c5ac58bfda721fe1a9ec75725ea1391b..53435a9d847e0e5e653ec225ee484329e6c640bd 100644 (file)
@@ -702,7 +702,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
        .destroy = intel_encoder_destroy,
 };
 
-static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
+static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
 {
        DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
        return 1;
index f77d42f74427dc7ed8462cabc6fde1ac9723f73f..08e8e18b3f85597371d110900ab122d2d3062347 100644 (file)
@@ -694,7 +694,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
        .destroy = intel_encoder_destroy,
 };
 
-static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
+static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
 {
        DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
        return 1;
index 629527d205de88dbe1463b9a127987ceafde9dc1..4605c3877c955cc7374583df3b37784e1cd34056 100644 (file)
@@ -396,6 +396,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                }
        }
 
+       /* Enforce ordering by reading HEAD register back */
+       I915_READ_HEAD(ring);
+
        /* Initialize the ring. This must happen _after_ we've cleared the ring
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
index a202d8d08c56a792f888426659b6fe8ea7ed5673..7c4e3126df277421e6c8bc92bc39c86565143cc4 100644 (file)
@@ -856,6 +856,10 @@ intel_enable_tv(struct intel_encoder *encoder)
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       /* Prevents vblank waits from timing out in intel_tv_detect_type() */
+       intel_wait_for_vblank(encoder->base.dev,
+                             to_intel_crtc(encoder->base.crtc)->pipe);
+
        I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
 }
 
index 2d9b9d7a7992111cc1d4bd0ff3e60254bf52dcb5..f3edd2841f2df42a38db7d13c63c71d5bb122658 100644 (file)
@@ -124,6 +124,7 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
               struct dcb_output *outp)
 {
        u16 dcb = dcb_outp(bios, idx, ver, len);
+       memset(outp, 0x00, sizeof(*outp));
        if (dcb) {
                if (*ver >= 0x20) {
                        u32 conn = nv_ro32(bios, dcb + 0x00);
index 9b794c933c811dde3289f760be63bf1e026cd71c..b5df614660a8f1304d9cbaa0379e4d25160de416 100644 (file)
@@ -199,7 +199,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
 static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
                struct page **pages, uint32_t npages, uint32_t roll)
 {
-       dma_addr_t pat_pa = 0;
+       dma_addr_t pat_pa = 0, data_pa = 0;
        uint32_t *data;
        struct pat *pat;
        struct refill_engine *engine = txn->engine_handle;
@@ -223,7 +223,9 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
                        .lut_id = engine->tcm->lut_id,
                };
 
-       data = alloc_dma(txn, 4*i, &pat->data_pa);
+       data = alloc_dma(txn, 4*i, &data_pa);
+       /* FIXME: what if data_pa is more than 32-bit ? */
+       pat->data_pa = data_pa;
 
        while (i--) {
                int n = i + roll;
index ebbdf4132e9cb2175fabd6422ac17365eaa10d4a..2272c66f1842327b70a1c16efbcef01251ab8b08 100644 (file)
@@ -806,7 +806,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
                        omap_obj->paddr = tiler_ssptr(block);
                        omap_obj->block = block;
 
-                       DBG("got paddr: %08x", omap_obj->paddr);
+                       DBG("got paddr: %pad", &omap_obj->paddr);
                }
 
                omap_obj->paddr_cnt++;
@@ -1004,9 +1004,9 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
        if (obj->map_list.map)
                off = (uint64_t)obj->map_list.hash.key;
 
-       seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
+       seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
                        omap_obj->flags, obj->name, obj->refcount.refcount.counter,
-                       off, omap_obj->paddr, omap_obj->paddr_cnt,
+                       off, &omap_obj->paddr, omap_obj->paddr_cnt,
                        omap_obj->vaddr, omap_obj->roll);
 
        if (omap_obj->flags & OMAP_BO_TILED) {
@@ -1489,8 +1489,8 @@ void omap_gem_init(struct drm_device *dev)
                        entry->paddr = tiler_ssptr(block);
                        entry->block = block;
 
-                       DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
-                                       entry->paddr,
+                       DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
+                                       &entry->paddr,
                                        usergart[i].stride_pfn << PAGE_SHIFT);
                }
        }
index 8d225d7ff4e300319211fd8ec80eeccfb4e8fdc2..6d01c2ad842869132ca50c9c56b40ae0a0b37224 100644 (file)
@@ -146,8 +146,8 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
        DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
                        info->out_width, info->out_height,
                        info->screen_width);
-       DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
-                       info->paddr, info->p_uv_addr);
+       DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
+                       &info->paddr, &info->p_uv_addr);
 
        /* TODO: */
        ilace = false;
index 21393dc4700a09697b7551d274b18a96ff5938ab..f4b6b89b98f3ee18fb14ed9887adc4c7aa21a1c4 100644 (file)
@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
 
        pending = xchg(&qdev->ram_header->int_pending, 0);
 
+       if (!pending)
+               return IRQ_NONE;
+
        atomic_inc(&qdev->irq_received);
 
        if (pending & QXL_INTERRUPT_DISPLAY) {
index 5802d74863543866176346c09e9929129b41690d..1b564d7e419138d18f7a9c4ff6534de8fd9a3de8 100644 (file)
@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
        struct backlight_properties props;
        struct radeon_backlight_privdata *pdata;
        struct radeon_encoder_atom_dig *dig;
-       u8 backlight_level;
        char bl_name[16];
 
        /* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
 
        pdata->encoder = radeon_encoder;
 
-       backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
-
        dig = radeon_encoder->enc_priv;
        dig->bl_dev = bd;
 
        bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+       /* Set a reasonable default here if the level is 0 otherwise
+        * fbdev will attempt to turn the backlight on after console
+        * unblanking and it will try and restore 0 which turns the backlight
+        * off again.
+        */
+       if (bd->props.brightness == 0)
+               bd->props.brightness = RADEON_MAX_BL_LEVEL;
        bd->props.power = FB_BLANK_UNBLANK;
        backlight_update_status(bd);
 
index e62a9ce3e4dc51b4fd840f54b2bfdbdc07c804d2..ead08a49bec0fc16962ba3cc0ece2f593395048f 100644 (file)
@@ -2379,6 +2379,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                                        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                        tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
                                        WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                                }
                        } else {
                                tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
index efb06e34aed732ca8cece5e3475a31b38c4ddf30..ba2ab9a9b9885a90dcceba9deb5a71c64e007a9e 100644 (file)
@@ -463,6 +463,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                }
        }
 
+       /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
+       if ((dev->pdev->device == 0x9805) &&
+           (dev->pdev->subsystem_vendor == 0x1734) &&
+           (dev->pdev->subsystem_device == 0x11bd)) {
+               if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+                       return false;
+       }
 
        return true;
 }
@@ -1908,7 +1915,7 @@ static const char *thermal_controller_names[] = {
        "adm1032",
        "adm1030",
        "max6649",
-       "lm64",
+       "lm63", /* lm64 */
        "f75375",
        "asc7xxx",
 };
@@ -1919,7 +1926,7 @@ static const char *pp_lib_thermal_controller_names[] = {
        "adm1032",
        "adm1030",
        "max6649",
-       "lm64",
+       "lm63", /* lm64 */
        "f75375",
        "RV6xx",
        "RV770",
index 06ccfe477650926ae67743eff95dc1f2efa71675..a84de32a91f57dbcddfeeefaf60310dad02c29e9 100644 (file)
@@ -688,6 +688,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
        struct radeon_device *rdev = dev->dev_private;
        int ret = 0;
 
+       /* don't leak the edid if we already fetched it in detect() */
+       if (radeon_connector->edid)
+               goto got_edid;
+
        /* on hw with routers, select right port */
        if (radeon_connector->router.ddc_valid)
                radeon_router_select_ddc_port(radeon_connector);
@@ -727,6 +731,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
                        radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
        }
        if (radeon_connector->edid) {
+got_edid:
                drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
                ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
                drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
index 2b5461bcd9fb9da8fcaa0a980e1a32f9ef4161aa..f5ddd35507965598818af3a28a2d7899cec877e0 100644 (file)
@@ -78,6 +78,7 @@ static int modeset_init(struct drm_device *dev)
        if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
                /* oh nos! */
                dev_err(dev->dev, "no encoders/connectors found\n");
+               drm_mode_config_cleanup(dev);
                return -ENXIO;
        }
 
@@ -116,6 +117,7 @@ static int tilcdc_unload(struct drm_device *dev)
        struct tilcdc_drm_private *priv = dev->dev_private;
        struct tilcdc_module *mod, *cur;
 
+       drm_fbdev_cma_fini(priv->fbdev);
        drm_kms_helper_poll_fini(dev);
        drm_mode_config_cleanup(dev);
        drm_vblank_cleanup(dev);
@@ -169,33 +171,37 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
        dev->dev_private = priv;
 
        priv->wq = alloc_ordered_workqueue("tilcdc", 0);
+       if (!priv->wq) {
+               ret = -ENOMEM;
+               goto fail_free_priv;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev->dev, "failed to get memory resource\n");
                ret = -EINVAL;
-               goto fail;
+               goto fail_free_wq;
        }
 
        priv->mmio = ioremap_nocache(res->start, resource_size(res));
        if (!priv->mmio) {
                dev_err(dev->dev, "failed to ioremap\n");
                ret = -ENOMEM;
-               goto fail;
+               goto fail_free_wq;
        }
 
        priv->clk = clk_get(dev->dev, "fck");
        if (IS_ERR(priv->clk)) {
                dev_err(dev->dev, "failed to get functional clock\n");
                ret = -ENODEV;
-               goto fail;
+               goto fail_iounmap;
        }
 
        priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
        if (IS_ERR(priv->clk)) {
                dev_err(dev->dev, "failed to get display clock\n");
                ret = -ENODEV;
-               goto fail;
+               goto fail_put_clk;
        }
 
 #ifdef CONFIG_CPU_FREQ
@@ -205,7 +211,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
                        CPUFREQ_TRANSITION_NOTIFIER);
        if (ret) {
                dev_err(dev->dev, "failed to register cpufreq notifier\n");
-               goto fail;
+               goto fail_put_disp_clk;
        }
 #endif
 
@@ -237,13 +243,13 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
        ret = modeset_init(dev);
        if (ret < 0) {
                dev_err(dev->dev, "failed to initialize mode setting\n");
-               goto fail;
+               goto fail_cpufreq_unregister;
        }
 
        ret = drm_vblank_init(dev, 1);
        if (ret < 0) {
                dev_err(dev->dev, "failed to initialize vblank\n");
-               goto fail;
+               goto fail_mode_config_cleanup;
        }
 
        pm_runtime_get_sync(dev->dev);
@@ -251,7 +257,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
        pm_runtime_put_sync(dev->dev);
        if (ret < 0) {
                dev_err(dev->dev, "failed to install IRQ handler\n");
-               goto fail;
+               goto fail_vblank_cleanup;
        }
 
        platform_set_drvdata(pdev, dev);
@@ -259,13 +265,48 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
        priv->fbdev = drm_fbdev_cma_init(dev, 16,
                        dev->mode_config.num_crtc,
                        dev->mode_config.num_connector);
+       if (IS_ERR(priv->fbdev)) {
+               ret = PTR_ERR(priv->fbdev);
+               goto fail_irq_uninstall;
+       }
 
        drm_kms_helper_poll_init(dev);
 
        return 0;
 
-fail:
-       tilcdc_unload(dev);
+fail_irq_uninstall:
+       pm_runtime_get_sync(dev->dev);
+       drm_irq_uninstall(dev);
+       pm_runtime_put_sync(dev->dev);
+
+fail_vblank_cleanup:
+       drm_vblank_cleanup(dev);
+
+fail_mode_config_cleanup:
+       drm_mode_config_cleanup(dev);
+
+fail_cpufreq_unregister:
+       pm_runtime_disable(dev->dev);
+#ifdef CONFIG_CPU_FREQ
+       cpufreq_unregister_notifier(&priv->freq_transition,
+                       CPUFREQ_TRANSITION_NOTIFIER);
+fail_put_disp_clk:
+       clk_put(priv->disp_clk);
+#endif
+
+fail_put_clk:
+       clk_put(priv->clk);
+
+fail_iounmap:
+       iounmap(priv->mmio);
+
+fail_free_wq:
+       flush_workqueue(priv->wq);
+       destroy_workqueue(priv->wq);
+
+fail_free_priv:
+       dev->dev_private = NULL;
+       kfree(priv);
        return ret;
 }
 
@@ -596,10 +637,10 @@ static int __init tilcdc_drm_init(void)
 static void __exit tilcdc_drm_fini(void)
 {
        DBG("fini");
-       tilcdc_tfp410_fini();
-       tilcdc_slave_fini();
-       tilcdc_panel_fini();
        platform_driver_unregister(&tilcdc_platform_driver);
+       tilcdc_panel_fini();
+       tilcdc_slave_fini();
+       tilcdc_tfp410_fini();
 }
 
 late_initcall(tilcdc_drm_init);
index 09176654fddb9ccc2aba3a697ec58192516c6245..779d508616d3085882ed63e46fb1e50f38090b56 100644 (file)
@@ -151,6 +151,7 @@ struct panel_connector {
 static void panel_connector_destroy(struct drm_connector *connector)
 {
        struct panel_connector *panel_connector = to_panel_connector(connector);
+       drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
        kfree(panel_connector);
 }
@@ -285,10 +286,8 @@ static void panel_destroy(struct tilcdc_module *mod)
 {
        struct panel_module *panel_mod = to_panel_module(mod);
 
-       if (panel_mod->timings) {
+       if (panel_mod->timings)
                display_timings_release(panel_mod->timings);
-               kfree(panel_mod->timings);
-       }
 
        tilcdc_module_cleanup(mod);
        kfree(panel_mod->info);
index db1d2fc9dfb51dbdb7e066fcab4feee2cc6fcb7c..5d6c597a5d69988dd5306eb830aede3de89db756 100644 (file)
@@ -142,6 +142,7 @@ struct slave_connector {
 static void slave_connector_destroy(struct drm_connector *connector)
 {
        struct slave_connector *slave_connector = to_slave_connector(connector);
+       drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
        kfree(slave_connector);
 }
index a36788fbcd98416d37781fe20cf76999b977656a..986131dd9f471ec78a4c2e6e726197e14df6c68b 100644 (file)
@@ -168,6 +168,7 @@ struct tfp410_connector {
 static void tfp410_connector_destroy(struct drm_connector *connector)
 {
        struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+       drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
        kfree(tfp410_connector);
 }
index b8b394319b45947facd37036817b73ac39a2661c..de1a753b1d563549a652987872a01d995c1782e3 100644 (file)
@@ -1006,9 +1006,9 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
 static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
                                  struct shrink_control *sc)
 {
-       static atomic_t start_pool = ATOMIC_INIT(0);
+       static unsigned start_pool;
        unsigned idx = 0;
-       unsigned pool_offset = atomic_add_return(1, &start_pool);
+       unsigned pool_offset;
        unsigned shrink_pages = sc->nr_to_scan;
        struct device_pools *p;
 
@@ -1016,7 +1016,9 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
                return 0;
 
        mutex_lock(&_manager->lock);
-       pool_offset = pool_offset % _manager->npools;
+       if (!_manager->npools)
+               goto out;
+       pool_offset = ++start_pool % _manager->npools;
        list_for_each_entry(p, &_manager->pools, pools) {
                unsigned nr_free;
 
@@ -1033,6 +1035,7 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
                         p->pool->dev_name, p->pool->name, current->pid,
                         nr_free, shrink_pages);
        }
+out:
        mutex_unlock(&_manager->lock);
        /* return estimated number of unused pages in pool */
        return ttm_dma_pool_get_num_unused_pages();
index 3eb148667d6382f003969757db0b9dd26555f909..89664933861fd27d4bda5bc963eb6f2f3a369dc7 100644 (file)
@@ -163,8 +163,9 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
        mutex_lock(&dev_priv->hw_mutex);
 
+       vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
        while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
-               vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+               ;
 
        dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
index d72b0c46acf82be5454f4ee3ef4bc35f9460e7d6..1b8a10f88606535b1da2bbdd115ac455ca05e18f 100755 (executable)
 #include <linux/err.h>
 #include <linux/hrtimer.h>
 #include <linux/switch.h>
-#include <linux/input.h>
 #include <linux/debugfs.h>
 #include <linux/wakelock.h>
-#include <asm/gpio.h>
+#include <linux/gpio.h>
 #include <asm/atomic.h>
-#include <asm/mach-types.h>
 #include <linux/pm.h>
 #include <linux/i2c.h>
 #include <linux/spi/spi.h>
index cb119058670081246925ab6dfe53156a43dfc158..4f8d53555f75634ce8354e6402d1c20c6a642e3d 100755 (executable)
@@ -26,7 +26,6 @@
 #include <linux/err.h>
 #include <linux/hrtimer.h>
 #include <linux/switch.h>
-#include <linux/input.h>
 #include <linux/debugfs.h>
 #include <linux/wakelock.h>
 #include <linux/pm.h>
 #include <linux/iio/consumer.h>
 #include <linux/adc.h>
 #include <linux/wakelock.h>
+#include <linux/gpio.h>
 
-#include <asm/gpio.h>
 #include <asm/atomic.h>
-#include <asm/mach-types.h>
 
 #include "rk_headset.h"
 
index 1bdcccc54a1dda0e04d16fc9fbfe2d3d8e1e22b2..f745d2c1325ec8872a376e668cdd71b3c9c523f3 100644 (file)
@@ -28,7 +28,7 @@
 static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
+       if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
                hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
                rdesc[11] = rdesc[16] = 0xff;
                rdesc[12] = rdesc[17] = 0x03;
index 6af90dbdc3d45e1295db182588c6a8bbae2978ca..843f2dd55200a4c92d53652d84472800f2f4f35f 100644 (file)
@@ -280,7 +280,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                 *   - change the button usage range to 4-7 for the extra
                 *     buttons
                 */
-               if (*rsize >= 74 &&
+               if (*rsize >= 75 &&
                        rdesc[61] == 0x05 && rdesc[62] == 0x08 &&
                        rdesc[63] == 0x19 && rdesc[64] == 0x08 &&
                        rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
index 06eb45fa6331fee64152754a5ab9bb2275e3a61c..12fc48c968e6964282be69e6939e2f58d19fe0ba 100644 (file)
@@ -345,14 +345,14 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
        struct usb_device_descriptor *udesc;
        __u16 bcdDevice, rev_maj, rev_min;
 
-       if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
+       if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 &&
                        rdesc[84] == 0x8c && rdesc[85] == 0x02) {
                hid_info(hdev,
                         "fixing up Logitech keyboard report descriptor\n");
                rdesc[84] = rdesc[89] = 0x4d;
                rdesc[85] = rdesc[90] = 0x10;
        }
-       if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 &&
+       if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 &&
                        rdesc[32] == 0x81 && rdesc[33] == 0x06 &&
                        rdesc[49] == 0x81 && rdesc[50] == 0x06) {
                hid_info(hdev,
index 1be9156a395055375f76e2688371ae20fee04e9b..d4c6d9f85ca517816141366453da03fb5ae70ff5 100644 (file)
@@ -237,13 +237,6 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
                return;
        }
 
-       if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
-           (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
-               dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
-                       __func__, dj_report->device_index);
-               return;
-       }
-
        if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
                /* The device is already known. No need to reallocate it. */
                dbg_hid("%s: device is already known\n", __func__);
@@ -686,7 +679,6 @@ static int logi_dj_raw_event(struct hid_device *hdev,
        struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
        struct dj_report *dj_report = (struct dj_report *) data;
        unsigned long flags;
-       bool report_processed = false;
 
        dbg_hid("%s, size:%d\n", __func__, size);
 
@@ -714,27 +706,41 @@ static int logi_dj_raw_event(struct hid_device *hdev,
         * anything else with it.
         */
 
+       /* case 1) */
+       if (data[0] != REPORT_ID_DJ_SHORT)
+               return false;
+
+       if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+           (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+               /*
+                * Device index is wrong, bail out.
+                * This driver can ignore safely the receiver notifications,
+                * so ignore those reports too.
+                */
+               if (dj_report->device_index != DJ_RECEIVER_INDEX)
+                       dev_err(&hdev->dev, "%s: invalid device index:%d\n",
+                               __func__, dj_report->device_index);
+               return false;
+       }
+
        spin_lock_irqsave(&djrcv_dev->lock, flags);
-       if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
-               switch (dj_report->report_type) {
-               case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
-               case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
-                       logi_dj_recv_queue_notification(djrcv_dev, dj_report);
-                       break;
-               case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
-                       if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
-                           STATUS_LINKLOSS) {
-                               logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
-                       }
-                       break;
-               default:
-                       logi_dj_recv_forward_report(djrcv_dev, dj_report);
+       switch (dj_report->report_type) {
+       case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+       case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+               logi_dj_recv_queue_notification(djrcv_dev, dj_report);
+               break;
+       case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
+               if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
+                   STATUS_LINKLOSS) {
+                       logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
                }
-               report_processed = true;
+               break;
+       default:
+               logi_dj_recv_forward_report(djrcv_dev, dj_report);
        }
        spin_unlock_irqrestore(&djrcv_dev->lock, flags);
 
-       return report_processed;
+       return true;
 }
 
 static int logi_dj_probe(struct hid_device *hdev,
index 4a4000340ce1ed8cf6be1f81ee8e0f21d8ba5f5a..daeb0aa4bee99a60f3391c0ab4a56803b37a2568 100644 (file)
@@ -27,6 +27,7 @@
 
 #define DJ_MAX_PAIRED_DEVICES                  6
 #define DJ_MAX_NUMBER_NOTIFICATIONS            8
+#define DJ_RECEIVER_INDEX                      0
 #define DJ_DEVICE_INDEX_MIN                    1
 #define DJ_DEVICE_INDEX_MAX                    6
 
index 5bc37343eb22b3de7f3cca6163c6f1fd9a10dd4b..c24f3dfd9367e94034c36065a3c6520c7d111193 100644 (file)
@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
                if (size < 4 || ((size - 4) % 9) != 0)
                        return 0;
                npoints = (size - 4) / 9;
+               if (npoints > 15) {
+                       hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
+                                       size);
+                       return 0;
+               }
                msc->ntouches = 0;
                for (ii = 0; ii < npoints; ii++)
                        magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
                if (size < 6 || ((size - 6) % 8) != 0)
                        return 0;
                npoints = (size - 6) / 8;
+               if (npoints > 15) {
+                       hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
+                                       size);
+                       return 0;
+               }
                msc->ntouches = 0;
                for (ii = 0; ii < npoints; ii++)
                        magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
index 9e14c00eb1b6bb105ffd1326dc802183cfeb9a1a..25daf28b26bdf6b4d921e501bb49d4646e9aed55 100644 (file)
@@ -24,7 +24,7 @@
 static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
+       if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
                hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
                rdesc[30] = 0x0c;
        }
index 736b2502df4f8b00473889f6abf0ac78dcf72fe3..6aca4f2554bf4d748df6fc629276704e740ea40e 100644 (file)
@@ -25,7 +25,7 @@
 static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
+       if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
                        rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
                        rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
                hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
index acbb021065ece8287c9d3ea433c860afc0711855..020df3c2e8b42717c62bbe0470aa47845535e4a5 100644 (file)
@@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev,
        if (!data)
                return 1;
 
+       if (size > 64) {
+               hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
+                               size);
+               return 0;
+       }
+
        if (report->id == REPORT_KEY_STATE) {
                if (data->input_keys)
                        ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
index 87fc91e1c8de4980d2f8e8721f476b6d21959adf..91072fa54663e747908dd09bb11b431eaa4208c5 100644 (file)
@@ -24,7 +24,7 @@
 static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
+       if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
                        rdesc[106] == 0x03) {
                hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n");
                rdesc[105] = rdesc[110] = 0x03;
index 0b122f8c7005b75262d88fd0c017e8b3e0faa9aa..92f34de7aee93f78ec582996dd281533059b82f8 100644 (file)
@@ -199,8 +199,10 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
        ret = vmbus_post_msg(open_msg,
                               sizeof(struct vmbus_channel_open_channel));
 
-       if (ret != 0)
+       if (ret != 0) {
+               err = ret;
                goto error1;
+       }
 
        t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
        if (t == 0) {
@@ -392,7 +394,6 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
        u32 next_gpadl_handle;
        unsigned long flags;
        int ret = 0;
-       int t;
 
        next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
        atomic_inc(&vmbus_connection.next_gpadl_handle);
@@ -439,9 +440,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
 
                }
        }
-       t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
-       BUG_ON(t == 0);
-
+       wait_for_completion(&msginfo->waitevent);
 
        /* At this point, we received the gpadl created msg */
        *gpadl_handle = gpadlmsg->gpadl;
@@ -464,7 +463,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
        struct vmbus_channel_gpadl_teardown *msg;
        struct vmbus_channel_msginfo *info;
        unsigned long flags;
-       int ret, t;
+       int ret;
 
        info = kmalloc(sizeof(*info) +
                       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
@@ -486,11 +485,12 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
        ret = vmbus_post_msg(msg,
                               sizeof(struct vmbus_channel_gpadl_teardown));
 
-       BUG_ON(ret != 0);
-       t = wait_for_completion_timeout(&info->waitevent, 5*HZ);
-       BUG_ON(t == 0);
+       if (ret)
+               goto post_msg_err;
+
+       wait_for_completion(&info->waitevent);
 
-       /* Received a torndown response */
+post_msg_err:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
index b9f5d295cbec7d69687e699232b180e746310a8d..a3b5558087683360559d6c4c59f65b0985e06790 100644 (file)
@@ -393,10 +393,21 @@ int vmbus_post_msg(void *buffer, size_t buflen)
         * insufficient resources. Retry the operation a couple of
         * times before giving up.
         */
-       while (retries < 3) {
-               ret =  hv_post_message(conn_id, 1, buffer, buflen);
-               if (ret != HV_STATUS_INSUFFICIENT_BUFFERS)
+       while (retries < 10) {
+               ret = hv_post_message(conn_id, 1, buffer, buflen);
+
+               switch (ret) {
+               case HV_STATUS_INSUFFICIENT_BUFFERS:
+                       ret = -ENOMEM;
+               case -ENOMEM:
+                       break;
+               case HV_STATUS_SUCCESS:
                        return ret;
+               default:
+                       pr_err("hv_post_msg() failed; error code:%d\n", ret);
+                       return -EINVAL;
+               }
+
                retries++;
                msleep(100);
        }
index ed50e9e83c61a8ec8690c757876a0300096f59d4..0e8c1ea4dd5335a7440492638ea311e9ae83d7dc 100644 (file)
@@ -111,6 +111,15 @@ kvp_work_func(struct work_struct *dummy)
        kvp_respond_to_host(NULL, HV_E_FAIL);
 }
 
+static void poll_channel(struct vmbus_channel *channel)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&channel->inbound_lock, flags);
+       hv_kvp_onchannelcallback(channel);
+       spin_unlock_irqrestore(&channel->inbound_lock, flags);
+}
+
 static int kvp_handle_handshake(struct hv_kvp_msg *msg)
 {
        int ret = 1;
@@ -139,7 +148,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
                kvp_register(dm_reg_value);
                kvp_transaction.active = false;
                if (kvp_transaction.kvp_context)
-                       hv_kvp_onchannelcallback(kvp_transaction.kvp_context);
+                       poll_channel(kvp_transaction.kvp_context);
        }
        return ret;
 }
@@ -552,6 +561,7 @@ response_done:
 
        vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
                                VM_PKT_DATA_INBAND, 0);
+       poll_channel(channel);
 
 }
 
@@ -585,7 +595,7 @@ void hv_kvp_onchannelcallback(void *context)
                return;
        }
 
-       vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+       vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
                         &requestid);
 
        if (recvlen > 0) {
index 2f561c5dfe249085c89e6328b390c0d414e332ce..64c778f7756f8fce2554ef552788ae926993ca51 100644 (file)
@@ -279,7 +279,7 @@ static int util_probe(struct hv_device *dev,
                (struct hv_util_service *)dev_id->driver_data;
        int ret;
 
-       srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
+       srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
        if (!srv->recv_buffer)
                return -ENOMEM;
        if (srv->util_init) {
index 2798246ad81470988fadd920dee5e9282f463445..3930a7e7a56d538ee57ba3e72a65e46b5779de3b 100644 (file)
@@ -184,7 +184,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
                }
 
                channel = be32_to_cpup(property);
-               if (channel > ADS1015_CHANNELS) {
+               if (channel >= ADS1015_CHANNELS) {
                        dev_err(&client->dev,
                                "invalid channel index %d on %s\n",
                                channel, node->full_name);
@@ -198,6 +198,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
                                dev_err(&client->dev,
                                        "invalid gain on %s\n",
                                        node->full_name);
+                               return -EINVAL;
                        }
                }
 
@@ -208,6 +209,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
                                dev_err(&client->dev,
                                        "invalid data_rate on %s\n",
                                        node->full_name);
+                               return -EINVAL;
                        }
                }
 
index 58637355c1f66c1ed876f9363ab84d7ae989def7..79610bdf1d352b08db9b49e80607dba566a505db 100644 (file)
@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = clamp_val(temp, 0, 255);
+       temp = clamp_val(temp, -128, 127);
 
        mutex_lock(&data->lock);
        data->temp_min[attr->index] = temp;
@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = clamp_val(temp, 0, 255);
+       temp = clamp_val(temp, -128, 127);
 
        mutex_lock(&data->lock);
        data->temp_max[attr->index] = temp;
@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = clamp_val(temp, 0, 255);
+       temp = clamp_val(temp, -128, 127);
 
        mutex_lock(&data->lock);
        data->pwm_tmin[attr->index] = temp;
index 960fac3fb16648ce327eabf2d84018e42c871cb0..48044b044b7a673fcb305b04d64ee7413d4e069a 100644 (file)
@@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
                                      struct device_attribute *devattr,
                                      char *buf)
 {
-       return sprintf(buf, "da9052-hwmon\n");
+       return sprintf(buf, "da9052\n");
 }
 
 static ssize_t show_label(struct device *dev,
index 029ecabc4380dddae08fc62fb15d67c111caaaf2..1b275a2881d6749b18a9e57bd7272de5e1d2e889 100644 (file)
@@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
                                      struct device_attribute *devattr,
                                      char *buf)
 {
-       return sprintf(buf, "da9055-hwmon\n");
+       return sprintf(buf, "da9055\n");
 }
 
 static ssize_t show_label(struct device *dev,
index 4ae3fff13f4498dbef26281679cfc6b9dc0bfe48..bea0a344fab57b4f39b855d2997256b5d1e01b93 100644 (file)
@@ -247,8 +247,8 @@ struct dme1737_data {
        u8  pwm_acz[3];
        u8  pwm_freq[6];
        u8  pwm_rr[2];
-       u8  zone_low[3];
-       u8  zone_abs[3];
+       s8  zone_low[3];
+       s8  zone_abs[3];
        u8  zone_hyst[2];
        u32 alarms;
 };
@@ -277,7 +277,7 @@ static inline int IN_FROM_REG(int reg, int nominal, int res)
        return (reg * nominal + (3 << (res - 3))) / (3 << (res - 2));
 }
 
-static inline int IN_TO_REG(int val, int nominal)
+static inline int IN_TO_REG(long val, int nominal)
 {
        return clamp_val((val * 192 + nominal / 2) / nominal, 0, 255);
 }
@@ -293,7 +293,7 @@ static inline int TEMP_FROM_REG(int reg, int res)
        return (reg * 1000) >> (res - 8);
 }
 
-static inline int TEMP_TO_REG(int val)
+static inline int TEMP_TO_REG(long val)
 {
        return clamp_val((val < 0 ? val - 500 : val + 500) / 1000, -128, 127);
 }
@@ -308,7 +308,7 @@ static inline int TEMP_RANGE_FROM_REG(int reg)
        return TEMP_RANGE[(reg >> 4) & 0x0f];
 }
 
-static int TEMP_RANGE_TO_REG(int val, int reg)
+static int TEMP_RANGE_TO_REG(long val, int reg)
 {
        int i;
 
@@ -331,7 +331,7 @@ static inline int TEMP_HYST_FROM_REG(int reg, int ix)
        return (((ix == 1) ? reg : reg >> 4) & 0x0f) * 1000;
 }
 
-static inline int TEMP_HYST_TO_REG(int val, int ix, int reg)
+static inline int TEMP_HYST_TO_REG(long val, int ix, int reg)
 {
        int hyst = clamp_val((val + 500) / 1000, 0, 15);
 
@@ -347,7 +347,7 @@ static inline int FAN_FROM_REG(int reg, int tpc)
                return (reg == 0 || reg == 0xffff) ? 0 : 90000 * 60 / reg;
 }
 
-static inline int FAN_TO_REG(int val, int tpc)
+static inline int FAN_TO_REG(long val, int tpc)
 {
        if (tpc) {
                return clamp_val(val / tpc, 0, 0xffff);
@@ -379,7 +379,7 @@ static inline int FAN_TYPE_FROM_REG(int reg)
        return (edge > 0) ? 1 << (edge - 1) : 0;
 }
 
-static inline int FAN_TYPE_TO_REG(int val, int reg)
+static inline int FAN_TYPE_TO_REG(long val, int reg)
 {
        int edge = (val == 4) ? 3 : val;
 
@@ -402,7 +402,7 @@ static int FAN_MAX_FROM_REG(int reg)
        return 1000 + i * 500;
 }
 
-static int FAN_MAX_TO_REG(int val)
+static int FAN_MAX_TO_REG(long val)
 {
        int i;
 
@@ -460,7 +460,7 @@ static inline int PWM_ACZ_FROM_REG(int reg)
        return acz[(reg >> 5) & 0x07];
 }
 
-static inline int PWM_ACZ_TO_REG(int val, int reg)
+static inline int PWM_ACZ_TO_REG(long val, int reg)
 {
        int acz = (val == 4) ? 2 : val - 1;
 
@@ -476,7 +476,7 @@ static inline int PWM_FREQ_FROM_REG(int reg)
        return PWM_FREQ[reg & 0x0f];
 }
 
-static int PWM_FREQ_TO_REG(int val, int reg)
+static int PWM_FREQ_TO_REG(long val, int reg)
 {
        int i;
 
@@ -510,7 +510,7 @@ static inline int PWM_RR_FROM_REG(int reg, int ix)
        return (rr & 0x08) ? PWM_RR[rr & 0x07] : 0;
 }
 
-static int PWM_RR_TO_REG(int val, int ix, int reg)
+static int PWM_RR_TO_REG(long val, int ix, int reg)
 {
        int i;
 
@@ -528,7 +528,7 @@ static inline int PWM_RR_EN_FROM_REG(int reg, int ix)
        return PWM_RR_FROM_REG(reg, ix) ? 1 : 0;
 }
 
-static inline int PWM_RR_EN_TO_REG(int val, int ix, int reg)
+static inline int PWM_RR_EN_TO_REG(long val, int ix, int reg)
 {
        int en = (ix == 1) ? 0x80 : 0x08;
 
@@ -1481,13 +1481,16 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
                       const char *buf, size_t count)
 {
        struct dme1737_data *data = dev_get_drvdata(dev);
-       long val;
+       unsigned long val;
        int err;
 
-       err = kstrtol(buf, 10, &val);
+       err = kstrtoul(buf, 10, &val);
        if (err)
                return err;
 
+       if (val > 255)
+               return -EINVAL;
+
        data->vrm = val;
        return count;
 }
index 3104149795c582e8ceac8780aca390059eafc721..ce1d82762ba6aee61a65c9bc422dfd3191a5d0da 100644 (file)
@@ -172,7 +172,7 @@ static int get_fan_speed_index(struct gpio_fan_data *fan_data)
        return -EINVAL;
 }
 
-static int rpm_to_speed_index(struct gpio_fan_data *fan_data, int rpm)
+static int rpm_to_speed_index(struct gpio_fan_data *fan_data, unsigned long rpm)
 {
        struct gpio_fan_speed *speed = fan_data->speed;
        int i;
index a2f3b4a365e4bbafa17385df4fdca298a7ece0e5..b879427e9a46e601215f1c3542c7d000bb4cd39b 100644 (file)
@@ -108,7 +108,7 @@ static inline int FAN_FROM_REG(u8 val, int div)
  * TEMP: mC (-128C to +127C)
  * REG: 1C/bit, two's complement
  */
-static inline s8 TEMP_TO_REG(int val)
+static inline s8 TEMP_TO_REG(long val)
 {
        int nval = clamp_val(val, -128000, 127000) ;
        return nval < 0 ? (nval - 500) / 1000 : (nval + 500) / 1000;
index 3894c408fda3cedc1230742cc5c0a9a653227cd5..b9d6e7d0ba37c3207a0fe414c9199c93fb2f0111 100644 (file)
@@ -158,7 +158,7 @@ static inline u16 FAN_TO_REG(unsigned long val)
 
 /* Temperature is reported in .001 degC increments */
 #define TEMP_TO_REG(val)       \
-               clamp_val(SCALE(val, 1000, 1), -127, 127)
+               DIV_ROUND_CLOSEST(clamp_val((val), -127000, 127000), 1000)
 #define TEMPEXT_FROM_REG(val, ext)     \
                SCALE(((val) << 4) + (ext), 16, 1000)
 #define TEMP_FROM_REG(val)     ((val) * 1000)
@@ -192,7 +192,7 @@ static const int lm85_range_map[] = {
        13300, 16000, 20000, 26600, 32000, 40000, 53300, 80000
 };
 
-static int RANGE_TO_REG(int range)
+static int RANGE_TO_REG(long range)
 {
        int i;
 
@@ -214,7 +214,7 @@ static const int adm1027_freq_map[8] = { /* 1 Hz */
        11, 15, 22, 29, 35, 44, 59, 88
 };
 
-static int FREQ_TO_REG(const int *map, int freq)
+static int FREQ_TO_REG(const int *map, unsigned long freq)
 {
        int i;
 
@@ -463,6 +463,9 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
        if (err)
                return err;
 
+       if (val > 255)
+               return -EINVAL;
+
        data->vrm = val;
        return count;
 }
index 72a889702f0dc091b895f653ce39a540aa40bf3b..9ec7d2e2542cdb5c4078c8e056aa46e2dd39c95e 100644 (file)
@@ -159,7 +159,7 @@ static inline int TEMP_FROM_REG(s8 val)
 {
        return val * 830 + 52120;
 }
-static inline s8 TEMP_TO_REG(int val)
+static inline s8 TEMP_TO_REG(long val)
 {
        int nval = clamp_val(val, -54120, 157530) ;
        return nval < 0 ? (nval - 5212 - 415) / 830 : (nval - 5212 + 415) / 830;
index efee4c59239fcff8aa7b675c01cb5b9ac6bab5bd..34b9a601ad078c394513e67a5dccc345c6c02fef 100644 (file)
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
  */
 static inline s8 TEMP_TO_REG(int val)
 {
-       return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
+       return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
 }
 
 static inline int TEMP_FROM_REG(s8 val)
@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
        err = kstrtoul(buf, 10, &val);
        if (err)
                return err;
+       if (val > 255)
+               return -EINVAL;
 
        data->vrm = val;
        return count;
index 6bb839b688be4f17dd0b435f46bf483e054d2071..09324d0178d5721b8dd5aabd43a02b983c3aa492 100644 (file)
@@ -102,6 +102,7 @@ struct at91_twi_dev {
        unsigned twi_cwgr_reg;
        struct at91_twi_pdata *pdata;
        bool use_dma;
+       bool recv_len_abort;
        struct at91_twi_dma dma;
 };
 
@@ -211,7 +212,7 @@ static void at91_twi_write_data_dma_callback(void *data)
        struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
 
        dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
-                        dev->buf_len, DMA_MEM_TO_DEV);
+                        dev->buf_len, DMA_TO_DEVICE);
 
        at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 }
@@ -268,12 +269,24 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
        *dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff;
        --dev->buf_len;
 
+       /* return if aborting, we only needed to read RHR to clear RXRDY*/
+       if (dev->recv_len_abort)
+               return;
+
        /* handle I2C_SMBUS_BLOCK_DATA */
        if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
-               dev->msg->flags &= ~I2C_M_RECV_LEN;
-               dev->buf_len += *dev->buf;
-               dev->msg->len = dev->buf_len + 1;
-               dev_dbg(dev->dev, "received block length %d\n", dev->buf_len);
+               /* ensure length byte is a valid value */
+               if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
+                       dev->msg->flags &= ~I2C_M_RECV_LEN;
+                       dev->buf_len += *dev->buf;
+                       dev->msg->len = dev->buf_len + 1;
+                       dev_dbg(dev->dev, "received block length %d\n",
+                                        dev->buf_len);
+               } else {
+                       /* abort and send the stop by reading one more byte */
+                       dev->recv_len_abort = true;
+                       dev->buf_len = 1;
+               }
        }
 
        /* send stop if second but last byte has been read */
@@ -290,7 +303,7 @@ static void at91_twi_read_data_dma_callback(void *data)
        struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
 
        dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
-                        dev->buf_len, DMA_DEV_TO_MEM);
+                        dev->buf_len, DMA_FROM_DEVICE);
 
        /* The last two bytes have to be read without using dma */
        dev->buf += dev->buf_len - 2;
@@ -422,8 +435,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
                }
        }
 
-       ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
-                                                       dev->adapter.timeout);
+       ret = wait_for_completion_timeout(&dev->cmd_complete,
+                                            dev->adapter.timeout);
        if (ret == 0) {
                dev_err(dev->dev, "controller timed out\n");
                at91_init_twi_bus(dev);
@@ -445,6 +458,12 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
                ret = -EIO;
                goto error;
        }
+       if (dev->recv_len_abort) {
+               dev_err(dev->dev, "invalid smbus block length recvd\n");
+               ret = -EPROTO;
+               goto error;
+       }
+
        dev_dbg(dev->dev, "transfer complete\n");
 
        return 0;
@@ -501,6 +520,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
        dev->buf_len = m_start->len;
        dev->buf = m_start->buf;
        dev->msg = m_start;
+       dev->recv_len_abort = false;
 
        ret = at91_do_twi_transfer(dev);
 
index f0d6335ae08760d418f56ced0eaec227772fa1ce..05d2733ef48cbb1e059e9dd7c2ce80e8f0225de5 100644 (file)
@@ -477,7 +477,7 @@ static int ad_sd_probe_trigger(struct iio_dev *indio_dev)
                goto error_free_irq;
 
        /* select default trigger */
-       indio_dev->trig = sigma_delta->trig;
+       indio_dev->trig = iio_trigger_get(sigma_delta->trig);
 
        return 0;
 
index 8fc3a97eb266e6302367653248017c3b6078a021..8d8ca6f1e16a5d602b182355fc1f051738dc48c2 100644 (file)
@@ -49,7 +49,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
                goto iio_trigger_register_error;
        }
-       indio_dev->trig = sdata->trig;
+       indio_dev->trig = iio_trigger_get(sdata->trig);
 
        return 0;
 
index 6c43af9bb0a4474097f2def7ec15d973a7238b47..14917fae2d9d192a61fb1de346e3122488fd3c2d 100644 (file)
@@ -135,7 +135,7 @@ int itg3200_probe_trigger(struct iio_dev *indio_dev)
                goto error_free_irq;
 
        /* select default trigger */
-       indio_dev->trig = st->trig;
+       indio_dev->trig = iio_trigger_get(st->trig);
 
        return 0;
 
index 03b9372c1212a99c5a6a2452cc081f7395a259ca..926fccea8de0233bc955da2ea7c5dc903098f76f 100644 (file)
@@ -135,7 +135,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
        ret = iio_trigger_register(st->trig);
        if (ret)
                goto error_free_irq;
-       indio_dev->trig = st->trig;
+       indio_dev->trig = iio_trigger_get(st->trig);
 
        return 0;
 
index d344cf3ac9e3f0d3c14dff4831935759a5962623..e13c5f4b12cb01b7db1aa3aac0638d28bfb595f8 100644 (file)
@@ -849,7 +849,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
 
        /* Now we have the two masks, work from least sig and build up sizes */
        for_each_set_bit(out_ind,
-                        indio_dev->active_scan_mask,
+                        buffer->scan_mask,
                         indio_dev->masklength) {
                in_ind = find_next_bit(indio_dev->active_scan_mask,
                                       indio_dev->masklength,
index 1e8e94d4db7dec54e620f5ce822b1dd0b71293cc..4fc88e617acfd6c7e2d42e804936b8ddf7d1de5c 100644 (file)
@@ -178,7 +178,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
                        index = of_property_match_string(np, "io-channel-names",
                                                         name);
                chan = of_iio_channel_get(np, index);
-               if (!IS_ERR(chan))
+               if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
                        break;
                else if (name && index >= 0) {
                        pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
index 16f0d6df239f0be75e64c388b38b3760ace5d901..3ce3769c08238e1725a544f23711e9d1de76c381 100644 (file)
@@ -40,7 +40,8 @@
 #define ST_MAGN_FS_AVL_5600MG                  5600
 #define ST_MAGN_FS_AVL_8000MG                  8000
 #define ST_MAGN_FS_AVL_8100MG                  8100
-#define ST_MAGN_FS_AVL_10000MG                 10000
+#define ST_MAGN_FS_AVL_12000MG                 12000
+#define ST_MAGN_FS_AVL_16000MG                 16000
 
 /* CUSTOM VALUES FOR SENSOR 1 */
 #define ST_MAGN_1_WAI_EXP                      0x3c
 #define ST_MAGN_1_FS_AVL_4700_VAL              0x05
 #define ST_MAGN_1_FS_AVL_5600_VAL              0x06
 #define ST_MAGN_1_FS_AVL_8100_VAL              0x07
-#define ST_MAGN_1_FS_AVL_1300_GAIN_XY          1100
-#define ST_MAGN_1_FS_AVL_1900_GAIN_XY          855
-#define ST_MAGN_1_FS_AVL_2500_GAIN_XY          670
-#define ST_MAGN_1_FS_AVL_4000_GAIN_XY          450
-#define ST_MAGN_1_FS_AVL_4700_GAIN_XY          400
-#define ST_MAGN_1_FS_AVL_5600_GAIN_XY          330
-#define ST_MAGN_1_FS_AVL_8100_GAIN_XY          230
-#define ST_MAGN_1_FS_AVL_1300_GAIN_Z           980
-#define ST_MAGN_1_FS_AVL_1900_GAIN_Z           760
-#define ST_MAGN_1_FS_AVL_2500_GAIN_Z           600
-#define ST_MAGN_1_FS_AVL_4000_GAIN_Z           400
-#define ST_MAGN_1_FS_AVL_4700_GAIN_Z           355
-#define ST_MAGN_1_FS_AVL_5600_GAIN_Z           295
-#define ST_MAGN_1_FS_AVL_8100_GAIN_Z           205
+#define ST_MAGN_1_FS_AVL_1300_GAIN_XY          909
+#define ST_MAGN_1_FS_AVL_1900_GAIN_XY          1169
+#define ST_MAGN_1_FS_AVL_2500_GAIN_XY          1492
+#define ST_MAGN_1_FS_AVL_4000_GAIN_XY          2222
+#define ST_MAGN_1_FS_AVL_4700_GAIN_XY          2500
+#define ST_MAGN_1_FS_AVL_5600_GAIN_XY          3030
+#define ST_MAGN_1_FS_AVL_8100_GAIN_XY          4347
+#define ST_MAGN_1_FS_AVL_1300_GAIN_Z           1020
+#define ST_MAGN_1_FS_AVL_1900_GAIN_Z           1315
+#define ST_MAGN_1_FS_AVL_2500_GAIN_Z           1666
+#define ST_MAGN_1_FS_AVL_4000_GAIN_Z           2500
+#define ST_MAGN_1_FS_AVL_4700_GAIN_Z           2816
+#define ST_MAGN_1_FS_AVL_5600_GAIN_Z           3389
+#define ST_MAGN_1_FS_AVL_8100_GAIN_Z           4878
 #define ST_MAGN_1_MULTIREAD_BIT                        false
 
 /* CUSTOM VALUES FOR SENSOR 2 */
 #define ST_MAGN_2_FS_MASK                      0x60
 #define ST_MAGN_2_FS_AVL_4000_VAL              0x00
 #define ST_MAGN_2_FS_AVL_8000_VAL              0x01
-#define ST_MAGN_2_FS_AVL_10000_VAL             0x02
-#define ST_MAGN_2_FS_AVL_4000_GAIN             430
-#define ST_MAGN_2_FS_AVL_8000_GAIN             230
-#define ST_MAGN_2_FS_AVL_10000_GAIN            230
+#define ST_MAGN_2_FS_AVL_12000_VAL             0x02
+#define ST_MAGN_2_FS_AVL_16000_VAL             0x03
+#define ST_MAGN_2_FS_AVL_4000_GAIN             146
+#define ST_MAGN_2_FS_AVL_8000_GAIN             292
+#define ST_MAGN_2_FS_AVL_12000_GAIN            438
+#define ST_MAGN_2_FS_AVL_16000_GAIN            584
 #define ST_MAGN_2_MULTIREAD_BIT                        false
 #define ST_MAGN_2_OUT_X_L_ADDR                 0x28
 #define ST_MAGN_2_OUT_Y_L_ADDR                 0x2a
@@ -252,9 +255,14 @@ static const struct st_sensors st_magn_sensors[] = {
                                        .gain = ST_MAGN_2_FS_AVL_8000_GAIN,
                                },
                                [2] = {
-                                       .num = ST_MAGN_FS_AVL_10000MG,
-                                       .value = ST_MAGN_2_FS_AVL_10000_VAL,
-                                       .gain = ST_MAGN_2_FS_AVL_10000_GAIN,
+                                       .num = ST_MAGN_FS_AVL_12000MG,
+                                       .value = ST_MAGN_2_FS_AVL_12000_VAL,
+                                       .gain = ST_MAGN_2_FS_AVL_12000_GAIN,
+                               },
+                               [3] = {
+                                       .num = ST_MAGN_FS_AVL_16000MG,
+                                       .value = ST_MAGN_2_FS_AVL_16000_VAL,
+                                       .gain = ST_MAGN_2_FS_AVL_16000_GAIN,
                                },
                        },
                },
index c47c2034ca71f9a95f3153fbc31756fc34b1fde0..4293e89bbbddff0f3e571fd965db591b1c772dc4 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/completion.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/sysctl.h>
 
 #include <rdma/iw_cm.h>
 #include <rdma/ib_addr.h>
@@ -65,6 +66,20 @@ struct iwcm_work {
        struct list_head free_list;
 };
 
+static unsigned int default_backlog = 256;
+
+static struct ctl_table_header *iwcm_ctl_table_hdr;
+static struct ctl_table iwcm_ctl_table[] = {
+       {
+               .procname       = "default_backlog",
+               .data           = &default_backlog,
+               .maxlen         = sizeof(default_backlog),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       { }
+};
+
 /*
  * The following services provide a mechanism for pre-allocating iwcm_work
  * elements.  The design pre-allocates them  based on the cm_id type:
@@ -419,6 +434,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
 
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
+       if (!backlog)
+               backlog = default_backlog;
+
        ret = alloc_work_entries(cm_id_priv, backlog);
        if (ret)
                return ret;
@@ -1024,11 +1042,20 @@ static int __init iw_cm_init(void)
        if (!iwcm_wq)
                return -ENOMEM;
 
+       iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
+                                                iwcm_ctl_table);
+       if (!iwcm_ctl_table_hdr) {
+               pr_err("iw_cm: couldn't register sysctl paths\n");
+               destroy_workqueue(iwcm_wq);
+               return -ENOMEM;
+       }
+
        return 0;
 }
 
 static void __exit iw_cm_cleanup(void)
 {
+       unregister_net_sysctl_table(iwcm_ctl_table_hdr);
        destroy_workqueue(iwcm_wq);
 }
 
index 0e93152384f022ae59788e042d938a09fd5d178e..acb3865710c2c0a7848359004458fdd71d360c5e 100644 (file)
@@ -404,7 +404,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        init_completion(&isert_conn->conn_wait);
        init_completion(&isert_conn->conn_wait_comp_err);
        kref_init(&isert_conn->conn_kref);
-       kref_get(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
 
        cma_id->context = isert_conn;
@@ -530,7 +529,9 @@ isert_connect_release(struct isert_conn *isert_conn)
 static void
 isert_connected_handler(struct rdma_cm_id *cma_id)
 {
-       return;
+       struct isert_conn *isert_conn = cma_id->context;
+
+       kref_get(&isert_conn->conn_kref);
 }
 
 static void
@@ -582,7 +583,6 @@ isert_disconnect_work(struct work_struct *work)
 
 wake_up:
        complete(&isert_conn->conn_wait);
-       isert_put_conn(isert_conn);
 }
 
 static void
@@ -2265,6 +2265,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
        wait_for_completion(&isert_conn->conn_wait_comp_err);
 
        wait_for_completion(&isert_conn->conn_wait);
+       isert_put_conn(isert_conn);
 }
 
 static void isert_free_conn(struct iscsi_conn *conn)
index 1954daac0b593c39aded2ce786738a44552b07ca..35dd5ff662f1e11275aacfb0eb588faa861379d8 100644 (file)
@@ -93,6 +93,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
 
 static struct scsi_transport_template *ib_srp_transport_template;
+static struct workqueue_struct *srp_remove_wq;
 
 static struct ib_client srp_client = {
        .name   = "srp",
@@ -456,7 +457,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
        spin_unlock_irq(&target->lock);
 
        if (changed)
-               queue_work(system_long_wq, &target->remove_work);
+               queue_work(srp_remove_wq, &target->remove_work);
 
        return changed;
 }
@@ -2530,9 +2531,10 @@ static void srp_remove_one(struct ib_device *device)
                spin_unlock(&host->target_lock);
 
                /*
-                * Wait for target port removal tasks.
+                * Wait for tl_err and target port removal tasks.
                 */
                flush_workqueue(system_long_wq);
+               flush_workqueue(srp_remove_wq);
 
                kfree(host);
        }
@@ -2577,16 +2579,22 @@ static int __init srp_init_module(void)
                indirect_sg_entries = cmd_sg_entries;
        }
 
+       srp_remove_wq = create_workqueue("srp_remove");
+       if (IS_ERR(srp_remove_wq)) {
+               ret = PTR_ERR(srp_remove_wq);
+               goto out;
+       }
+
+       ret = -ENOMEM;
        ib_srp_transport_template =
                srp_attach_transport(&ib_srp_transport_functions);
        if (!ib_srp_transport_template)
-               return -ENOMEM;
+               goto destroy_wq;
 
        ret = class_register(&srp_class);
        if (ret) {
                pr_err("couldn't register class infiniband_srp\n");
-               srp_release_transport(ib_srp_transport_template);
-               return ret;
+               goto release_tr;
        }
 
        ib_sa_register_client(&srp_sa_client);
@@ -2594,13 +2602,22 @@ static int __init srp_init_module(void)
        ret = ib_register_client(&srp_client);
        if (ret) {
                pr_err("couldn't register IB client\n");
-               srp_release_transport(ib_srp_transport_template);
-               ib_sa_unregister_client(&srp_sa_client);
-               class_unregister(&srp_class);
-               return ret;
+               goto unreg_sa;
        }
 
-       return 0;
+out:
+       return ret;
+
+unreg_sa:
+       ib_sa_unregister_client(&srp_sa_client);
+       class_unregister(&srp_class);
+
+release_tr:
+       srp_release_transport(ib_srp_transport_template);
+
+destroy_wq:
+       destroy_workqueue(srp_remove_wq);
+       goto out;
 }
 
 static void __exit srp_cleanup_module(void)
@@ -2609,6 +2626,7 @@ static void __exit srp_cleanup_module(void)
        ib_sa_unregister_client(&srp_sa_client);
        class_unregister(&srp_class);
        srp_release_transport(ib_srp_transport_template);
+       destroy_workqueue(srp_remove_wq);
 }
 
 module_init(srp_init_module);
index 66984e272c459460b76b4fa0165497d79841cb71..a161021c452615e9d2fb39169e9bb170e529f317 100644 (file)
@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
 }
 
 static int input_get_disposition(struct input_dev *dev,
-                         unsigned int type, unsigned int code, int value)
+                         unsigned int type, unsigned int code, int *pval)
 {
        int disposition = INPUT_IGNORE_EVENT;
+       int value = *pval;
 
        switch (type) {
 
@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
                break;
        }
 
+       *pval = value;
        return disposition;
 }
 
@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
 {
        int disposition;
 
-       disposition = input_get_disposition(dev, type, code, value);
+       disposition = input_get_disposition(dev, type, code, &value);
 
        if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
                dev->event(dev, type, code, value);
index 2dd1d0dd4f7de03233752e57704ff60c17e6d992..6f5d79569136f3adf0bf9248988b107426b85e9c 100644 (file)
@@ -1791,14 +1791,6 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
-               },
-               .callback = atkbd_deactivate_fixup,
-       },
-       {
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
                },
                .callback = atkbd_deactivate_fixup,
        },
index eaaccde8221013c4faec1ef932732d463139136f..7fbf7247e65f5879faa006d19340d8711dc34883 100644 (file)
@@ -27,9 +27,10 @@ struct keyreset_state {
        int restart_requested;
        int (*reset_fn)(void);
        struct platform_device *pdev_child;
+       struct work_struct restart_work;
 };
 
-static void do_restart(void)
+static void do_restart(struct work_struct *unused)
 {
        sys_sync();
        kernel_restart(NULL);
@@ -44,7 +45,7 @@ static void do_reset_fn(void *priv)
                state->restart_requested = state->reset_fn();
        } else {
                pr_info("keyboard reset\n");
-               do_restart();
+               schedule_work(&state->restart_work);
                state->restart_requested = 1;
        }
 }
@@ -69,6 +70,7 @@ static int keyreset_probe(struct platform_device *pdev)
        if (!state->pdev_child)
                return -ENOMEM;
        state->pdev_child->dev.parent = &pdev->dev;
+       INIT_WORK(&state->restart_work, do_restart);
 
        keyp = pdata->keys_down;
        while ((key = *keyp++)) {
index 7c5d72a6a26a3400fbedfcbe19e37e1abb298f1f..19e070f16e6beb7a220faaa50dbcf2ea478fe571 100644 (file)
@@ -873,7 +873,13 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
 {
        struct alps_data *priv = psmouse->private;
 
-       if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
+       /*
+        * Check if we are dealing with a bare PS/2 packet, presumably from
+        * a device connected to the external PS/2 port. Because bare PS/2
+        * protocol does not have enough constant bits to self-synchronize
+        * properly we only do this if the device is fully synchronized.
+        */
+       if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
                if (psmouse->pktcnt == 3) {
                        alps_report_bare_ps2_packet(psmouse, psmouse->packet,
                                                    true);
@@ -1816,6 +1822,9 @@ int alps_init(struct psmouse *psmouse)
        /* We are having trouble resyncing ALPS touchpads so disable it for now */
        psmouse->resync_time = 0;
 
+       /* Allow 2 invalid packets without resetting device */
+       psmouse->resetafter = psmouse->pktsize * 2;
+
        return 0;
 
 init_fail:
index 1913301df08f079425a5d6ae5ceb11124c45e3aa..85e75239c81404f9e31b92b8fad2779a095bdb00 100644 (file)
@@ -1223,6 +1223,13 @@ static bool elantech_is_signature_valid(const unsigned char *param)
        if (param[1] == 0)
                return true;
 
+       /*
+        * Some models have a revision higher than 20. Meaning param[2] may
+        * be 10 or 20, skip the rates check for these.
+        */
+       if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
+               return true;
+
        for (i = 0; i < ARRAY_SIZE(rates); i++)
                if (param[2] == rates[i])
                        return false;
index f36f7b88f2603d3cbe16eb2a88dffa63091362cd..d1c47d135c071749458007d678f497140437e755 100644 (file)
@@ -549,10 +549,61 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
                         ((buf[0] & 0x04) >> 1) |
                         ((buf[3] & 0x04) >> 2));
 
+               if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
+                       SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
+                   hw->w == 2) {
+                       synaptics_parse_agm(buf, priv, hw);
+                       return 1;
+               }
+
+               hw->x = (((buf[3] & 0x10) << 8) |
+                        ((buf[1] & 0x0f) << 8) |
+                        buf[4]);
+               hw->y = (((buf[3] & 0x20) << 7) |
+                        ((buf[1] & 0xf0) << 4) |
+                        buf[5]);
+               hw->z = buf[2];
+
                hw->left  = (buf[0] & 0x01) ? 1 : 0;
                hw->right = (buf[0] & 0x02) ? 1 : 0;
 
-               if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
+               if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) {
+                       /*
+                        * ForcePads, like Clickpads, use middle button
+                        * bits to report primary button clicks.
+                        * Unfortunately they report primary button not
+                        * only when user presses on the pad above certain
+                        * threshold, but also when there are more than one
+                        * finger on the touchpad, which interferes with
+                        * out multi-finger gestures.
+                        */
+                       if (hw->z == 0) {
+                               /* No contacts */
+                               priv->press = priv->report_press = false;
+                       } else if (hw->w >= 4 && ((buf[0] ^ buf[3]) & 0x01)) {
+                               /*
+                                * Single-finger touch with pressure above
+                                * the threshold. If pressure stays long
+                                * enough, we'll start reporting primary
+                                * button. We rely on the device continuing
+                                * sending data even if finger does not
+                                * move.
+                                */
+                               if  (!priv->press) {
+                                       priv->press_start = jiffies;
+                                       priv->press = true;
+                               } else if (time_after(jiffies,
+                                               priv->press_start +
+                                                       msecs_to_jiffies(50))) {
+                                       priv->report_press = true;
+                               }
+                       } else {
+                               priv->press = false;
+                       }
+
+                       hw->left = priv->report_press;
+
+               } else if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
                        /*
                         * Clickpad's button is transmitted as middle button,
                         * however, since it is primary button, we will report
@@ -571,21 +622,6 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
                        hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
                }
 
-               if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
-                       SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
-                   hw->w == 2) {
-                       synaptics_parse_agm(buf, priv, hw);
-                       return 1;
-               }
-
-               hw->x = (((buf[3] & 0x10) << 8) |
-                        ((buf[1] & 0x0f) << 8) |
-                        buf[4]);
-               hw->y = (((buf[3] & 0x20) << 7) |
-                        ((buf[1] & 0xf0) << 4) |
-                        buf[5]);
-               hw->z = buf[2];
-
                if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
                    ((buf[0] ^ buf[3]) & 0x02)) {
                        switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
index e594af0b264b7f147569d77fe5a1d84825ed1a9e..fb2e076738ae3bee7fdd844de7e93dd3fa5146c6 100644 (file)
  * 2   0x08    image sensor            image sensor tracks 5 fingers, but only
  *                                     reports 2.
  * 2   0x20    report min              query 0x0f gives min coord reported
+ * 2   0x80    forcepad                forcepad is a variant of clickpad that
+ *                                     does not have physical buttons but rather
+ *                                     uses pressure above certain threshold to
+ *                                     report primary clicks. Forcepads also have
+ *                                     clickpad bit set.
  */
 #define SYN_CAP_CLICKPAD(ex0c)         ((ex0c) & 0x100000) /* 1-button ClickPad */
 #define SYN_CAP_CLICKPAD2BTN(ex0c)     ((ex0c) & 0x000100) /* 2-button ClickPad */
@@ -86,6 +91,7 @@
 #define SYN_CAP_ADV_GESTURE(ex0c)      ((ex0c) & 0x080000)
 #define SYN_CAP_REDUCED_FILTERING(ex0c)        ((ex0c) & 0x000400)
 #define SYN_CAP_IMAGE_SENSOR(ex0c)     ((ex0c) & 0x000800)
+#define SYN_CAP_FORCEPAD(ex0c)         ((ex0c) & 0x008000)
 
 /* synaptics modes query bits */
 #define SYN_MODE_ABSOLUTE(m)           ((m) & (1 << 7))
@@ -177,6 +183,11 @@ struct synaptics_data {
         */
        struct synaptics_hw_state agm;
        bool agm_pending;                       /* new AGM packet received */
+
+       /* ForcePad handling */
+       unsigned long                           press_start;
+       bool                                    press;
+       bool                                    report_press;
 };
 
 void synaptics_module_init(void);
index 960d44f2ee030f35f7579bb2f0784d0694f53d73..cd05bca2fec546abec2c68ba5b6ff0f909ee7773 100755 (executable)
@@ -1656,7 +1656,7 @@ int sensor_probe(struct i2c_client *client, const struct i2c_device_id *devid)
        int result = 0;\r
        int type = 0;\r
        \r
-       dev_info(&client->adapter->dev, "%s: %s,0x%x\n", __func__, devid->name,(unsigned int)client);\r
+       dev_info(&client->adapter->dev, "%s: %s,%p\n", __func__, devid->name, client);\r
 \r
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {\r
                result = -ENODEV;\r
index 0ec9abbe31fec3af5248808fce517fc863ff75b2..ce715b1bee46880bdb34b5360522367bc390b835 100644 (file)
@@ -99,6 +99,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
                        DMI_MATCH(DMI_BOARD_VERSION, "REV 2.X"),
                },
        },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
+               },
+       },
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
@@ -458,6 +464,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
                },
        },
+       {
+               /* Avatar AVIU-145A6 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
+               },
+       },
        { }
 };
 
@@ -601,6 +614,30 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
                },
        },
+       {
+               /* Fujitsu A544 laptop */
+               /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
+               },
+       },
+       {
+               /* Fujitsu AH544 laptop */
+               /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
+               },
+       },
+       {
+               /* Fujitsu U574 laptop */
+               /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
+               },
+       },
        { }
 };
 
index 8755f5f3ad37c2218de60b96782c0863615a705e..e4ecf3b647943f6894d8a47fe28da0b5205a6661 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/serio.h>
 #include <linux/tty.h>
+#include <linux/compat.h>
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
 MODULE_DESCRIPTION("Input device TTY line discipline");
@@ -196,28 +197,55 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u
        return 0;
 }
 
+static void serport_set_type(struct tty_struct *tty, unsigned long type)
+{
+       struct serport *serport = tty->disc_data;
+
+       serport->id.proto = type & 0x000000ff;
+       serport->id.id    = (type & 0x0000ff00) >> 8;
+       serport->id.extra = (type & 0x00ff0000) >> 16;
+}
+
 /*
  * serport_ldisc_ioctl() allows to set the port protocol, and device ID
  */
 
-static int serport_ldisc_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg)
+static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
+                              unsigned int cmd, unsigned long arg)
 {
-       struct serport *serport = (struct serport*) tty->disc_data;
-       unsigned long type;
-
        if (cmd == SPIOCSTYPE) {
+               unsigned long type;
+
                if (get_user(type, (unsigned long __user *) arg))
                        return -EFAULT;
 
-               serport->id.proto = type & 0x000000ff;
-               serport->id.id    = (type & 0x0000ff00) >> 8;
-               serport->id.extra = (type & 0x00ff0000) >> 16;
+               serport_set_type(tty, type);
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+#ifdef CONFIG_COMPAT
+#define COMPAT_SPIOCSTYPE      _IOW('q', 0x01, compat_ulong_t)
+static long serport_ldisc_compat_ioctl(struct tty_struct *tty,
+                                      struct file *file,
+                                      unsigned int cmd, unsigned long arg)
+{
+       if (cmd == COMPAT_SPIOCSTYPE) {
+               void __user *uarg = compat_ptr(arg);
+               compat_ulong_t compat_type;
+
+               if (get_user(compat_type, (compat_ulong_t __user *)uarg))
+                       return -EFAULT;
 
+               serport_set_type(tty, compat_type);
                return 0;
        }
 
        return -EINVAL;
 }
+#endif
 
 static void serport_ldisc_write_wakeup(struct tty_struct * tty)
 {
@@ -241,6 +269,9 @@ static struct tty_ldisc_ops serport_ldisc = {
        .close =        serport_ldisc_close,
        .read =         serport_ldisc_read,
        .ioctl =        serport_ldisc_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = serport_ldisc_compat_ioctl,
+#endif
        .receive_buf =  serport_ldisc_receive,
        .write_wakeup = serport_ldisc_write_wakeup
 };
index 8aa6c851950a39b6151275f41dc4e0c30348515a..e0d4fe707ae6e6bebf24c53101110db3be724a2b 100755 (executable)
@@ -289,12 +289,11 @@ static int  goodix_read_version(struct rk_ts_data *ts, char **version)
        char *version_data;\r
        char *p;\r
 \r
-       *version = (char *)vmalloc(18);\r
+       *version = (char *)vzalloc(18);\r
        version_data = *version;\r
        if(!version_data)\r
                return -ENOMEM;\r
        p = version_data;\r
-       memset(version_data, 0, sizeof(version_data));\r
        version_data[0]=240;    \r
        ret=goodix_i2c_read_bytes(ts->client,version_data, 17);\r
        if (ret < 0) \r
index 6f849cbcac6f041f39de89ba4dcd94f1fb970d17..dfb401cba73364c57121760fd1871570fa3d2da2 100644 (file)
@@ -3187,14 +3187,16 @@ free_domains:
 
 static void cleanup_domain(struct protection_domain *domain)
 {
-       struct iommu_dev_data *dev_data, *next;
+       struct iommu_dev_data *entry;
        unsigned long flags;
 
        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 
-       list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
-               __detach_device(dev_data);
-               atomic_set(&dev_data->bind, 0);
+       while (!list_empty(&domain->dev_list)) {
+               entry = list_first_entry(&domain->dev_list,
+                                        struct iommu_dev_data, list);
+               __detach_device(entry);
+               atomic_set(&entry->bind, 0);
        }
 
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
index 51fe7e0e5d8c48e6d9c4d3fea3d933ca0460f6d4..85e46a34b3a151baac1c72d23e3957f3b2e997b1 100755 (executable)
@@ -484,8 +484,12 @@ static bool rockchip_iommu_reset(void __iomem *base, const char *dbgname)
 
 static inline void rockchip_pgtable_flush(void *vastart, void *vaend)
 {
+#ifdef CONFIG_ARM
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
+#elif defined(CONFIG_ARM64)
+       __dma_flush_range(vastart, vaend);
+#endif
 }
 
 static void dump_pagetbl(dma_addr_t fault_address, u32 addr_dte)
index 4a33351c25dce61c805a9c99258f893792a1ec2a..7709b1dbf6cc04600ff810202465a6e398deb10d 100644 (file)
@@ -10,6 +10,11 @@ config ARM_GIC
 config GIC_NON_BANKED
        bool
 
+config ARM_GIC_V3
+       bool
+       select IRQ_DOMAIN
+       select MULTI_IRQ_HANDLER
+
 config ARM_VIC
        bool
        select IRQ_DOMAIN
index cda4cb5f7327b77534a45776eb39b06f3c36a43c..bf4667b34306b329f730040a1dd33c7523280d34 100644 (file)
@@ -9,7 +9,8 @@ obj-$(CONFIG_METAG)                     += irq-metag-ext.o
 obj-$(CONFIG_METAG_PERFCOUNTER_IRQS)   += irq-metag.o
 obj-$(CONFIG_ARCH_SUNXI)               += irq-sun4i.o
 obj-$(CONFIG_ARCH_SPEAR3XX)            += spear-shirq.o
-obj-$(CONFIG_ARM_GIC)                  += irq-gic.o
+obj-$(CONFIG_ARM_GIC)                  += irq-gic.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_V3)               += irq-gic-v3.o irq-gic-common.o
 obj-$(CONFIG_ARM_VIC)                  += irq-vic.o
 obj-$(CONFIG_SIRF_IRQ)                 += irq-sirfsoc.o
 obj-$(CONFIG_RENESAS_INTC_IRQPIN)      += irq-renesas-intc-irqpin.o
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
new file mode 100644 (file)
index 0000000..60ac704
--- /dev/null
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include "irq-gic-common.h"
+
+void gic_configure_irq(unsigned int irq, unsigned int type,
+                      void __iomem *base, void (*sync_access)(void))
+{
+       u32 enablemask = 1 << (irq % 32);
+       u32 enableoff = (irq / 32) * 4;
+       u32 confmask = 0x2 << ((irq % 16) * 2);
+       u32 confoff = (irq / 16) * 4;
+       bool enabled = false;
+       u32 val;
+
+       /*
+        * Read current configuration register, and insert the config
+        * for "irq", depending on "type".
+        */
+       val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
+       if (type == IRQ_TYPE_LEVEL_HIGH)
+               val &= ~confmask;
+       else if (type == IRQ_TYPE_EDGE_RISING)
+               val |= confmask;
+
+       /*
+        * As recommended by the spec, disable the interrupt before changing
+        * the configuration
+        */
+       if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
+               writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
+               if (sync_access)
+                       sync_access();
+               enabled = true;
+       }
+
+       /*
+        * Write back the new configuration, and possibly re-enable
+        * the interrupt.
+        */
+       writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
+
+       if (enabled)
+               writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+
+       if (sync_access)
+               sync_access();
+}
+
+void __init gic_dist_config(void __iomem *base, int gic_irqs,
+                           void (*sync_access)(void))
+{
+       unsigned int i;
+
+       /*
+        * Set all global interrupts to be level triggered, active low.
+        */
+       for (i = 32; i < gic_irqs; i += 16)
+               writel_relaxed(0, base + GIC_DIST_CONFIG + i / 4);
+
+       /*
+        * Set priority on all global interrupts.
+        */
+       for (i = 32; i < gic_irqs; i += 4)
+               writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i);
+
+       /*
+        * Disable all interrupts.  Leave the PPI and SGIs alone
+        * as they are enabled by redistributor registers.
+        */
+       for (i = 32; i < gic_irqs; i += 32)
+               writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i / 8);
+
+       if (sync_access)
+               sync_access();
+}
+
+void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
+{
+       int i;
+
+       /*
+        * Deal with the banked PPI and SGI interrupts - disable all
+        * PPI interrupts, ensure all SGI interrupts are enabled.
+        */
+       writel_relaxed(0xffff0000, base + GIC_DIST_ENABLE_CLEAR);
+       writel_relaxed(0x0000ffff, base + GIC_DIST_ENABLE_SET);
+
+       /*
+        * Set priority on PPI and SGI interrupts
+        */
+       for (i = 0; i < 32; i += 4)
+               writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
+
+       if (sync_access)
+               sync_access();
+}
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
new file mode 100644 (file)
index 0000000..b41f024
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _IRQ_GIC_COMMON_H
+#define _IRQ_GIC_COMMON_H
+
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+
+void gic_configure_irq(unsigned int irq, unsigned int type,
+                       void __iomem *base, void (*sync_access)(void));
+void gic_dist_config(void __iomem *base, int gic_irqs,
+                    void (*sync_access)(void));
+void gic_cpu_config(void __iomem *base, void (*sync_access)(void));
+
+#endif /* _IRQ_GIC_COMMON_H */
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
new file mode 100644 (file)
index 0000000..57eaa5a
--- /dev/null
@@ -0,0 +1,692 @@
+/*
+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/cputype.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+
+#include "irq-gic-common.h"
+#include "irqchip.h"
+
+struct gic_chip_data {
+       void __iomem            *dist_base;
+       void __iomem            **redist_base;
+       void __percpu __iomem   **rdist;
+       struct irq_domain       *domain;
+       u64                     redist_stride;
+       u32                     redist_regions;
+       unsigned int            irq_nr;
+};
+
+static struct gic_chip_data gic_data __read_mostly;
+
+#define gic_data_rdist()               (this_cpu_ptr(gic_data.rdist))
+#define gic_data_rdist_rd_base()       (*gic_data_rdist())
+#define gic_data_rdist_sgi_base()      (gic_data_rdist_rd_base() + SZ_64K)
+
+/* Our default, arbitrary priority value. Linux only uses one anyway. */
+#define DEFAULT_PMR_VALUE      0xf0
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+       return d->hwirq;
+}
+
+static inline int gic_irq_in_rdist(struct irq_data *d)
+{
+       return gic_irq(d) < 32;
+}
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+       if (gic_irq_in_rdist(d))        /* SGI+PPI -> SGI_base for this CPU */
+               return gic_data_rdist_sgi_base();
+
+       if (d->hwirq <= 1023)           /* SPI -> dist_base */
+               return gic_data.dist_base;
+
+       if (d->hwirq >= 8192)
+               BUG();          /* LPI Detected!!! */
+
+       return NULL;
+}
+
+static void gic_do_wait_for_rwp(void __iomem *base)
+{
+       u32 count = 1000000;    /* 1s! */
+
+       while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+               count--;
+               if (!count) {
+                       pr_err_ratelimited("RWP timeout, gone fishing\n");
+                       return;
+               }
+               cpu_relax();
+               udelay(1);
+       };
+}
+
+/* Wait for completion of a distributor change */
+static void gic_dist_wait_for_rwp(void)
+{
+       gic_do_wait_for_rwp(gic_data.dist_base);
+}
+
+/* Wait for completion of a redistributor change */
+static void gic_redist_wait_for_rwp(void)
+{
+       gic_do_wait_for_rwp(gic_data_rdist_rd_base());
+}
+
+/* Low level accessors */
+static u64 gic_read_iar(void)
+{
+       u64 irqstat;
+
+       asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
+       return irqstat;
+}
+
+static void gic_write_pmr(u64 val)
+{
+       asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
+}
+
+static void gic_write_ctlr(u64 val)
+{
+       asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
+       isb();
+}
+
+static void gic_write_grpen1(u64 val)
+{
+       asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
+       isb();
+}
+
+static void gic_write_sgi1r(u64 val)
+{
+       asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
+}
+
+static void gic_enable_sre(void)
+{
+       u64 val;
+
+       asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
+       val |= ICC_SRE_EL1_SRE;
+       asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
+       isb();
+
+       /*
+        * Need to check that the SRE bit has actually been set. If
+        * not, it means that SRE is disabled at EL2. We're going to
+        * die painfully, and there is nothing we can do about it.
+        *
+        * Kindly inform the luser.
+        */
+       asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
+       if (!(val & ICC_SRE_EL1_SRE))
+               pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+}
+
+static void gic_enable_redist(void)
+{
+       void __iomem *rbase;
+       u32 count = 1000000;    /* 1s! */
+       u32 val;
+
+       rbase = gic_data_rdist_rd_base();
+
+       /* Wake up this CPU redistributor */
+       val = readl_relaxed(rbase + GICR_WAKER);
+       val &= ~GICR_WAKER_ProcessorSleep;
+       writel_relaxed(val, rbase + GICR_WAKER);
+
+       while (readl_relaxed(rbase + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
+               count--;
+               if (!count) {
+                       pr_err_ratelimited("redist didn't wake up...\n");
+                       return;
+               }
+               cpu_relax();
+               udelay(1);
+       };
+}
+
+/*
+ * Routines to disable, enable, EOI and route interrupts
+ */
+static void gic_poke_irq(struct irq_data *d, u32 offset)
+{
+       u32 mask = 1 << (gic_irq(d) % 32);
+       void (*rwp_wait)(void);
+       void __iomem *base;
+
+       if (gic_irq_in_rdist(d)) {
+               base = gic_data_rdist_sgi_base();
+               rwp_wait = gic_redist_wait_for_rwp;
+       } else {
+               base = gic_data.dist_base;
+               rwp_wait = gic_dist_wait_for_rwp;
+       }
+
+       writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
+       rwp_wait();
+}
+
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+       u32 mask = 1 << (gic_irq(d) % 32);
+       void __iomem *base;
+
+       if (gic_irq_in_rdist(d))
+               base = gic_data_rdist_sgi_base();
+       else
+               base = gic_data.dist_base;
+
+       return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+       gic_poke_irq(d, GICD_ICENABLER);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+       gic_poke_irq(d, GICD_ISENABLER);
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+       gic_write_eoir(gic_irq(d));
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+       unsigned int irq = gic_irq(d);
+       void (*rwp_wait)(void);
+       void __iomem *base;
+
+       /* Interrupt configuration for SGIs can't be changed */
+       if (irq < 16)
+               return -EINVAL;
+
+       if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+               return -EINVAL;
+
+       if (gic_irq_in_rdist(d)) {
+               base = gic_data_rdist_sgi_base();
+               rwp_wait = gic_redist_wait_for_rwp;
+       } else {
+               base = gic_data.dist_base;
+               rwp_wait = gic_dist_wait_for_rwp;
+       }
+
+       gic_configure_irq(irq, type, base, rwp_wait);
+
+       return 0;
+}
+
+static u64 gic_mpidr_to_affinity(u64 mpidr)
+{
+       u64 aff;
+
+       aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
+              MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+              MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
+              MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+       return aff;
+}
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+       u64 irqnr;
+
+       do {
+               irqnr = gic_read_iar();
+
+               if (likely(irqnr > 15 && irqnr < 1020)) {
+                       u64 irq = irq_find_mapping(gic_data.domain, irqnr);
+                       if (likely(irq)) {
+                               handle_IRQ(irq, regs);
+                               continue;
+                       }
+
+                       WARN_ONCE(true, "Unexpected SPI received!\n");
+                       gic_write_eoir(irqnr);
+               }
+               if (irqnr < 16) {
+                       gic_write_eoir(irqnr);
+#ifdef CONFIG_SMP
+                       handle_IPI(irqnr, regs);
+#else
+                       WARN_ONCE(true, "Unexpected SGI received!\n");
+#endif
+                       continue;
+               }
+       } while (irqnr != ICC_IAR1_EL1_SPURIOUS);
+}
+
+static void __init gic_dist_init(void)
+{
+       unsigned int i;
+       u64 affinity;
+       void __iomem *base = gic_data.dist_base;
+
+       /* Disable the distributor */
+       writel_relaxed(0, base + GICD_CTLR);
+       gic_dist_wait_for_rwp();
+
+       gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
+
+       /* Enable distributor with ARE, Group1 */
+       writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
+                      base + GICD_CTLR);
+
+       /*
+        * Set all global interrupts to the boot CPU only. ARE must be
+        * enabled.
+        */
+       affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
+       for (i = 32; i < gic_data.irq_nr; i++)
+               writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
+}
+
+static int gic_populate_rdist(void)
+{
+       u64 mpidr = cpu_logical_map(smp_processor_id());
+       u64 typer;
+       u32 aff;
+       int i;
+
+       /*
+        * Convert affinity to a 32bit value that can be matched to
+        * GICR_TYPER bits [63:32].
+        */
+       aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
+              MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+              MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+              MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+       for (i = 0; i < gic_data.redist_regions; i++) {
+               void __iomem *ptr = gic_data.redist_base[i];
+               u32 reg;
+
+               reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
+               if (reg != GIC_PIDR2_ARCH_GICv3 &&
+                   reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
+                       pr_warn("No redistributor present @%p\n", ptr);
+                       break;
+               }
+
+               do {
+                       typer = readq_relaxed(ptr + GICR_TYPER);
+                       if ((typer >> 32) == aff) {
+                               gic_data_rdist_rd_base() = ptr;
+                               pr_info("CPU%d: found redistributor %llx @%p\n",
+                                       smp_processor_id(),
+                                       (unsigned long long)mpidr, ptr);
+                               return 0;
+                       }
+
+                       if (gic_data.redist_stride) {
+                               ptr += gic_data.redist_stride;
+                       } else {
+                               ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
+                               if (typer & GICR_TYPER_VLPIS)
+                                       ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
+                       }
+               } while (!(typer & GICR_TYPER_LAST));
+       }
+
+       /* We couldn't even deal with ourselves... */
+       WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
+            smp_processor_id(), (unsigned long long)mpidr);
+       return -ENODEV;
+}
+
+static void gic_cpu_init(void)
+{
+       void __iomem *rbase;
+
+       /* Register ourselves with the rest of the world */
+       if (gic_populate_rdist())
+               return;
+
+       gic_enable_redist();
+
+       rbase = gic_data_rdist_sgi_base();
+
+       gic_cpu_config(rbase, gic_redist_wait_for_rwp);
+
+       /* Enable system registers */
+       gic_enable_sre();
+
+       /* Set priority mask register */
+       gic_write_pmr(DEFAULT_PMR_VALUE);
+
+       /* EOI deactivates interrupt too (mode 0) */
+       gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
+
+       /* ... and let's hit the road... */
+       gic_write_grpen1(1);
+}
+
+#ifdef CONFIG_SMP
+static int gic_secondary_init(struct notifier_block *nfb,
+                             unsigned long action, void *hcpu)
+{
+       if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+               gic_cpu_init();
+       return NOTIFY_OK;
+}
+
+/*
+ * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
+ * priority because the GIC needs to be up before the ARM generic timers.
+ */
+static struct notifier_block gic_cpu_notifier = {
+       .notifier_call = gic_secondary_init,
+       .priority = 100,
+};
+
+static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
+                                  u64 cluster_id)
+{
+       int cpu = *base_cpu;
+       u64 mpidr = cpu_logical_map(cpu);
+       u16 tlist = 0;
+
+       while (cpu < nr_cpu_ids) {
+               /*
+                * If we ever get a cluster of more than 16 CPUs, just
+                * scream and skip that CPU.
+                */
+               if (WARN_ON((mpidr & 0xff) >= 16))
+                       goto out;
+
+               tlist |= 1 << (mpidr & 0xf);
+
+               cpu = cpumask_next(cpu, mask);
+               if (cpu == nr_cpu_ids)
+                       goto out;
+
+               mpidr = cpu_logical_map(cpu);
+
+               if (cluster_id != (mpidr & ~0xffUL)) {
+                       cpu--;
+                       goto out;
+               }
+       }
+out:
+       *base_cpu = cpu;
+       return tlist;
+}
+
+static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
+{
+       u64 val;
+
+       val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48        |
+              MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32        |
+              irq << 24                                        |
+              MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16        |
+              tlist);
+
+       pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+       gic_write_sgi1r(val);
+}
+
+static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+{
+       int cpu;
+
+       if (WARN_ON(irq >= 16))
+               return;
+
+       /*
+        * Ensure that stores to Normal memory are visible to the
+        * other CPUs before issuing the IPI.
+        */
+       smp_wmb();
+
+       for_each_cpu_mask(cpu, *mask) {
+               u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
+               u16 tlist;
+
+               tlist = gic_compute_target_list(&cpu, mask, cluster_id);
+               gic_send_sgi(cluster_id, tlist, irq);
+       }
+
+       /* Force the above writes to ICC_SGI1R_EL1 to be executed */
+       isb();
+}
+
+static void gic_smp_init(void)
+{
+       set_smp_cross_call(gic_raise_softirq);
+       register_cpu_notifier(&gic_cpu_notifier);
+}
+
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+                           bool force)
+{
+       unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       void __iomem *reg;
+       int enabled;
+       u64 val;
+
+       if (gic_irq_in_rdist(d))
+               return -EINVAL;
+
+       /* If interrupt was enabled, disable it first */
+       enabled = gic_peek_irq(d, GICD_ISENABLER);
+       if (enabled)
+               gic_mask_irq(d);
+
+       reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
+       val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
+
+       writeq_relaxed(val, reg);
+
+       /*
+        * If the interrupt was enabled, enabled it again. Otherwise,
+        * just wait for the distributor to have digested our changes.
+        */
+       if (enabled)
+               gic_unmask_irq(d);
+       else
+               gic_dist_wait_for_rwp();
+
+       return IRQ_SET_MASK_OK;
+}
+#else
+#define gic_set_affinity       NULL
+#define gic_smp_init()         do { } while(0)
+#endif
+
+static struct irq_chip gic_chip = {
+       .name                   = "GICv3",
+       .irq_mask               = gic_mask_irq,
+       .irq_unmask             = gic_unmask_irq,
+       .irq_eoi                = gic_eoi_irq,
+       .irq_set_type           = gic_set_type,
+       .irq_set_affinity       = gic_set_affinity,
+};
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+                             irq_hw_number_t hw)
+{
+       /* SGIs are private to the core kernel */
+       if (hw < 16)
+               return -EPERM;
+       /* PPIs */
+       if (hw < 32) {
+               irq_set_percpu_devid(irq);
+               irq_set_chip_and_handler(irq, &gic_chip,
+                                        handle_percpu_devid_irq);
+               set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+       }
+       /* SPIs */
+       if (hw >= 32 && hw < gic_data.irq_nr) {
+               irq_set_chip_and_handler(irq, &gic_chip,
+                                        handle_fasteoi_irq);
+               set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+       }
+       irq_set_chip_data(irq, d->host_data);
+       return 0;
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d,
+                               struct device_node *controller,
+                               const u32 *intspec, unsigned int intsize,
+                               unsigned long *out_hwirq, unsigned int *out_type)
+{
+       if (d->of_node != controller)
+               return -EINVAL;
+       if (intsize < 3)
+               return -EINVAL;
+
+       switch(intspec[0]) {
+       case 0:                 /* SPI */
+               *out_hwirq = intspec[1] + 32;
+               break;
+       case 1:                 /* PPI */
+               *out_hwirq = intspec[1] + 16;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+       return 0;
+}
+
+static const struct irq_domain_ops gic_irq_domain_ops = {
+       .map = gic_irq_domain_map,
+       .xlate = gic_irq_domain_xlate,
+};
+
+static int __init gic_of_init(struct device_node *node, struct device_node *parent)
+{
+       void __iomem *dist_base;
+       void __iomem **redist_base;
+       u64 redist_stride;
+       u32 redist_regions;
+       u32 reg;
+       int gic_irqs;
+       int err;
+       int i;
+
+       dist_base = of_iomap(node, 0);
+       if (!dist_base) {
+               pr_err("%s: unable to map gic dist registers\n",
+                       node->full_name);
+               return -ENXIO;
+       }
+
+       reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+       if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
+               pr_err("%s: no distributor detected, giving up\n",
+                       node->full_name);
+               err = -ENODEV;
+               goto out_unmap_dist;
+       }
+
+       if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
+               redist_regions = 1;
+
+       redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
+       if (!redist_base) {
+               err = -ENOMEM;
+               goto out_unmap_dist;
+       }
+
+       for (i = 0; i < redist_regions; i++) {
+               redist_base[i] = of_iomap(node, 1 + i);
+               if (!redist_base[i]) {
+                       pr_err("%s: couldn't map region %d\n",
+                              node->full_name, i);
+                       err = -ENODEV;
+                       goto out_unmap_rdist;
+               }
+       }
+
+       if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
+               redist_stride = 0;
+
+       gic_data.dist_base = dist_base;
+       gic_data.redist_base = redist_base;
+       gic_data.redist_regions = redist_regions;
+       gic_data.redist_stride = redist_stride;
+
+       /*
+        * Find out how many interrupts are supported.
+        * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
+        */
+       gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
+       gic_irqs = (gic_irqs + 1) * 32;
+       if (gic_irqs > 1020)
+               gic_irqs = 1020;
+       gic_data.irq_nr = gic_irqs;
+
+       gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
+                                             &gic_data);
+       gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));
+
+       if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
+               err = -ENOMEM;
+               goto out_free;
+       }
+
+       set_handle_irq(gic_handle_irq);
+
+       gic_smp_init();
+       gic_dist_init();
+       gic_cpu_init();
+
+       return 0;
+
+out_free:
+       if (gic_data.domain)
+               irq_domain_remove(gic_data.domain);
+       free_percpu(gic_data.rdist);
+out_unmap_rdist:
+       for (i = 0; i < redist_regions; i++)
+               if (redist_base[i])
+                       iounmap(redist_base[i]);
+       kfree(redist_base);
+out_unmap_dist:
+       iounmap(dist_base);
+       return err;
+}
+
+IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
index 8cb21def5508e7de41f41788b607051624e46b27..ce24a7e86349bf3549abcc133ad276c27c67f69c 100644 (file)
@@ -1,6 +1,4 @@
 /*
- *  linux/arch/arm/common/gic.c
- *
  *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
 #include <linux/irqchip/arm-gic.h>
 #include <trace/events/arm-ipi.h>
 
+#include <asm/cputype.h>
 #include <asm/irq.h>
 #include <asm/exception.h>
 #include <asm/smp_plat.h>
 
+#include "irq-gic-common.h"
 #include "irqchip.h"
 
 union gic_base {
        void __iomem *common_base;
-       void __percpu __iomem **percpu_base;
+       void __percpu * __iomem *percpu_base;
 };
 
 struct gic_chip_data {
@@ -189,12 +189,6 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 {
        void __iomem *base = gic_dist_base(d);
        unsigned int gicirq = gic_irq(d);
-       u32 enablemask = 1 << (gicirq % 32);
-       u32 enableoff = (gicirq / 32) * 4;
-       u32 confmask = 0x2 << ((gicirq % 16) * 2);
-       u32 confoff = (gicirq / 16) * 4;
-       bool enabled = false;
-       u32 val;
 
        /* Interrupt configuration for SGIs can't be changed */
        if (gicirq < 16)
@@ -208,25 +202,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
        if (gic_arch_extn.irq_set_type)
                gic_arch_extn.irq_set_type(d, type);
 
-       val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
-       if (type == IRQ_TYPE_LEVEL_HIGH)
-               val &= ~confmask;
-       else if (type == IRQ_TYPE_EDGE_RISING)
-               val |= confmask;
-
-       /*
-        * As recommended by the spec, disable the interrupt before changing
-        * the configuration
-        */
-       if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
-               writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
-               enabled = true;
-       }
-
-       writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
-
-       if (enabled)
-               writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+       gic_configure_irq(gicirq, type, base, NULL);
 
        raw_spin_unlock(&irq_controller_lock);
 
@@ -311,7 +287,7 @@ static int gic_set_wake(struct irq_data *d, unsigned int on)
 #define gic_set_wake   NULL
 #endif
 
-static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 {
        u32 irqstat, irqnr;
        struct gic_chip_data *gic = &gic_data[0];
@@ -319,7 +295,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
 
        do {
                irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
-               irqnr = irqstat & ~0x1c00;
+               irqnr = irqstat & GICC_IAR_INT_ID_MASK;
 
                if (likely(irqnr > 15 && irqnr < 1021)) {
                        irqnr = irq_find_mapping(gic->domain, irqnr);
@@ -414,12 +390,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
 
        writel_relaxed(0, base + GIC_DIST_CTRL);
 
-       /*
-        * Set all global interrupts to be level triggered, active low.
-        */
-       for (i = 32; i < gic_irqs; i += 16)
-               writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
-
        /*
         * Set all global interrupts to this CPU only.
         */
@@ -429,25 +399,14 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
        for (i = 32; i < gic_irqs; i += 4)
                writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
-       /*
-        * Set priority on all global interrupts.
-        */
-       for (i = 32; i < gic_irqs; i += 4)
-               writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
-
-       /*
-        * Disable all interrupts.  Leave the PPI and SGIs alone
-        * as these enables are banked registers.
-        */
-       for (i = 32; i < gic_irqs; i += 32)
-               writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
+       gic_dist_config(base, gic_irqs, NULL);
 
 #ifdef CONFIG_FIQ_DEBUGGER
        // set all the interrupt to non-secure state
        for (i = 0; i < gic_irqs; i += 32) {
                writel_relaxed(0xffffffff, base + GIC_DIST_IGROUP + i * 4 / 32);
        }
-       dsb();
+       dsb(sy);
        writel_relaxed(3, base + GIC_DIST_CTRL);
 #else
        writel_relaxed(1, base + GIC_DIST_CTRL);
@@ -476,18 +435,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
                if (i != cpu)
                        gic_cpu_map[i] &= ~cpu_mask;
 
-       /*
-        * Deal with the banked PPI and SGI interrupts - disable all
-        * PPI interrupts, ensure all SGI interrupts are enabled.
-        */
-       writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
-       writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
-
-       /*
-        * Set priority on PPI and SGI interrupts
-        */
-       for (i = 0; i < 32; i += 4)
-               writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+       gic_cpu_config(dist_base, NULL);
 
        writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
 #ifdef CONFIG_FIQ_DEBUGGER
@@ -701,7 +649,7 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
 #endif
 
 #ifdef CONFIG_SMP
-void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
        int cpu;
        unsigned long flags, map = 0;
@@ -716,9 +664,9 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 
        /*
         * Ensure that stores to Normal memory are visible to the
-        * other CPUs before issuing the IPI.
+        * other CPUs before they observe us issuing the IPI.
         */
-       dsb();
+       dmb(ishst);
 
        /* this always happens on GIC0 */
 #ifdef CONFIG_FIQ_DEBUGGER
@@ -764,12 +712,12 @@ int gic_get_cpu_id(unsigned int cpu)
                return -1;
        cpu_bit = gic_cpu_map[cpu];
        if (cpu_bit & (cpu_bit - 1))
-              return -1;
+               return -1;
        return __ffs(cpu_bit);
 }
 
 /*
- * gic_migrate_target - migrate IRQs to another PU interface
+ * gic_migrate_target - migrate IRQs to another CPU interface
  *
  * @new_cpu_id: the CPU target ID to migrate IRQs to
  *
@@ -780,10 +728,10 @@ int gic_get_cpu_id(unsigned int cpu)
  */
 void gic_migrate_target(unsigned int new_cpu_id)
 {
-       unsigned int old_cpu_id, gic_irqs, gic_nr = 0;
+       unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
        void __iomem *dist_base;
        int i, ror_val, cpu = smp_processor_id();
-       u32 val, old_mask, active_mask;
+       u32 val, cur_target_mask, active_mask;
 
        if (gic_nr >= MAX_GIC_NR)
                BUG();
@@ -793,21 +741,27 @@ void gic_migrate_target(unsigned int new_cpu_id)
                return;
        gic_irqs = gic_data[gic_nr].gic_irqs;
 
-       old_cpu_id = __ffs(gic_cpu_map[cpu]);
-       old_mask = 0x01010101 << old_cpu_id;
-       ror_val = (old_cpu_id - new_cpu_id) & 31;
+       cur_cpu_id = __ffs(gic_cpu_map[cpu]);
+       cur_target_mask = 0x01010101 << cur_cpu_id;
+       ror_val = (cur_cpu_id - new_cpu_id) & 31;
 
        raw_spin_lock(&irq_controller_lock);
 
+       /* Update the target interface for this logical CPU */
        gic_cpu_map[cpu] = 1 << new_cpu_id;
 
+       /*
+        * Find all the peripheral interrupts targetting the current
+        * CPU interface and migrate them to the new CPU interface.
+        * We skip DIST_TARGET 0 to 7 as they are read-only.
+        */
        for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
                val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
-               active_mask = val & old_mask;
+               active_mask = val & cur_target_mask;
                if (active_mask) {
                        val &= ~active_mask;
                        val |= ror32(active_mask, ror_val);
-                       writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
+                       writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
                }
        }
 
@@ -815,7 +769,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
 
        /*
         * Now let's migrate and clear any potential SGIs that might be
-        * pending for us (old_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
+        * pending for us (cur_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
         * is a banked register, we can only forward the SGI using
         * GIC_DIST_SOFTINT.  The original SGI source is lost but Linux
         * doesn't use that information anyway.
@@ -863,7 +817,7 @@ void __init gic_init_physaddr(struct device_node *node)
 }
 
 #else
-#define gic_init_physaddr(node)  do { } while(0)
+#define gic_init_physaddr(node)  do { } while (0)
 #endif
 
 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
@@ -878,16 +832,25 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                irq_set_chip_and_handler(irq, &gic_chip,
                                         handle_fasteoi_irq);
                set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+
+               gic_routable_irq_domain_ops->map(d, irq, hw);
        }
        irq_set_chip_data(irq, d->host_data);
        return 0;
 }
 
+static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
+{
+       gic_routable_irq_domain_ops->unmap(d, irq);
+}
+
 static int gic_irq_domain_xlate(struct irq_domain *d,
                                struct device_node *controller,
                                const u32 *intspec, unsigned int intsize,
                                unsigned long *out_hwirq, unsigned int *out_type)
 {
+       unsigned long ret = 0;
+
        if (d->of_node != controller)
                return -EINVAL;
        if (intsize < 3)
@@ -897,11 +860,20 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
        *out_hwirq = intspec[1] + 16;
 
        /* For SPIs, we need to add 16 more to get the GIC irq ID number */
-       if (!intspec[0])
-               *out_hwirq += 16;
+       if (!intspec[0]) {
+               ret = gic_routable_irq_domain_ops->xlate(d, controller,
+                                                        intspec,
+                                                        intsize,
+                                                        out_hwirq,
+                                                        out_type);
+
+               if (IS_ERR_VALUE(ret))
+                       return ret;
+       }
 
        *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
-       return 0;
+
+       return ret;
 }
 
 #ifdef CONFIG_SMP
@@ -930,11 +902,43 @@ static struct notifier_block __cpuinitdata gic_cpu_notifier = {
 };
 #endif
 
-const struct irq_domain_ops gic_irq_domain_ops = {
+static const struct irq_domain_ops gic_irq_domain_ops = {
        .map = gic_irq_domain_map,
+       .unmap = gic_irq_domain_unmap,
        .xlate = gic_irq_domain_xlate,
 };
 
+/* Default functions for routable irq domain */
+static int gic_routable_irq_domain_map(struct irq_domain *d, unsigned int irq,
+                             irq_hw_number_t hw)
+{
+       return 0;
+}
+
+static void gic_routable_irq_domain_unmap(struct irq_domain *d,
+                                         unsigned int irq)
+{
+}
+
+static int gic_routable_irq_domain_xlate(struct irq_domain *d,
+                               struct device_node *controller,
+                               const u32 *intspec, unsigned int intsize,
+                               unsigned long *out_hwirq,
+                               unsigned int *out_type)
+{
+       *out_hwirq += 16;
+       return 0;
+}
+
+const struct irq_domain_ops gic_default_routable_irq_domain_ops = {
+       .map = gic_routable_irq_domain_map,
+       .unmap = gic_routable_irq_domain_unmap,
+       .xlate = gic_routable_irq_domain_xlate,
+};
+
+const struct irq_domain_ops *gic_routable_irq_domain_ops =
+                                       &gic_default_routable_irq_domain_ops;
+
 void __init gic_init_bases(unsigned int gic_nr, int irq_start,
                           void __iomem *dist_base, void __iomem *cpu_base,
                           u32 percpu_offset, struct device_node *node)
@@ -942,6 +946,7 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
        irq_hw_number_t hwirq_base;
        struct gic_chip_data *gic;
        int gic_irqs, irq_base, i;
+       int nr_routable_irqs;
 
        BUG_ON(gic_nr >= MAX_GIC_NR);
 
@@ -960,7 +965,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
                }
 
                for_each_possible_cpu(cpu) {
-                       unsigned long offset = percpu_offset * cpu_logical_map(cpu);
+                       u32 mpidr = cpu_logical_map(cpu);
+                       u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+                       unsigned long offset = percpu_offset * core_id;
                        *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
                        *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
                }
@@ -1007,23 +1014,35 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
        gic->gic_irqs = gic_irqs;
 
        gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
-       irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
-       if (IS_ERR_VALUE(irq_base)) {
-               WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
-                    irq_start);
-               irq_base = irq_start;
+
+       if (of_property_read_u32(node, "arm,routable-irqs",
+                                &nr_routable_irqs)) {
+               irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
+                                          numa_node_id());
+               if (IS_ERR_VALUE(irq_base)) {
+                       WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
+                            irq_start);
+                       irq_base = irq_start;
+               }
+
+               gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
+                                       hwirq_base, &gic_irq_domain_ops, gic);
+       } else {
+               gic->domain = irq_domain_add_linear(node, nr_routable_irqs,
+                                                   &gic_irq_domain_ops,
+                                                   gic);
        }
-       gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
-                                   hwirq_base, &gic_irq_domain_ops, gic);
+
        if (WARN_ON(!gic->domain))
                return;
 
+       if (gic_nr == 0) {
 #ifdef CONFIG_SMP
-       set_smp_cross_call(gic_raise_softirq);
-       register_cpu_notifier(&gic_cpu_notifier);
+               set_smp_cross_call(gic_raise_softirq);
+               register_cpu_notifier(&gic_cpu_notifier);
 #endif
-
-       set_handle_irq(gic_handle_irq);
+               set_handle_irq(gic_handle_irq);
+       }
 
        gic_chip.flags |= gic_arch_extn.flags;
        gic_dist_init(gic);
@@ -1034,7 +1053,8 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 #ifdef CONFIG_OF
 static int gic_cnt __initdata;
 
-int __init gic_of_init(struct device_node *node, struct device_node *parent)
+static int __init
+gic_of_init(struct device_node *node, struct device_node *parent)
 {
        void __iomem *cpu_base;
        void __iomem *dist_base;
@@ -1064,8 +1084,10 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
        gic_cnt++;
        return 0;
 }
+IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
 IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
 IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
 IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
 IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
 
index f0a3347b644190b4ca76b199daf9903f32c2578a..516923926335285ce5628c205291eb22859952f3 100644 (file)
@@ -700,7 +700,7 @@ void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start)
         * interrupts are enabled.  We always leave interrupts enabled while
         * running the Guest.
         */
-       regs->eflags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
+       regs->eflags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
 
        /*
         * The "Extended Instruction Pointer" register says where the Guest is
index 607dd91e87a16b9a851ce103ece8a30a30c3b2ba..afcb430508eceac7887ee436ec19c15fcfad89cd 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Mailbox: Common code for Mailbox controllers and users
  *
- * Copyright (C) 2014 Linaro Ltd.
+ * Copyright (C) 2013-2014 Linaro Ltd.
  * Author: Jassi Brar <jassisinghbrar@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/bitops.h>
 #include <linux/mailbox_client.h>
 #include <linux/mailbox_controller.h>
 
-#define TXDONE_BY_IRQ  (1 << 0) /* controller has remote RTR irq */
-#define TXDONE_BY_POLL (1 << 1) /* controller can read status of last TX */
-#define TXDONE_BY_ACK  (1 << 2) /* S/W ACK recevied by Client ticks the TX */
+#define TXDONE_BY_IRQ  BIT(0) /* controller has remote RTR irq */
+#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
+#define TXDONE_BY_ACK  BIT(2) /* S/W ACK recevied by Client ticks the TX */
 
 static LIST_HEAD(mbox_cons);
 static DEFINE_MUTEX(con_mutex);
 
-static int _add_to_rbuf(struct mbox_chan *chan, void *mssg)
+static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
 {
        int idx;
        unsigned long flags;
@@ -37,7 +38,7 @@ static int _add_to_rbuf(struct mbox_chan *chan, void *mssg)
        /* See if there is any space left */
        if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
                spin_unlock_irqrestore(&chan->lock, flags);
-               return -ENOMEM;
+               return -ENOBUFS;
        }
 
        idx = chan->msg_free;
@@ -54,7 +55,7 @@ static int _add_to_rbuf(struct mbox_chan *chan, void *mssg)
        return idx;
 }
 
-static void _msg_submit(struct mbox_chan *chan)
+static void msg_submit(struct mbox_chan *chan)
 {
        unsigned count, idx;
        unsigned long flags;
@@ -63,10 +64,8 @@ static void _msg_submit(struct mbox_chan *chan)
 
        spin_lock_irqsave(&chan->lock, flags);
 
-       if (!chan->msg_count || chan->active_req) {
-               spin_unlock_irqrestore(&chan->lock, flags);
-               return;
-       }
+       if (!chan->msg_count || chan->active_req)
+               goto exit;
 
        count = chan->msg_count;
        idx = chan->msg_free;
@@ -83,7 +82,7 @@ static void _msg_submit(struct mbox_chan *chan)
                chan->active_req = data;
                chan->msg_count--;
        }
-
+exit:
        spin_unlock_irqrestore(&chan->lock, flags);
 }
 
@@ -98,13 +97,14 @@ static void tx_tick(struct mbox_chan *chan, int r)
        spin_unlock_irqrestore(&chan->lock, flags);
 
        /* Submit next message */
-       _msg_submit(chan);
+       msg_submit(chan);
 
        /* Notify the client */
+       if (mssg && chan->cl->tx_done)
+               chan->cl->tx_done(chan->cl, mssg, r);
+
        if (chan->cl->tx_block)
                complete(&chan->tx_complete);
-       else if (mssg && chan->cl->tx_done)
-               chan->cl->tx_done(chan->cl, mssg, r);
 }
 
 static void poll_txdone(unsigned long data)
@@ -125,15 +125,15 @@ static void poll_txdone(unsigned long data)
        }
 
        if (resched)
-               mod_timer(&mbox->poll,
-                       jiffies + msecs_to_jiffies(mbox->period));
+               mod_timer(&mbox->poll, jiffies +
+                               msecs_to_jiffies(mbox->txpoll_period));
 }
 
 /**
  * mbox_chan_received_data - A way for controller driver to push data
  *                             received from remote to the upper layer.
  * @chan: Pointer to the mailbox channel on which RX happened.
- * @data: Client specific message typecasted as void *
+ * @mssg: Client specific message typecasted as void *
  *
  * After startup and before shutdown any data received on the chan
  * is passed on to the API via atomic mbox_chan_received_data().
@@ -160,7 +160,8 @@ EXPORT_SYMBOL_GPL(mbox_chan_received_data);
 void mbox_chan_txdone(struct mbox_chan *chan, int r)
 {
        if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
-               pr_err("Controller can't run the TX ticker\n");
+               dev_err(chan->mbox->dev,
+                      "Controller can't run the TX ticker\n");
                return;
        }
 
@@ -180,7 +181,7 @@ EXPORT_SYMBOL_GPL(mbox_chan_txdone);
 void mbox_client_txdone(struct mbox_chan *chan, int r)
 {
        if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
-               pr_err("Client can't run the TX ticker\n");
+               dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
                return;
        }
 
@@ -227,8 +228,6 @@ EXPORT_SYMBOL_GPL(mbox_client_peek_data);
  * is not queued, a negative token is returned. Upon failure or successful
  * TX, the API calls 'tx_done' from atomic context, from which the client
  * could submit yet another request.
- *  In blocking mode, 'tx_done' is not called, effectively making the
- * queue length 1.
  * The pointer to message should be preserved until it is sent
  * over the chan, i.e, tx_done() is made.
  * This function could be called from atomic context as it simply
@@ -245,15 +244,13 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg)
        if (!chan || !chan->cl)
                return -EINVAL;
 
-       t = _add_to_rbuf(chan, mssg);
+       t = add_to_rbuf(chan, mssg);
        if (t < 0) {
-               pr_err("Try increasing MBOX_TX_QUEUE_LEN\n");
+               dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
                return t;
        }
 
-       _msg_submit(chan);
-
-       init_completion(&chan->tx_complete);
+       msg_submit(chan);
 
        if (chan->txdone_method == TXDONE_BY_POLL)
                poll_txdone((unsigned long)chan->mbox);
@@ -262,7 +259,7 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg)
                unsigned long wait;
                int ret;
 
-               if (!chan->cl->tx_tout) /* wait for ever */
+               if (!chan->cl->tx_tout) /* wait forever */
                        wait = msecs_to_jiffies(3600000);
                else
                        wait = msecs_to_jiffies(chan->cl->tx_tout);
@@ -281,6 +278,7 @@ EXPORT_SYMBOL_GPL(mbox_send_message);
 /**
  * mbox_request_channel - Request a mailbox channel.
  * @cl: Identity of the client requesting the channel.
+ * @index: Index of mailbox specifier in 'mboxes' property.
  *
  * The Client specifies its requirements and capabilities while asking for
  * a mailbox channel. It can't be called from atomic context.
@@ -294,64 +292,42 @@ EXPORT_SYMBOL_GPL(mbox_send_message);
  * Return: Pointer to the channel assigned to the client if successful.
  *             ERR_PTR for request failure.
  */
-struct mbox_chan *mbox_request_channel(struct mbox_client *cl)
+struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
 {
        struct device *dev = cl->dev;
        struct mbox_controller *mbox;
        struct of_phandle_args spec;
        struct mbox_chan *chan;
        unsigned long flags;
-       int count, i, ret;
+       int ret;
 
        if (!dev || !dev->of_node) {
-               pr_err("%s: No owner device node\n", __func__);
-               return ERR_PTR(-ENODEV);
-       }
-
-       count = of_property_count_strings(dev->of_node, "mbox-names");
-       if (count < 0) {
-               pr_err("%s: mbox-names property of node '%s' missing\n",
-                       __func__, dev->of_node->full_name);
+               pr_debug("%s: No owner device node\n", __func__);
                return ERR_PTR(-ENODEV);
        }
 
        mutex_lock(&con_mutex);
 
-       ret = -ENODEV;
-       for (i = 0; i < count; i++) {
-               const char *s;
-
-               if (of_property_read_string_index(dev->of_node,
-                                               "mbox-names", i, &s))
-                       continue;
-
-               if (strcmp(cl->chan_name, s))
-                       continue;
-
-               if (of_parse_phandle_with_args(dev->of_node,
-                                        "mbox", "#mbox-cells", i, &spec))
-                       continue;
-
-               chan = NULL;
-               list_for_each_entry(mbox, &mbox_cons, node)
-                       if (mbox->dev->of_node == spec.np) {
-                               chan = mbox->of_xlate(mbox, &spec);
-                               break;
-                       }
-
-               of_node_put(spec.np);
-
-               if (!chan)
-                       continue;
+       if (of_parse_phandle_with_args(dev->of_node, "mboxes",
+                                      "#mbox-cells", index, &spec)) {
+               dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+               mutex_unlock(&con_mutex);
+               return ERR_PTR(-ENODEV);
+       }
 
-               ret = -EBUSY;
-               if (!chan->cl && try_module_get(mbox->dev->driver->owner))
+       chan = NULL;
+       list_for_each_entry(mbox, &mbox_cons, node)
+               if (mbox->dev->of_node == spec.np) {
+                       chan = mbox->of_xlate(mbox, &spec);
                        break;
-       }
+               }
 
-       if (i == count) {
+       of_node_put(spec.np);
+
+       if (!chan || chan->cl || !try_module_get(mbox->dev->driver->owner)) {
+               dev_dbg(dev, "%s: mailbox not free\n", __func__);
                mutex_unlock(&con_mutex);
-               return ERR_PTR(ret);
+               return ERR_PTR(-EBUSY);
        }
 
        spin_lock_irqsave(&chan->lock, flags);
@@ -361,14 +337,14 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl)
        chan->cl = cl;
        init_completion(&chan->tx_complete);
 
-       if (chan->txdone_method == TXDONE_BY_POLL
-                       && cl->knows_txdone)
+       if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
                chan->txdone_method |= TXDONE_BY_ACK;
+
        spin_unlock_irqrestore(&chan->lock, flags);
 
        ret = chan->mbox->ops->startup(chan);
        if (ret) {
-               pr_err("Unable to startup the chan (%d)\n", ret);
+               dev_err(dev, "Unable to startup the chan (%d)\n", ret);
                mbox_free_channel(chan);
                chan = ERR_PTR(ret);
        }
@@ -406,7 +382,7 @@ EXPORT_SYMBOL_GPL(mbox_free_channel);
 
 static struct mbox_chan *
 of_mbox_index_xlate(struct mbox_controller *mbox,
-                               const struct of_phandle_args *sp)
+                   const struct of_phandle_args *sp)
 {
        int ind = sp->args[0];
 
@@ -420,7 +396,7 @@ of_mbox_index_xlate(struct mbox_controller *mbox,
  * mbox_controller_register - Register the mailbox controller
  * @mbox:      Pointer to the mailbox controller.
  *
- * The controller driver registers its communication chans
+ * The controller driver registers its communication channels
  */
 int mbox_controller_register(struct mbox_controller *mbox)
 {
@@ -445,6 +421,7 @@ int mbox_controller_register(struct mbox_controller *mbox)
 
        for (i = 0; i < mbox->num_chans; i++) {
                struct mbox_chan *chan = &mbox->chans[i];
+
                chan->cl = NULL;
                chan->mbox = mbox;
                chan->txdone_method = txdone;
@@ -463,7 +440,7 @@ int mbox_controller_register(struct mbox_controller *mbox)
 EXPORT_SYMBOL_GPL(mbox_controller_register);
 
 /**
- * mbox_controller_unregister - UnRegister the mailbox controller
+ * mbox_controller_unregister - Unregister the mailbox controller
  * @mbox:      Pointer to the mailbox controller.
  */
 void mbox_controller_unregister(struct mbox_controller *mbox)
index a6e985fcceb854ad039d1be87b769169d478cf6a..c9b4ca9e0696312d174b0122d62fe354fb0ab0a9 100644 (file)
@@ -462,6 +462,7 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
        c->n_buffers[dirty]++;
        b->list_mode = dirty;
        list_move(&b->lru_list, &c->lru[dirty]);
+       b->last_accessed = jiffies;
 }
 
 /*----------------------------------------------------------------
index a33e07f4222e716402d626322abc636866d889fd..de737ba1d3519126da16f464dba34dd6fa4b9ce6 100644 (file)
@@ -384,6 +384,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
 
        disk_super = dm_block_data(sblock);
 
+       /* Verify the data block size hasn't changed */
+       if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
+               DMERR("changing the data block size (from %u to %llu) is not supported",
+                     le32_to_cpu(disk_super->data_block_size),
+                     (unsigned long long)cmd->data_block_size);
+               r = -EINVAL;
+               goto bad;
+       }
+
        r = __check_incompat_features(disk_super, cmd);
        if (r < 0)
                goto bad;
index 5177ba54559bf2a65ebe0302a1f6fbe00931f0cf..7409d79729eebbb37b11d28c81e1a7fc8fe3fd95 100644 (file)
@@ -1506,6 +1506,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        unsigned int key_size, opt_params;
        unsigned long long tmpll;
        int ret;
+       size_t iv_size_padding;
        struct dm_arg_set as;
        const char *opt_string;
        char dummy;
@@ -1542,12 +1543,23 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        cc->dmreq_start = sizeof(struct ablkcipher_request);
        cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
-       cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
-       cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
-                          ~(crypto_tfm_ctx_alignment() - 1);
+       cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
+
+       if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
+               /* Allocate the padding exactly */
+               iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
+                               & crypto_ablkcipher_alignmask(any_tfm(cc));
+       } else {
+               /*
+                * If the cipher requires greater alignment than kmalloc
+                * alignment, we don't know the exact position of the
+                * initialization vector. We must assume worst case.
+                */
+               iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
+       }
 
        cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
-                       sizeof(struct dm_crypt_request) + cc->iv_size);
+                       sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
        if (!cc->req_pool) {
                ti->error = "Cannot allocate crypt request mempool";
                goto bad;
index 08d9a207259afdd6fc5ef92afeb403357c1e8a1c..c69d0b787746c26676645851da828301f355bf53 100644 (file)
@@ -272,7 +272,7 @@ int dm_ulog_tfr_init(void)
 
        r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
        if (r) {
-               cn_del_callback(&ulog_cn_id);
+               kfree(prealloced_cn_msg);
                return r;
        }
 
index 2dea49c4279e23db60239b7c20b8f08040599ae6..84cddccc024978f50790395c3c04c9ba1d8343f2 100644 (file)
@@ -785,8 +785,7 @@ struct dm_raid_superblock {
        __le32 layout;
        __le32 stripe_sectors;
 
-       __u8 pad[452];          /* Round struct to 512 bytes. */
-                               /* Always set to 0 when writing. */
+       /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
 } __packed;
 
 static int read_disk_sb(struct md_rdev *rdev, int size)
@@ -823,7 +822,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
                    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
                        failed_devices |= (1ULL << i);
 
-       memset(sb, 0, sizeof(*sb));
+       memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
 
        sb->magic = cpu_to_le32(DM_RAID_MAGIC);
        sb->features = cpu_to_le32(0);  /* No features yet */
@@ -858,7 +857,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
        uint64_t events_sb, events_refsb;
 
        rdev->sb_start = 0;
-       rdev->sb_size = sizeof(*sb);
+       rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
+       if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
+               DMERR("superblock size of a logical block is no longer valid");
+               return -EINVAL;
+       }
 
        ret = read_disk_sb(rdev, rdev->sb_size);
        if (ret)
index 5f49d704f275d6b6da43762e82fd1cc5e0167a5a..3b1503dc1f13e4eeb309c4bf1c6437f4684be325 100644 (file)
@@ -591,6 +591,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
 
        disk_super = dm_block_data(sblock);
 
+       /* Verify the data block size hasn't changed */
+       if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
+               DMERR("changing the data block size (from %u to %llu) is not supported",
+                     le32_to_cpu(disk_super->data_block_size),
+                     (unsigned long long)pmd->data_block_size);
+               r = -EINVAL;
+               goto bad_unlock_sblock;
+       }
+
        r = __check_incompat_features(disk_super, pmd);
        if (r < 0)
                goto bad_unlock_sblock;
index 37d367bb9aa8976653d5bbc80b29d0dfbf037372..bf2b80d5c4707a64210b5e57deb785069dc7d921 100644 (file)
@@ -42,6 +42,12 @@ struct btree_node {
 } __packed;
 
 
+/*
+ * Locks a block using the btree node validator.
+ */
+int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
+                struct dm_block **result);
+
 void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
                  struct dm_btree_value_type *vt);
 
index cf9fd676ae444ad29f8fb6f6ef58d85706438092..1b5e13ec7f96a670ed7a9b5b472a5d2ee95a7dff 100644 (file)
@@ -92,7 +92,7 @@ struct dm_block_validator btree_node_validator = {
 
 /*----------------------------------------------------------------*/
 
-static int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
+int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
                 struct dm_block **result)
 {
        return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
index 35865425e4b4443e925014fd918655ef51c07d57..0a7592e88811e8aa45d9b89225bc98a459e0ee64 100644 (file)
@@ -812,22 +812,26 @@ EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
  * FIXME: We shouldn't use a recursive algorithm when we have limited stack
  * space.  Also this only works for single level trees.
  */
-static int walk_node(struct ro_spine *s, dm_block_t block,
+static int walk_node(struct dm_btree_info *info, dm_block_t block,
                     int (*fn)(void *context, uint64_t *keys, void *leaf),
                     void *context)
 {
        int r;
        unsigned i, nr;
+       struct dm_block *node;
        struct btree_node *n;
        uint64_t keys;
 
-       r = ro_step(s, block);
-       n = ro_node(s);
+       r = bn_read_lock(info, block, &node);
+       if (r)
+               return r;
+
+       n = dm_block_data(node);
 
        nr = le32_to_cpu(n->header.nr_entries);
        for (i = 0; i < nr; i++) {
                if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
-                       r = walk_node(s, value64(n, i), fn, context);
+                       r = walk_node(info, value64(n, i), fn, context);
                        if (r)
                                goto out;
                } else {
@@ -839,7 +843,7 @@ static int walk_node(struct ro_spine *s, dm_block_t block,
        }
 
 out:
-       ro_pop(s);
+       dm_tm_unlock(info->tm, node);
        return r;
 }
 
@@ -847,15 +851,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
                  int (*fn)(void *context, uint64_t *keys, void *leaf),
                  void *context)
 {
-       int r;
-       struct ro_spine spine;
-
        BUG_ON(info->levels > 1);
-
-       init_ro_spine(&spine, info);
-       r = walk_node(&spine, root, fn, context);
-       exit_ro_spine(&spine);
-
-       return r;
+       return walk_node(info, root, fn, context);
 }
 EXPORT_SYMBOL_GPL(dm_btree_walk);
index 75771b2077c00f28950169956f9ac4baf21cbe4d..e885dbf08c40d7967d9c588e46fd259ebed1a236 100644 (file)
@@ -1406,12 +1406,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
                mddev->degraded++;
                set_bit(Faulty, &rdev->flags);
                spin_unlock_irqrestore(&conf->device_lock, flags);
-               /*
-                * if recovery is running, make sure it aborts.
-                */
-               set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        } else
                set_bit(Faulty, &rdev->flags);
+       /*
+        * if recovery is running, make sure it aborts.
+        */
+       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
        printk(KERN_ALERT
               "md/raid1:%s: Disk failure on %s, disabling device.\n"
@@ -2051,7 +2051,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                        d--;
                        rdev = conf->mirrors[d].rdev;
                        if (rdev &&
-                           test_bit(In_sync, &rdev->flags))
+                           !test_bit(Faulty, &rdev->flags))
                                r1_sync_page_io(rdev, sect, s,
                                                conf->tmppage, WRITE);
                }
@@ -2063,7 +2063,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                        d--;
                        rdev = conf->mirrors[d].rdev;
                        if (rdev &&
-                           test_bit(In_sync, &rdev->flags)) {
+                           !test_bit(Faulty, &rdev->flags)) {
                                if (r1_sync_page_io(rdev, sect, s,
                                                    conf->tmppage, READ)) {
                                        atomic_add(s, &rdev->corrected_errors);
index d2f8cd332b4a628dba9b18c3469c880deacb76d4..a1ea2a75391240548b5283c74dd8314745d9649d 100644 (file)
@@ -1681,11 +1681,11 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
                spin_lock_irqsave(&conf->device_lock, flags);
                mddev->degraded++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
-               /*
-                * if recovery is running, make sure it aborts.
-                */
-               set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        }
+       /*
+        * If recovery is running, make sure it aborts.
+        */
+       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        set_bit(Blocked, &rdev->flags);
        set_bit(Faulty, &rdev->flags);
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -2948,6 +2948,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                 */
                if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
                        end_reshape(conf);
+                       close_sync(conf);
                        return 0;
                }
 
@@ -4398,7 +4399,7 @@ read_more:
        read_bio->bi_private = r10_bio;
        read_bio->bi_end_io = end_sync_read;
        read_bio->bi_rw = READ;
-       read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
+       read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
        read_bio->bi_flags |= 1 << BIO_UPTODATE;
        read_bio->bi_vcnt = 0;
        read_bio->bi_size = 0;
index 5e3c25d4562c2e497d8b50d6c21d9dfd75e2bdb4..2332b5ced0dd0e9069237779e5edee246352c9aa 100644 (file)
 #include "raid0.h"
 #include "bitmap.h"
 
+static bool devices_handle_discard_safely = false;
+module_param(devices_handle_discard_safely, bool, 0644);
+MODULE_PARM_DESC(devices_handle_discard_safely,
+                "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
 /*
  * Stripe cache
  */
@@ -3561,6 +3565,8 @@ static void handle_stripe(struct stripe_head *sh)
                                set_bit(R5_Wantwrite, &dev->flags);
                                if (prexor)
                                        continue;
+                               if (s.failed > 1)
+                                       continue;
                                if (!test_bit(R5_Insync, &dev->flags) ||
                                    ((i == sh->pd_idx || i == sh->qd_idx)  &&
                                     s.failed == 0))
@@ -5609,7 +5615,7 @@ static int run(struct mddev *mddev)
                mddev->queue->limits.discard_granularity = stripe;
                /*
                 * unaligned part of discard request will be ignored, so can't
-                * guarantee discard_zerors_data
+                * guarantee discard_zeroes_data
                 */
                mddev->queue->limits.discard_zeroes_data = 0;
 
@@ -5634,6 +5640,18 @@ static int run(struct mddev *mddev)
                            !bdev_get_queue(rdev->bdev)->
                                                limits.discard_zeroes_data)
                                discard_supported = false;
+                       /* Unfortunately, discard_zeroes_data is not currently
+                        * a guarantee - just a hint.  So we only allow DISCARD
+                        * if the sysadmin has confirmed that only safe devices
+                        * are in use by setting a module parameter.
+                        */
+                       if (!devices_handle_discard_safely) {
+                               if (discard_supported) {
+                                       pr_info("md/raid456: discard support disabled due to uncertainty.\n");
+                                       pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
+                               }
+                               discard_supported = false;
+                       }
                }
 
                if (discard_supported &&
index 1e344b033277c3ccf362b163aa3069f431327346..22e8c2032f6d8280d2e302fe2b21baadd3f127eb 100644 (file)
@@ -864,6 +864,13 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
        memcpy(&state->frontend.ops, &ds3000_ops,
                        sizeof(struct dvb_frontend_ops));
        state->frontend.demodulator_priv = state;
+
+       /*
+        * Some devices like T480 starts with voltage on. Be sure
+        * to turn voltage off during init, as this can otherwise
+        * interfere with Unicable SCR systems.
+        */
+       ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
        return &state->frontend;
 
 error3:
index 36eb27d3fdf1ee5321a9f273a9b229b6bab6c2c4..def7812d7b226dcf98efac417f4c84e30db595d7 100644 (file)
@@ -667,6 +667,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
        int ret, i;
        u8 mode, rolloff, pilot, inversion, div;
+       fe_modulation_t modulation;
 
        dev_dbg(&priv->i2c->dev, "%s: delivery_system=%d modulation=%d " \
                "frequency=%d symbol_rate=%d inversion=%d pilot=%d " \
@@ -701,10 +702,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
 
        switch (c->delivery_system) {
        case SYS_DVBS:
+               modulation = QPSK;
                rolloff = 0;
                pilot = 2;
                break;
        case SYS_DVBS2:
+               modulation = c->modulation;
+
                switch (c->rolloff) {
                case ROLLOFF_20:
                        rolloff = 2;
@@ -749,7 +753,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
 
        for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
                if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
-                       c->modulation == TDA10071_MODCOD[i].modulation &&
+                       modulation == TDA10071_MODCOD[i].modulation &&
                        c->fec_inner == TDA10071_MODCOD[i].fec) {
                        mode = TDA10071_MODCOD[i].val;
                        dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
index 28b5121881f5792397ff0a674220caa4b4d7108a..09f4387dbc49dc0f3b1b51126524edec9df16063 100644 (file)
@@ -293,7 +293,7 @@ static int tda7432_s_ctrl(struct v4l2_ctrl *ctrl)
                if (t->mute->val) {
                        lf |= TDA7432_MUTE;
                        lr |= TDA7432_MUTE;
-                       lf |= TDA7432_MUTE;
+                       rf |= TDA7432_MUTE;
                        rr |= TDA7432_MUTE;
                }
                /* Mute & update balance*/
index 79715f9feb0a4215ec26ce5ed114608c565792c7..fdb5840f034ba7915c610e4c8a3bf9d6ccebf914 100644 (file)
@@ -106,8 +106,6 @@ static long media_device_enum_entities(struct media_device *mdev,
        if (ent->name) {
                strncpy(u_ent.name, ent->name, sizeof(u_ent.name));
                u_ent.name[sizeof(u_ent.name) - 1] = '\0';
-       } else {
-               memset(u_ent.name, 0, sizeof(u_ent.name));
        }
        u_ent.type = ent->type;
        u_ent.revision = ent->revision;
index 16e89f026bca39fea89df7076851079be537ca0e..018cb904533026b0e831d10cf877fe42e6df0c34 100644 (file)
@@ -1092,6 +1092,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
                setup.addr = ADDR_UNSET;
                setup.type = cx->options.tuner;
                setup.mode_mask = T_ANALOG_TV;  /* matches TV tuners */
+               setup.config = NULL;
                if (cx->options.radio > 0)
                        setup.mode_mask |= T_RADIO;
                setup.tuner_callback = (setup.type == TUNER_XC2028) ?
index 2018befabb5ab65f8d833f5d7052823c3e9af9ca..e71decbfd0afdf495c28a407930eb6768233d06c 100644 (file)
@@ -93,7 +93,7 @@ struct xc4000_priv {
        struct firmware_description *firm;
        int     firm_size;
        u32     if_khz;
-       u32     freq_hz;
+       u32     freq_hz, freq_offset;
        u32     bandwidth;
        u8      video_standard;
        u8      rf_mode;
@@ -1157,14 +1157,14 @@ static int xc4000_set_params(struct dvb_frontend *fe)
        case SYS_ATSC:
                dprintk(1, "%s() VSB modulation\n", __func__);
                priv->rf_mode = XC_RF_MODE_AIR;
-               priv->freq_hz = c->frequency - 1750000;
+               priv->freq_offset = 1750000;
                priv->video_standard = XC4000_DTV6;
                type = DTV6;
                break;
        case SYS_DVBC_ANNEX_B:
                dprintk(1, "%s() QAM modulation\n", __func__);
                priv->rf_mode = XC_RF_MODE_CABLE;
-               priv->freq_hz = c->frequency - 1750000;
+               priv->freq_offset = 1750000;
                priv->video_standard = XC4000_DTV6;
                type = DTV6;
                break;
@@ -1173,23 +1173,23 @@ static int xc4000_set_params(struct dvb_frontend *fe)
                dprintk(1, "%s() OFDM\n", __func__);
                if (bw == 0) {
                        if (c->frequency < 400000000) {
-                               priv->freq_hz = c->frequency - 2250000;
+                               priv->freq_offset = 2250000;
                        } else {
-                               priv->freq_hz = c->frequency - 2750000;
+                               priv->freq_offset = 2750000;
                        }
                        priv->video_standard = XC4000_DTV7_8;
                        type = DTV78;
                } else if (bw <= 6000000) {
                        priv->video_standard = XC4000_DTV6;
-                       priv->freq_hz = c->frequency - 1750000;
+                       priv->freq_offset = 1750000;
                        type = DTV6;
                } else if (bw <= 7000000) {
                        priv->video_standard = XC4000_DTV7;
-                       priv->freq_hz = c->frequency - 2250000;
+                       priv->freq_offset = 2250000;
                        type = DTV7;
                } else {
                        priv->video_standard = XC4000_DTV8;
-                       priv->freq_hz = c->frequency - 2750000;
+                       priv->freq_offset = 2750000;
                        type = DTV8;
                }
                priv->rf_mode = XC_RF_MODE_AIR;
@@ -1200,6 +1200,8 @@ static int xc4000_set_params(struct dvb_frontend *fe)
                goto fail;
        }
 
+       priv->freq_hz = c->frequency - priv->freq_offset;
+
        dprintk(1, "%s() frequency=%d (compensated)\n",
                __func__, priv->freq_hz);
 
@@ -1520,7 +1522,7 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
 {
        struct xc4000_priv *priv = fe->tuner_priv;
 
-       *freq = priv->freq_hz;
+       *freq = priv->freq_hz + priv->freq_offset;
 
        if (debug) {
                mutex_lock(&priv->lock);
index 5cd09a681b6a297038bfbefa4a9c0a8a8067c075..b2d9e9cb97f70e264a2b939302c40c7d00c9a9d0 100644 (file)
@@ -55,7 +55,7 @@ struct xc5000_priv {
 
        u32 if_khz;
        u16 xtal_khz;
-       u32 freq_hz;
+       u32 freq_hz, freq_offset;
        u32 bandwidth;
        u8  video_standard;
        u8  rf_mode;
@@ -755,13 +755,13 @@ static int xc5000_set_params(struct dvb_frontend *fe)
        case SYS_ATSC:
                dprintk(1, "%s() VSB modulation\n", __func__);
                priv->rf_mode = XC_RF_MODE_AIR;
-               priv->freq_hz = freq - 1750000;
+               priv->freq_offset = 1750000;
                priv->video_standard = DTV6;
                break;
        case SYS_DVBC_ANNEX_B:
                dprintk(1, "%s() QAM modulation\n", __func__);
                priv->rf_mode = XC_RF_MODE_CABLE;
-               priv->freq_hz = freq - 1750000;
+               priv->freq_offset = 1750000;
                priv->video_standard = DTV6;
                break;
        case SYS_ISDBT:
@@ -776,15 +776,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
                switch (bw) {
                case 6000000:
                        priv->video_standard = DTV6;
-                       priv->freq_hz = freq - 1750000;
+                       priv->freq_offset = 1750000;
                        break;
                case 7000000:
                        priv->video_standard = DTV7;
-                       priv->freq_hz = freq - 2250000;
+                       priv->freq_offset = 2250000;
                        break;
                case 8000000:
                        priv->video_standard = DTV8;
-                       priv->freq_hz = freq - 2750000;
+                       priv->freq_offset = 2750000;
                        break;
                default:
                        printk(KERN_ERR "xc5000 bandwidth not set!\n");
@@ -798,15 +798,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
                priv->rf_mode = XC_RF_MODE_CABLE;
                if (bw <= 6000000) {
                        priv->video_standard = DTV6;
-                       priv->freq_hz = freq - 1750000;
+                       priv->freq_offset = 1750000;
                        b = 6;
                } else if (bw <= 7000000) {
                        priv->video_standard = DTV7;
-                       priv->freq_hz = freq - 2250000;
+                       priv->freq_offset = 2250000;
                        b = 7;
                } else {
                        priv->video_standard = DTV7_8;
-                       priv->freq_hz = freq - 2750000;
+                       priv->freq_offset = 2750000;
                        b = 8;
                }
                dprintk(1, "%s() Bandwidth %dMHz (%d)\n", __func__,
@@ -817,6 +817,8 @@ static int xc5000_set_params(struct dvb_frontend *fe)
                return -EINVAL;
        }
 
+       priv->freq_hz = freq - priv->freq_offset;
+
        dprintk(1, "%s() frequency=%d (compensated to %d)\n",
                __func__, freq, priv->freq_hz);
 
@@ -1067,7 +1069,7 @@ static int xc5000_get_frequency(struct dvb_frontend *fe, u32 *freq)
 {
        struct xc5000_priv *priv = fe->tuner_priv;
        dprintk(1, "%s()\n", __func__);
-       *freq = priv->freq_hz;
+       *freq = priv->freq_hz + priv->freq_offset;
        return 0;
 }
 
index 75ac9947cdaca48aab2a7dd06805a6cf195be1ac..98e1b937b500d40fbe6c3e2db23b672ff9256d61 100644 (file)
@@ -788,11 +788,27 @@ static int au0828_i2s_init(struct au0828_dev *dev)
 
 /*
  * Auvitek au0828 analog stream enable
- * Please set interface0 to AS5 before enable the stream
  */
 static int au0828_analog_stream_enable(struct au0828_dev *d)
 {
+       struct usb_interface *iface;
+       int ret;
+
        dprintk(1, "au0828_analog_stream_enable called\n");
+
+       iface = usb_ifnum_to_if(d->usbdev, 0);
+       if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) {
+               dprintk(1, "Changing intf#0 to alt 5\n");
+               /* set au0828 interface0 to AS5 here again */
+               ret = usb_set_interface(d->usbdev, 0, 5);
+               if (ret < 0) {
+                       printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
+                       return -EBUSY;
+               }
+       }
+
+       /* FIXME: size should be calculated using d->width, d->height */
+
        au0828_writereg(d, AU0828_SENSORCTRL_VBI_103, 0x00);
        au0828_writereg(d, 0x106, 0x00);
        /* set x position */
@@ -1003,15 +1019,6 @@ static int au0828_v4l2_open(struct file *filp)
                return -ERESTARTSYS;
        }
        if (dev->users == 0) {
-               /* set au0828 interface0 to AS5 here again */
-               ret = usb_set_interface(dev->usbdev, 0, 5);
-               if (ret < 0) {
-                       mutex_unlock(&dev->lock);
-                       printk(KERN_INFO "Au0828 can't set alternate to 5!\n");
-                       kfree(fh);
-                       return -EBUSY;
-               }
-
                au0828_analog_stream_enable(dev);
                au0828_analog_stream_reset(dev);
 
@@ -1253,13 +1260,6 @@ static int au0828_set_format(struct au0828_dev *dev, unsigned int cmd,
                }
        }
 
-       /* set au0828 interface0 to AS5 here again */
-       ret = usb_set_interface(dev->usbdev, 0, 5);
-       if (ret < 0) {
-               printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
-               return -EBUSY;
-       }
-
        au0828_analog_stream_enable(dev);
 
        return 0;
index 32d60e5546bcbd7ec474dae36a345c493c31165e..a2737b4b090b60bd54aea26f5ee5d86f944db3c9 100644 (file)
@@ -696,13 +696,16 @@ static int em28xx_stop_streaming(struct vb2_queue *vq)
        }
 
        spin_lock_irqsave(&dev->slock, flags);
+       if (dev->usb_ctl.vid_buf != NULL) {
+               vb2_buffer_done(&dev->usb_ctl.vid_buf->vb, VB2_BUF_STATE_ERROR);
+               dev->usb_ctl.vid_buf = NULL;
+       }
        while (!list_empty(&vidq->active)) {
                struct em28xx_buffer *buf;
                buf = list_entry(vidq->active.next, struct em28xx_buffer, list);
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
        }
-       dev->usb_ctl.vid_buf = NULL;
        spin_unlock_irqrestore(&dev->slock, flags);
 
        return 0;
@@ -724,13 +727,16 @@ int em28xx_stop_vbi_streaming(struct vb2_queue *vq)
        }
 
        spin_lock_irqsave(&dev->slock, flags);
+       if (dev->usb_ctl.vbi_buf != NULL) {
+               vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb, VB2_BUF_STATE_ERROR);
+               dev->usb_ctl.vbi_buf = NULL;
+       }
        while (!list_empty(&vbiq->active)) {
                struct em28xx_buffer *buf;
                buf = list_entry(vbiq->active.next, struct em28xx_buffer, list);
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
        }
-       dev->usb_ctl.vbi_buf = NULL;
        spin_unlock_irqrestore(&dev->slock, flags);
 
        return 0;
index 6008c8d546a32e925ed73ba346fb4233510822a4..20d9c15a305d5ca972f4e834b6547e80aefec579 100644 (file)
@@ -945,6 +945,7 @@ static const struct usb_device_id device_table[] = {
        {USB_DEVICE(0x093a, 0x2620)},
        {USB_DEVICE(0x093a, 0x2621)},
        {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
+       {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
        {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
        {USB_DEVICE(0x093a, 0x2625)},
        {USB_DEVICE(0x093a, 0x2626)},
index 774ba0e820beaee594eadb6f99d9d79a17d75a37..eed70a4d24e6b264de1f1a48b579b6de4978ebdc 100644 (file)
@@ -81,7 +81,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
 }
 
 /*=========================================================================*/
-/* bufffer bits */
+/* buffer bits */
 
 /* function expects dev->io_mutex to be hold by caller */
 int hdpvr_cancel_queue(struct hdpvr_device *dev)
@@ -921,7 +921,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
        case V4L2_CID_MPEG_AUDIO_ENCODING:
                if (dev->flags & HDPVR_FLAG_AC3_CAP) {
                        opt->audio_codec = ctrl->val;
-                       return hdpvr_set_audio(dev, opt->audio_input,
+                       return hdpvr_set_audio(dev, opt->audio_input + 1,
                                              opt->audio_codec);
                }
                return 0;
@@ -1191,7 +1191,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
        v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
                V4L2_CID_MPEG_AUDIO_ENCODING,
                ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
-               0x7, V4L2_MPEG_AUDIO_ENCODING_AAC);
+               0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
        v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
                V4L2_CID_MPEG_VIDEO_ENCODING,
                V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
index 5c45c9d0712ddf949f0ac571b5649111585046ae..9c29552aedec2e7b1a08447619671cd65c170b77 100644 (file)
@@ -156,6 +156,9 @@ static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struc
                   0x00, 0x00, 0x00, 0x00,
                   0x00, 0x00 };
 
+       if (cmd->msg_len > sizeof(b) - 4)
+               return -EINVAL;
+
        memcpy(&b[4], cmd->msg, cmd->msg_len);
 
        state->config->send_command(fe, 0x72,
index 3fed63f4e02641538287cf244410037aac3b60a4..ec9a4fa3bc86641e9d148ff9efd2a3361cd38bd0 100644 (file)
@@ -485,16 +485,13 @@ static unsigned int clamp_align(unsigned int x, unsigned int min,
        /* Bits that must be zero to be aligned */
        unsigned int mask = ~((1 << align) - 1);
 
+       /* Clamp to aligned min and max */
+       x = clamp(x, (min + ~mask) & mask, max & mask);
+
        /* Round to nearest aligned value */
        if (align)
                x = (x + (1 << (align - 1))) & mask;
 
-       /* Clamp to aligned value of min and max */
-       if (x < min)
-               x = (min + ~mask) & mask;
-       else if (x > max)
-               x = max & mask;
-
        return x;
 }
 
index e3bdc3be91e12822bcf92c4538433522a2aaa886..5e47ba479e53889984756dc726789dc8b8b4a48d 100644 (file)
@@ -666,6 +666,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
         * to the userspace.
         */
        req->count = allocated_buffers;
+       q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
 
        return 0;
 }
@@ -714,6 +715,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
                memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
                memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
                q->memory = create->memory;
+               q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
        }
 
        num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
@@ -1355,6 +1357,7 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
         * dequeued in dqbuf.
         */
        list_add_tail(&vb->queued_entry, &q->queued_list);
+       q->waiting_for_buffers = false;
        vb->state = VB2_BUF_STATE_QUEUED;
 
        /*
@@ -1724,6 +1727,7 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
         * and videobuf, effectively returning control over them to userspace.
         */
        __vb2_queue_cancel(q);
+       q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
 
        dprintk(3, "Streamoff successful\n");
        return 0;
@@ -2009,9 +2013,16 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
        }
 
        /*
-        * There is nothing to wait for if no buffers have already been queued.
+        * There is nothing to wait for if the queue isn't streaming.
         */
-       if (list_empty(&q->queued_list))
+       if (!vb2_is_streaming(q))
+               return res | POLLERR;
+       /*
+        * For compatibility with vb1: if QBUF hasn't been called yet, then
+        * return POLLERR as well. This only affects capture queues, output
+        * queues will always initialize waiting_for_buffers to false.
+        */
+       if (q->waiting_for_buffers)
                return res | POLLERR;
 
        if (list_empty(&q->done_list))
index 982872864bbfd14c64681e912dbcef29913322f9..746203d851193a42d44f0f8acc76639ef9cce5d4 100755 (executable)
@@ -536,7 +536,7 @@ static inline void rk_cru_set_soft_reset(u32 idx, bool on , u32 RK_CRU_SOFTRST_C
                val = on ? 0x10001U << 8 : 0x10000U << 8;
        }
        writel_relaxed(val, reg);
-       dsb();
+       dsb(sy);
 }
 
 static void rk_camera_cif_reset(struct rk_camera_dev *pcdev, int only_rst)
@@ -3257,7 +3257,7 @@ exit:
     return err;
 }
 
-static int __exit rk_camera_remove(struct platform_device *pdev)
+static int rk_camera_remove(struct platform_device *pdev)
 {
     struct rk_camera_dev *pcdev = platform_get_drvdata(pdev);
     struct resource *res;
index e75fac61b5263005557730c4e806671e8569bfd2..1c6d2e2a8f2d36da488225e765b21ccf866cfa9c 100755 (executable)
@@ -1,8 +1,7 @@
 #ifndef __RKCAMSYS_GPIO_H__
 #define __RKCAMSYS_GPIO_H__
 
-//#include <mach/gpio.h>
-#include <asm/gpio.h>
+#include <linux/gpio.h>
 #if defined(CONFIG_ARCH_ROCKCHIP)
 #define RK30_PIN0_PA0 (0)
 #define NUM_GROUP      (32)
index 5a66051a2c445a457c39feb46b95bd33843759fb..a4daf9829f8805a5055962a466da83dfd3c4aa79 100755 (executable)
@@ -28,6 +28,7 @@
 #include <linux/mutex.h>
 #include <linux/regulator/machine.h>
 #include <linux/log2.h>
+#include <linux/gpio.h>
 //#include <mach/io.h>
 //#include <mach/gpio.h>
 //#include <mach/iomux.h>
@@ -36,8 +37,6 @@
 #include <linux/rockchip/iomap.h>
 #include <linux/rockchip/grf.h>
 
-#include <asm/gpio.h>
-#include <asm/system.h>        
 #include <asm/uaccess.h>
 
 #include <linux/of.h>
index 7d62386ee2eaba82b06ae70b14cd6ac4cc0cdfbc..12b70ae98c46225f6fb1179b720757ce47350a19 100755 (executable)
@@ -5,6 +5,7 @@
 
 static camsys_soc_priv_t* camsys_soc_p;
 
+#ifdef CONFIG_ARM
 #include "camsys_soc_rk3288.c"
 
 static int camsys_rk3288_cfg (camsys_soc_cfg_t cfg_cmd, void* cfg_para)
@@ -62,6 +63,7 @@ static int camsys_rk3288_cfg (camsys_soc_cfg_t cfg_cmd, void* cfg_para)
 
 
 }
+#endif
 
 camsys_soc_priv_t* camsys_soc_get(void)
 {
@@ -81,8 +83,10 @@ int camsys_soc_init(void)
     }
 
     if (soc_is_rk3288()) {
+#ifdef CONFIG_ARM
         strlcpy(camsys_soc_p->name,"camsys_rk3288",31);
         camsys_soc_p->soc_cfg = camsys_rk3288_cfg;
+#endif
     } else {
         camsys_err("camsys isn't support soc: 0x%lx!",rockchip_soc_id);
         goto fail;
index 5653e505f91ff0af540db4e7bb973ef3f03059a4..424f51d1e2ce0d9624d8fb49ae0ed470d9d87bdc 100644 (file)
@@ -1422,6 +1422,11 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto out_mptspi_probe;
         }
 
+       /* VMWare emulation doesn't properly implement WRITE_SAME
+        */
+       if (pdev->subsystem_vendor == 0x15AD)
+               sh->no_write_same = 1;
+
        spin_lock_irqsave(&ioc->FreeQlock, flags);
 
        /* Attach the SCSI Host to the IOC structure
index 759fae3ca7fb0dba592ab808d036f889bf96f922..a36f3f282ae758546f5d1f50bdea1a00503b6c0d 100644 (file)
@@ -445,7 +445,7 @@ static unsigned omap_usbhs_rev1_hostconfig(struct usbhs_hcd_omap *omap,
 
                for (i = 0; i < omap->nports; i++) {
                        if (is_ehci_phy_mode(pdata->port_mode[i])) {
-                               reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+                               reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
                                break;
                        }
                }
index 45f26be359eaf5e74055f7d762529f2513dd10ef..7e28bd0de5540015636b774061837879807c1b3c 100644 (file)
@@ -1137,7 +1137,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        pcr->msi_en = msi_en;
        if (pcr->msi_en) {
                ret = pci_enable_msi(pcidev);
-               if (ret < 0)
+               if (ret)
                        pcr->msi_en = false;
        }
 
index 99cc0b07a71313e1970aad5efa0521888c3f7141..0513ea0906dd6dbdccee36a64515f4a55e085d65 100644 (file)
@@ -71,7 +71,7 @@ static int mei_cl_device_probe(struct device *dev)
 
        dev_dbg(dev, "Device probe\n");
 
-       strncpy(id.name, dev_name(dev), MEI_CL_NAME_SIZE);
+       strlcpy(id.name, dev_name(dev), sizeof(id.name));
 
        return driver->probe(device, &id);
 }
index 0bb2aa2c6fb072a5ec83823a22b683f56f349f8b..07ed4b5b1659d66ae227efeb956d09b0a1a3f5dd 100644 (file)
@@ -405,6 +405,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
                        dev_err(&dev->pdev->dev, "failed to disconnect.\n");
                        goto free;
                }
+               cl->timer_count = MEI_CONNECT_TIMEOUT;
                mdelay(10); /* Wait for hardware disconnection ready */
                list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
        } else {
@@ -511,6 +512,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
                cl->timer_count = MEI_CONNECT_TIMEOUT;
                list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
        } else {
+               cl->state = MEI_FILE_INITIALIZING;
                list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
        }
 
index 994ca4aff1a37ecf1b6fc53a2b44e1f446b710dd..4b7ea3fb143c650aa3030ad697ea69ba1e5b8b98 100644 (file)
@@ -342,9 +342,10 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
        ndev = (struct mei_nfc_dev *) cldev->priv_data;
        dev = ndev->cl->dev;
 
+       err = -ENOMEM;
        mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL);
        if (!mei_buf)
-               return -ENOMEM;
+               goto out;
 
        hdr = (struct mei_nfc_hci_hdr *) mei_buf;
        hdr->cmd = MEI_NFC_CMD_HCI_SEND;
@@ -354,12 +355,9 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
        hdr->data_size = length;
 
        memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length);
-
        err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE);
        if (err < 0)
-               return err;
-
-       kfree(mei_buf);
+               goto out;
 
        if (!wait_event_interruptible_timeout(ndev->send_wq,
                                ndev->recv_req_id == ndev->req_id, HZ)) {
@@ -368,7 +366,8 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
        } else {
                ndev->req_id++;
        }
-
+out:
+       kfree(mei_buf);
        return err;
 }
 
index 761e0808dcf08eb7b2c1bce7ab831ded5f56e5ff..48db0a4d5c2b913f3979d3d67138a6e6f50615dc 100755 (executable)
@@ -519,7 +519,7 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
 
 config MMC_DW
        tristate "Synopsys DesignWare Memory Card Interface"
-       depends on ARM
+       depends on ARM || ARM64
        help
          This selects support for the Synopsys DesignWare Mobile Storage IP
          block, this provides host support for SD and MMC interfaces, in both
index a67adfdef224266ff9fd032995ebfb8d24155267..b6a18943df93de1df40ca5e7b3c41eced4e02f54 100755 (executable)
@@ -1684,12 +1684,12 @@ static void dw_mci_hw_reset(struct mmc_host *mmc)
        */
        mci_writel(slot->host, PWREN, 0x0);
        mci_writel(slot->host, RST_N, 0x0);
-       dsb();
+       dsb(sy);
        udelay(10); /* 10us for bad quality eMMc. */
 
        mci_writel(slot->host, PWREN, 0x1);
        mci_writel(slot->host, RST_N, 0x1);
-       dsb();
+       dsb(sy);
        usleep_range(500, 1000); /* at least 500(> 200us) */
 }
 
index 63021434c19c651273b8b2292a1cce1d71cc3b1e..2145d992de20b67a28ec9c2b7a08a9e0940eb87e 100644 (file)
@@ -341,6 +341,13 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
        }
 
        if (rsp_type == SD_RSP_TYPE_R2) {
+               /*
+                * The controller offloads the last byte {CRC-7, end bit 1'b1}
+                * of response type R2. Assign dummy CRC, 0, and end bit to the
+                * byte(ptr[16], goes into the LSB of resp[3] later).
+                */
+               ptr[16] = 1;
+
                for (i = 0; i < 4; i++) {
                        cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4);
                        dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n",
index 19d637266fcd47026c26b6172b052f1becc23ec4..71e4f6ccae2ffc446777ce255a6b20bba077e2ef 100644 (file)
@@ -1075,7 +1075,6 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
                        return;
        }
 
-       ftl_freepart(partition);
        kfree(partition);
 }
 
index 8c4eb287bbdb5924684e67257337b48acdcde5fe..e9b1797cdb5f06ac4e0d38c3fd4486dbfbc3d13a 100644 (file)
@@ -948,7 +948,7 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
        u32 val;
 
        val = readl(info->reg.gpmc_ecc_config);
-       if (((val >> ECC_CONFIG_CS_SHIFT)  & ~CS_MASK) != info->gpmc_cs)
+       if (((val >> ECC_CONFIG_CS_SHIFT) CS_MASK) != info->gpmc_cs)
                return -EINVAL;
 
        /* read ecc result */
index 0648c6996d43a94031d135163d4a693132dcc2b4..bf8108d65b7322fd6fcce2b25618cfde901920aa 100644 (file)
@@ -330,6 +330,7 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
                av = tmp_av;
        else {
                ubi_err("orphaned volume in fastmap pool!");
+               kmem_cache_free(ai->aeb_slab_cache, new_aeb);
                return UBI_BAD_FASTMAP;
        }
 
index 3835321b8cf38bbc86b26383d5796964a3c494a5..3bc3ebc0882f4378d86ba067f4bbf42da237dcdb 100644 (file)
@@ -139,6 +139,7 @@ config MACVLAN
 config MACVTAP
        tristate "MAC-VLAN based tap driver"
        depends on MACVLAN
+       depends on INET
        help
          This adds a specialized tap character device driver that is based
          on the MAC-VLAN network interface, called macvtap. A macvtap device
@@ -209,6 +210,7 @@ config RIONET_RX_SIZE
 
 config TUN
        tristate "Universal TUN/TAP device driver support"
+       depends on INET
        select CRC32
        ---help---
          TUN/TAP provides packet reception and transmission for user space
index 3dba2a70a00e41f6ab86de036b4a3a7d859fd105..ec86177be1df769efe9a36da83c6f63a54b2957f 100644 (file)
@@ -312,6 +312,7 @@ struct sw_tx_bd {
        u8              flags;
 /* Set on the first BD descriptor when there is a split BD */
 #define BNX2X_TSO_SPLIT_BD             (1<<0)
+#define BNX2X_HAS_SECOND_PBD           (1<<1)
 };
 
 struct sw_rx_page {
index 70be100feeb4fc6a928a0b8c4955e00438886a64..372a7557e1fab1aae94de415c7475b18fec26a75 100644 (file)
@@ -180,6 +180,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 
+       if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
+               /* Skip second parse bd... */
+               --nbd;
+               bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+       }
+
        /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
@@ -745,7 +751,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
                return;
        }
-       bnx2x_frag_free(fp, new_data);
+       if (new_data)
+               bnx2x_frag_free(fp, new_data);
 drop:
        /* drop the packet and keep the buffer in the bin */
        DP(NETIF_MSG_RX_STATUS,
@@ -3754,6 +3761,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        /* set encapsulation flag in start BD */
                        SET_FLAG(tx_start_bd->general_data,
                                 ETH_TX_START_BD_TUNNEL_EXIST, 1);
+
+                       tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
+
                        nbd++;
                } else if (xmit_type & XMIT_CSUM) {
                        /* Set PBD in checksum offload case w/o encapsulation */
index 4942ddf9c8aed2878ae9fdae1c74799a2a71b5e8..3de4069f020e35da50f4e547c5b79edbeb9a44e9 100644 (file)
@@ -6767,7 +6767,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                skb->protocol = eth_type_trans(skb, tp->dev);
 
                if (len > (tp->dev->mtu + ETH_HLEN) &&
-                   skb->protocol != htons(ETH_P_8021Q)) {
+                   skb->protocol != htons(ETH_P_8021Q) &&
+                   skb->protocol != htons(ETH_P_8021AD)) {
                        dev_kfree_skb(skb);
                        goto drop_it_no_recycle;
                }
@@ -7759,8 +7760,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        entry = tnapi->tx_prod;
        base_flags = 0;
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               base_flags |= TXD_FLAG_TCPUDP_CSUM;
 
        mss = skb_shinfo(skb)->gso_size;
        if (mss) {
@@ -7776,6 +7775,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
 
+               /* HW/FW can not correctly segment packets that have been
+                * vlan encapsulated.
+                */
+               if (skb->protocol == htons(ETH_P_8021Q) ||
+                   skb->protocol == htons(ETH_P_8021AD))
+                       return tg3_tso_bug(tp, skb);
+
                if (!skb_is_gso_v6(skb)) {
                        iph->check = 0;
                        iph->tot_len = htons(mss + hdr_len);
@@ -7822,6 +7828,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                base_flags |= tsflags << 12;
                        }
                }
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               /* HW/FW can not correctly checksum packets that have been
+                * vlan encapsulated.
+                */
+               if (skb->protocol == htons(ETH_P_8021Q) ||
+                   skb->protocol == htons(ETH_P_8021AD)) {
+                       if (skb_checksum_help(skb))
+                               goto drop;
+               } else  {
+                       base_flags |= TXD_FLAG_TCPUDP_CSUM;
+               }
        }
 
        if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
index 7371626c56a1462afba10760aa3b1a87c13671ec..d81a7dbfeef606e79365ac61750fab49e8ca6b8b 100644 (file)
@@ -2663,7 +2663,7 @@ static int be_open(struct net_device *netdev)
 
        for_all_evt_queues(adapter, eqo, i) {
                napi_enable(&eqo->napi);
-               be_eq_notify(adapter, eqo->q.id, true, false, 0);
+               be_eq_notify(adapter, eqo->q.id, true, true, 0);
        }
        adapter->flags |= BE_FLAGS_NAPI_ENABLED;
 
index 70fd55968844d07a8686414baf9efc71060693ed..040ecf2027cd5bd4ff39491999043adaf2cd1bcf 100644 (file)
@@ -293,6 +293,18 @@ failure:
        atomic_add(buffers_added, &(pool->available));
 }
 
+/*
+ * The final 8 bytes of the buffer list is a counter of frames dropped
+ * because there was not a buffer in the buffer list capable of holding
+ * the frame.
+ */
+static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
+{
+       __be64 *p = adapter->buffer_list_addr + 4096 - 8;
+
+       adapter->rx_no_buffer = be64_to_cpup(p);
+}
+
 /* replenish routine */
 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 {
@@ -308,8 +320,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
                        ibmveth_replenish_buffer_pool(adapter, pool);
        }
 
-       adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
-                                               4096 - 8);
+       ibmveth_update_rx_no_buffer(adapter);
 }
 
 /* empty and free ana buffer pool - also used to do cleanup in error paths */
@@ -689,8 +700,7 @@ static int ibmveth_close(struct net_device *netdev)
 
        free_irq(netdev->irq, netdev);
 
-       adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
-                                               4096 - 8);
+       ibmveth_update_rx_no_buffer(adapter);
 
        ibmveth_cleanup(adapter);
 
index 64cbe0dfe04347aff1af3fd15f5eb130caefaff6..4d3c8122e2aa92975911146f51c73f08a77255fa 100644 (file)
@@ -7229,6 +7229,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
 
        if (netif_running(netdev))
                igb_close(netdev);
+       else
+               igb_reset(adapter);
 
        igb_clear_interrupt_scheme(adapter);
 
index a602aeeb3acb09a6622540d306d09897e5714aa1..f8821ce2780219cb63cb86a0fcfc6bf88fa30cfe 100644 (file)
 #define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
 #define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
 #define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
+
+/* Exception Interrupt Port/Queue Cause register */
+
 #define MVNETA_INTR_NEW_CAUSE                    0x25a0
-#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
 #define MVNETA_INTR_NEW_MASK                     0x25a4
+
+/* bits  0..7  = TXQ SENT, one bit per queue.
+ * bits  8..15 = RXQ OCCUP, one bit per queue.
+ * bits 16..23 = RXQ FREE, one bit per queue.
+ * bit  29 = OLD_REG_SUM, see old reg ?
+ * bit  30 = TX_ERR_SUM, one bit for 4 ports
+ * bit  31 = MISC_SUM,   one bit for 4 ports
+ */
+#define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
+#define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
+#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
+#define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
+
 #define MVNETA_INTR_OLD_CAUSE                    0x25a8
 #define MVNETA_INTR_OLD_MASK                     0x25ac
+
+/* Data Path Port/Queue Cause Register */
 #define MVNETA_INTR_MISC_CAUSE                   0x25b0
 #define MVNETA_INTR_MISC_MASK                    0x25b4
+
+#define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
+#define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
+#define      MVNETA_CAUSE_PTP                    BIT(4)
+
+#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
+#define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
+#define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
+#define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
+#define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
+#define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
+#define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
+#define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)
+
+#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
+#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
+#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
+
+#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
+#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
+#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
+
 #define MVNETA_INTR_ENABLE                       0x25b8
 #define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
-#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0xff000000
+#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0xff000000  // note: neta says it's 0x000000FF
+
 #define MVNETA_RXQ_CMD                           0x2680
 #define      MVNETA_RXQ_DISABLE_SHIFT            8
 #define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
 #define MVNETA_RX_COAL_PKTS            32
 #define MVNETA_RX_COAL_USEC            100
 
-/* Timer */
-#define MVNETA_TX_DONE_TIMER_PERIOD    10
-
 /* Napi polling weight */
 #define MVNETA_RX_POLL_WEIGHT          64
 
 
 #define MVNETA_RX_BUF_SIZE(pkt_size)   ((pkt_size) + NET_SKB_PAD)
 
-struct mvneta_stats {
+struct mvneta_pcpu_stats {
        struct  u64_stats_sync syncp;
-       u64     packets;
-       u64     bytes;
+       u64     rx_packets;
+       u64     rx_bytes;
+       u64     tx_packets;
+       u64     tx_bytes;
 };
 
 struct mvneta_port {
@@ -230,16 +269,11 @@ struct mvneta_port {
        void __iomem *base;
        struct mvneta_rx_queue *rxqs;
        struct mvneta_tx_queue *txqs;
-       struct timer_list tx_done_timer;
        struct net_device *dev;
 
        u32 cause_rx_tx;
        struct napi_struct napi;
 
-       /* Flags */
-       unsigned long flags;
-#define MVNETA_F_TX_DONE_TIMER_BIT  0
-
        /* Napi weight */
        int weight;
 
@@ -248,8 +282,7 @@ struct mvneta_port {
        u8 mcast_count[256];
        u16 tx_ring_size;
        u16 rx_ring_size;
-       struct mvneta_stats tx_stats;
-       struct mvneta_stats rx_stats;
+       struct mvneta_pcpu_stats *stats;
 
        struct mii_bus *mii_bus;
        struct phy_device *phy_dev;
@@ -428,21 +461,29 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
 {
        struct mvneta_port *pp = netdev_priv(dev);
        unsigned int start;
+       int cpu;
 
-       memset(stats, 0, sizeof(struct rtnl_link_stats64));
-
-       do {
-               start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
-               stats->rx_packets = pp->rx_stats.packets;
-               stats->rx_bytes = pp->rx_stats.bytes;
-       } while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
+       for_each_possible_cpu(cpu) {
+               struct mvneta_pcpu_stats *cpu_stats;
+               u64 rx_packets;
+               u64 rx_bytes;
+               u64 tx_packets;
+               u64 tx_bytes;
 
+               cpu_stats = per_cpu_ptr(pp->stats, cpu);
+               do {
+                       start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+                       rx_packets = cpu_stats->rx_packets;
+                       rx_bytes   = cpu_stats->rx_bytes;
+                       tx_packets = cpu_stats->tx_packets;
+                       tx_bytes   = cpu_stats->tx_bytes;
+               } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
 
-       do {
-               start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
-               stats->tx_packets = pp->tx_stats.packets;
-               stats->tx_bytes = pp->tx_stats.bytes;
-       } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
+               stats->rx_packets += rx_packets;
+               stats->rx_bytes   += rx_bytes;
+               stats->tx_packets += tx_packets;
+               stats->tx_bytes   += tx_bytes;
+       }
 
        stats->rx_errors        = dev->stats.rx_errors;
        stats->rx_dropped       = dev->stats.rx_dropped;
@@ -1063,17 +1104,6 @@ static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
        txq->done_pkts_coal = value;
 }
 
-/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
-static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
-{
-       if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
-               pp->tx_done_timer.expires = jiffies +
-                       msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
-               add_timer(&pp->tx_done_timer);
-       }
-}
-
-
 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
                                u32 phys_addr, u32 cookie)
@@ -1145,7 +1175,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
        command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
        command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
 
-       if (l3_proto == swab16(ETH_P_IP))
+       if (l3_proto == htons(ETH_P_IP))
                command |= MVNETA_TXD_IP_CSUM;
        else
                command |= MVNETA_TX_L3_IP6;
@@ -1354,6 +1384,8 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 {
        struct net_device *dev = pp->dev;
        int rx_done, rx_filled;
+       u32 rcvd_pkts = 0;
+       u32 rcvd_bytes = 0;
 
        /* Get number of received packets */
        rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -1391,10 +1423,8 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 
                rx_bytes = rx_desc->data_size -
                        (ETH_FCS_LEN + MVNETA_MH_SIZE);
-               u64_stats_update_begin(&pp->rx_stats.syncp);
-               pp->rx_stats.packets++;
-               pp->rx_stats.bytes += rx_bytes;
-               u64_stats_update_end(&pp->rx_stats.syncp);
+               rcvd_pkts++;
+               rcvd_bytes += rx_bytes;
 
                /* Linux processing */
                skb_reserve(skb, MVNETA_MH_SIZE);
@@ -1415,6 +1445,15 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                }
        }
 
+       if (rcvd_pkts) {
+               struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+               u64_stats_update_begin(&stats->syncp);
+               stats->rx_packets += rcvd_pkts;
+               stats->rx_bytes   += rcvd_bytes;
+               u64_stats_update_end(&stats->syncp);
+       }
+
        /* Update rxq management counters */
        mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
 
@@ -1545,25 +1584,17 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 
 out:
        if (frags > 0) {
-               u64_stats_update_begin(&pp->tx_stats.syncp);
-               pp->tx_stats.packets++;
-               pp->tx_stats.bytes += skb->len;
-               u64_stats_update_end(&pp->tx_stats.syncp);
+               struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
+               u64_stats_update_begin(&stats->syncp);
+               stats->tx_packets++;
+               stats->tx_bytes  += skb->len;
+               u64_stats_update_end(&stats->syncp);
        } else {
                dev->stats.tx_dropped++;
                dev_kfree_skb_any(skb);
        }
 
-       if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
-               mvneta_txq_done(pp, txq);
-
-       /* If after calling mvneta_txq_done, count equals
-        * frags, we need to set the timer
-        */
-       if (txq->count == frags && frags > 0)
-               mvneta_add_tx_done_timer(pp);
-
        return NETDEV_TX_OK;
 }
 
@@ -1839,14 +1870,22 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 
        /* Read cause register */
        cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
-               MVNETA_RX_INTR_MASK(rxq_number);
+               (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+
+       /* Release Tx descriptors */
+       if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
+               int tx_todo = 0;
+
+               mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL), &tx_todo);
+               cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
+       }
 
        /* For the case where the last mvneta_poll did not process all
         * RX packets
         */
        cause_rx_tx |= pp->cause_rx_tx;
        if (rxq_number > 1) {
-               while ((cause_rx_tx != 0) && (budget > 0)) {
+               while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
                        int count;
                        struct mvneta_rx_queue *rxq;
                        /* get rx queue number from cause_rx_tx */
@@ -1878,7 +1917,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
                napi_complete(napi);
                local_irq_save(flags);
                mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-                           MVNETA_RX_INTR_MASK(rxq_number));
+                           MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
                local_irq_restore(flags);
        }
 
@@ -1886,26 +1925,6 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
        return rx_done;
 }
 
-/* tx done timer callback */
-static void mvneta_tx_done_timer_callback(unsigned long data)
-{
-       struct net_device *dev = (struct net_device *)data;
-       struct mvneta_port *pp = netdev_priv(dev);
-       int tx_done = 0, tx_todo = 0;
-
-       if (!netif_running(dev))
-               return ;
-
-       clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
-       tx_done = mvneta_tx_done_gbe(pp,
-                                    (((1 << txq_number) - 1) &
-                                     MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
-                                    &tx_todo);
-       if (tx_todo > 0)
-               mvneta_add_tx_done_timer(pp);
-}
-
 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                           int num)
@@ -2155,7 +2174,7 @@ static void mvneta_start_dev(struct mvneta_port *pp)
 
        /* Unmask interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-                   MVNETA_RX_INTR_MASK(rxq_number));
+                   MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
 
        phy_start(pp->phy_dev);
        netif_tx_start_all_queues(pp->dev);
@@ -2188,16 +2207,6 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
        mvneta_rx_reset(pp);
 }
 
-/* tx timeout callback - display a message and stop/start the network device */
-static void mvneta_tx_timeout(struct net_device *dev)
-{
-       struct mvneta_port *pp = netdev_priv(dev);
-
-       netdev_info(dev, "tx timeout\n");
-       mvneta_stop_dev(pp);
-       mvneta_start_dev(pp);
-}
-
 /* Return positive if MTU is valid */
 static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
 {
@@ -2306,7 +2315,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
 
                        if (phydev->speed == SPEED_1000)
                                val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
-                       else
+                       else if (phydev->speed == SPEED_100)
                                val |= MVNETA_GMAC_CONFIG_MII_SPEED;
 
                        mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
@@ -2426,8 +2435,6 @@ static int mvneta_stop(struct net_device *dev)
        free_irq(dev->irq, pp);
        mvneta_cleanup_rxqs(pp);
        mvneta_cleanup_txqs(pp);
-       del_timer(&pp->tx_done_timer);
-       clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
 
        return 0;
 }
@@ -2548,7 +2555,6 @@ static const struct net_device_ops mvneta_netdev_ops = {
        .ndo_set_rx_mode     = mvneta_set_rx_mode,
        .ndo_set_mac_address = mvneta_set_mac_addr,
        .ndo_change_mtu      = mvneta_change_mtu,
-       .ndo_tx_timeout      = mvneta_tx_timeout,
        .ndo_get_stats64     = mvneta_get_stats64,
 };
 
@@ -2729,10 +2735,6 @@ static int mvneta_probe(struct platform_device *pdev)
 
        pp = netdev_priv(dev);
 
-       pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
-       init_timer(&pp->tx_done_timer);
-       clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
        pp->weight = MVNETA_RX_POLL_WEIGHT;
        pp->phy_node = phy_node;
        pp->phy_interface = phy_mode;
@@ -2751,7 +2753,12 @@ static int mvneta_probe(struct platform_device *pdev)
 
        clk_prepare_enable(pp->clk);
 
-       pp->tx_done_timer.data = (unsigned long)dev;
+       /* Alloc per-cpu stats */
+       pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
+       if (!pp->stats) {
+               err = -ENOMEM;
+               goto err_clk;
+       }
 
        pp->tx_ring_size = MVNETA_MAX_TXD;
        pp->rx_ring_size = MVNETA_MAX_RXD;
@@ -2762,7 +2769,7 @@ static int mvneta_probe(struct platform_device *pdev)
        err = mvneta_init(pp, phy_addr);
        if (err < 0) {
                dev_err(&pdev->dev, "can't init eth hal\n");
-               goto err_clk;
+               goto err_free_stats;
        }
        mvneta_port_power_up(pp, phy_mode);
 
@@ -2791,6 +2798,8 @@ static int mvneta_probe(struct platform_device *pdev)
 
 err_deinit:
        mvneta_deinit(pp);
+err_free_stats:
+       free_percpu(pp->stats);
 err_clk:
        clk_disable_unprepare(pp->clk);
 err_unmap:
@@ -2811,6 +2820,7 @@ static int mvneta_remove(struct platform_device *pdev)
        unregister_netdev(dev);
        mvneta_deinit(pp);
        clk_disable_unprepare(pp->clk);
+       free_percpu(pp->stats);
        iounmap(pp->base);
        irq_dispose_mapping(dev->irq);
        free_netdev(dev);
index 4e6877a032a8414bd059e2dcf658616dc2cab0fe..bd8800c85525234d1bbf05adfa7e7d23af297e1e 100644 (file)
@@ -191,6 +191,39 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
                       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
 }
 
+static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
+                             struct mlx4_en_tx_ring *ring, int index,
+                             u8 owner)
+{
+       __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
+       struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+       struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
+       void *end = ring->buf + ring->buf_size;
+       __be32 *ptr = (__be32 *)tx_desc;
+       int i;
+
+       /* Optimize the common case when there are no wraparounds */
+       if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
+               /* Stamp the freed descriptor */
+               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+                    i += STAMP_STRIDE) {
+                       *ptr = stamp;
+                       ptr += STAMP_DWORDS;
+               }
+       } else {
+               /* Stamp the freed descriptor */
+               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+                    i += STAMP_STRIDE) {
+                       *ptr = stamp;
+                       ptr += STAMP_DWORDS;
+                       if ((void *)ptr >= end) {
+                               ptr = ring->buf;
+                               stamp ^= cpu_to_be32(0x80000000);
+                       }
+               }
+       }
+}
+
 
 static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                                struct mlx4_en_tx_ring *ring,
@@ -205,8 +238,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
        void *end = ring->buf + ring->buf_size;
        int frags = skb_shinfo(skb)->nr_frags;
        int i;
-       __be32 *ptr = (__be32 *)tx_desc;
-       __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
        struct skb_shared_hwtstamps hwts;
 
        if (timestamp) {
@@ -232,12 +263,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                                        skb_frag_size(frag), PCI_DMA_TODEVICE);
                        }
                }
-               /* Stamp the freed descriptor */
-               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
-                       *ptr = stamp;
-                       ptr += STAMP_DWORDS;
-               }
-
        } else {
                if (!tx_info->inl) {
                        if ((void *) data >= end) {
@@ -263,16 +288,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                                ++data;
                        }
                }
-               /* Stamp the freed descriptor */
-               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
-                       *ptr = stamp;
-                       ptr += STAMP_DWORDS;
-                       if ((void *) ptr >= end) {
-                               ptr = ring->buf;
-                               stamp ^= cpu_to_be32(0x80000000);
-                       }
-               }
-
        }
        dev_kfree_skb_any(skb);
        return tx_info->nr_txbb;
@@ -318,8 +333,9 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
        struct mlx4_cqe *cqe;
        u16 index;
-       u16 new_index, ring_index;
+       u16 new_index, ring_index, stamp_index;
        u32 txbbs_skipped = 0;
+       u32 txbbs_stamp = 0;
        u32 cons_index = mcq->cons_index;
        int size = cq->size;
        u32 size_mask = ring->size_mask;
@@ -335,6 +351,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
        index = cons_index & size_mask;
        cqe = &buf[(index << factor) + factor];
        ring_index = ring->cons & size_mask;
+       stamp_index = ring_index;
 
        /* Process all completed CQEs */
        while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
@@ -359,6 +376,12 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
                                        priv, ring, ring_index,
                                        !!((ring->cons + txbbs_skipped) &
                                        ring->size), timestamp);
+
+                       mlx4_en_stamp_wqe(priv, ring, stamp_index,
+                                         !!((ring->cons + txbbs_stamp) &
+                                               ring->size));
+                       stamp_index = ring_index;
+                       txbbs_stamp = txbbs_skipped;
                        packets++;
                        bytes += ring->tx_info[ring_index].nr_bytes;
                } while (ring_index != new_index);
index 7be9788ed0f6fddcb43abd1b384b0424ad33cb22..4fb93c5b556319e3cb90b2e0144eb31fbd69ceaf 100644 (file)
@@ -856,6 +856,10 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
                return -ENOMEM;
        dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
                                   DMA_BIDIRECTIONAL);
+       if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) {
+               __free_page(dmatest_page);
+               return -ENOMEM;
+       }
 
        /* Run a small DMA test.
         * The magic multipliers to the length tell the firmware
@@ -1191,6 +1195,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
                        int bytes, int watchdog)
 {
        struct page *page;
+       dma_addr_t bus;
        int idx;
 #if MYRI10GE_ALLOC_SIZE > 4096
        int end_offset;
@@ -1215,11 +1220,21 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
                                        rx->watchdog_needed = 1;
                                return;
                        }
+
+                       bus = pci_map_page(mgp->pdev, page, 0,
+                                          MYRI10GE_ALLOC_SIZE,
+                                          PCI_DMA_FROMDEVICE);
+                       if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
+                               __free_pages(page, MYRI10GE_ALLOC_ORDER);
+                               if (rx->fill_cnt - rx->cnt < 16)
+                                       rx->watchdog_needed = 1;
+                               return;
+                       }
+
                        rx->page = page;
                        rx->page_offset = 0;
-                       rx->bus = pci_map_page(mgp->pdev, page, 0,
-                                              MYRI10GE_ALLOC_SIZE,
-                                              PCI_DMA_FROMDEVICE);
+                       rx->bus = bus;
+
                }
                rx->info[idx].page = rx->page;
                rx->info[idx].page_offset = rx->page_offset;
@@ -2576,6 +2591,35 @@ myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
        mb();
 }
 
+static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp,
+                                 struct myri10ge_tx_buf *tx, int idx)
+{
+       unsigned int len;
+       int last_idx;
+
+       /* Free any DMA resources we've alloced and clear out the skb slot */
+       last_idx = (idx + 1) & tx->mask;
+       idx = tx->req & tx->mask;
+       do {
+               len = dma_unmap_len(&tx->info[idx], len);
+               if (len) {
+                       if (tx->info[idx].skb != NULL)
+                               pci_unmap_single(mgp->pdev,
+                                                dma_unmap_addr(&tx->info[idx],
+                                                               bus), len,
+                                                PCI_DMA_TODEVICE);
+                       else
+                               pci_unmap_page(mgp->pdev,
+                                              dma_unmap_addr(&tx->info[idx],
+                                                             bus), len,
+                                              PCI_DMA_TODEVICE);
+                       dma_unmap_len_set(&tx->info[idx], len, 0);
+                       tx->info[idx].skb = NULL;
+               }
+               idx = (idx + 1) & tx->mask;
+       } while (idx != last_idx);
+}
+
 /*
  * Transmit a packet.  We need to split the packet so that a single
  * segment does not cross myri10ge->tx_boundary, so this makes segment
@@ -2599,7 +2643,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
        u32 low;
        __be32 high_swapped;
        unsigned int len;
-       int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
+       int idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
        u16 pseudo_hdr_offset, cksum_offset, queue;
        int cum_len, seglen, boundary, rdma_count;
        u8 flags, odd_flag;
@@ -2696,9 +2740,12 @@ again:
 
        /* map the skb for DMA */
        len = skb_headlen(skb);
+       bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+       if (unlikely(pci_dma_mapping_error(mgp->pdev, bus)))
+               goto drop;
+
        idx = tx->req & tx->mask;
        tx->info[idx].skb = skb;
-       bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        dma_unmap_addr_set(&tx->info[idx], bus, bus);
        dma_unmap_len_set(&tx->info[idx], len, len);
 
@@ -2797,12 +2844,16 @@ again:
                        break;
 
                /* map next fragment for DMA */
-               idx = (count + tx->req) & tx->mask;
                frag = &skb_shinfo(skb)->frags[frag_idx];
                frag_idx++;
                len = skb_frag_size(frag);
                bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
                                       DMA_TO_DEVICE);
+               if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
+                       myri10ge_unmap_tx_dma(mgp, tx, idx);
+                       goto drop;
+               }
+               idx = (count + tx->req) & tx->mask;
                dma_unmap_addr_set(&tx->info[idx], bus, bus);
                dma_unmap_len_set(&tx->info[idx], len, len);
        }
@@ -2833,31 +2884,8 @@ again:
        return NETDEV_TX_OK;
 
 abort_linearize:
-       /* Free any DMA resources we've alloced and clear out the skb
-        * slot so as to not trip up assertions, and to avoid a
-        * double-free if linearizing fails */
+       myri10ge_unmap_tx_dma(mgp, tx, idx);
 
-       last_idx = (idx + 1) & tx->mask;
-       idx = tx->req & tx->mask;
-       tx->info[idx].skb = NULL;
-       do {
-               len = dma_unmap_len(&tx->info[idx], len);
-               if (len) {
-                       if (tx->info[idx].skb != NULL)
-                               pci_unmap_single(mgp->pdev,
-                                                dma_unmap_addr(&tx->info[idx],
-                                                               bus), len,
-                                                PCI_DMA_TODEVICE);
-                       else
-                               pci_unmap_page(mgp->pdev,
-                                              dma_unmap_addr(&tx->info[idx],
-                                                             bus), len,
-                                              PCI_DMA_TODEVICE);
-                       dma_unmap_len_set(&tx->info[idx], len, 0);
-                       tx->info[idx].skb = NULL;
-               }
-               idx = (idx + 1) & tx->mask;
-       } while (idx != last_idx);
        if (skb_is_gso(skb)) {
                netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
                goto drop;
index 2cbf73881d0a3c068916e72b4b714c14526327f4..f5cdf88cf2355faa3a3659e31a304fad13d31739 100755 (executable)
@@ -36,7 +36,7 @@
 #include <linux/regulator/consumer.h>
 
 #define grf_readl(offset)      readl_relaxed(RK_GRF_VIRT + offset)
-#define grf_writel(v, offset)  do { writel_relaxed(v, RK_GRF_VIRT + offset); dsb(); } while (0)
+#define grf_writel(v, offset)  do { writel_relaxed(v, RK_GRF_VIRT + offset); dsb(sy); } while (0)
 
 //RK3288_GRF_SOC_CON1
 //RK3128_GRF_MAC_CON1
index 3df56840a3b9282c7ab308682d448bba6dac53f4..ade8bdfc03afa0673ef2052348e33138e3e0115f 100644 (file)
@@ -656,7 +656,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        spin_lock_irqsave(&port->vio.lock, flags);
 
        dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-       if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
+       if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);
 
@@ -704,7 +704,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        dev->stats.tx_bytes += skb->len;
 
        dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
-       if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
+       if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
                netif_stop_queue(dev);
                if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
                        netif_wake_queue(dev);
@@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
        return vp;
 }
 
+static void vnet_cleanup(void)
+{
+       struct vnet *vp;
+       struct net_device *dev;
+
+       mutex_lock(&vnet_list_mutex);
+       while (!list_empty(&vnet_list)) {
+               vp = list_first_entry(&vnet_list, struct vnet, list);
+               list_del(&vp->list);
+               dev = vp->dev;
+               /* vio_unregister_driver() should have cleaned up port_list */
+               BUG_ON(!list_empty(&vp->port_list));
+               unregister_netdev(dev);
+               free_netdev(dev);
+       }
+       mutex_unlock(&vnet_list_mutex);
+}
+
 static const char *local_mac_prop = "local-mac-address";
 
 static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
@@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
 
                kfree(port);
 
-               unregister_netdev(vp->dev);
        }
        return 0;
 }
@@ -1268,6 +1285,7 @@ static int __init vnet_init(void)
 static void __exit vnet_exit(void)
 {
        vio_unregister_driver(&vnet_port_driver);
+       vnet_cleanup();
 }
 
 module_init(vnet_init);
index aea78fc2e48f30894a9ee7aeab0cd92f0d53b6d3..59e9c56e5b8abc63b0ebaa8ed972bd2538cc8f67 100644 (file)
@@ -138,6 +138,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        struct hv_netvsc_packet *packet;
        int ret;
        unsigned int i, num_pages, npg_data;
+       u32 skb_length = skb->len;
 
        /* Add multipages for skb->data and additional 2 for RNDIS */
        npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
@@ -208,7 +209,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        ret = rndis_filter_send(net_device_ctx->device_ctx,
                                  packet);
        if (ret == 0) {
-               net->stats.tx_bytes += skb->len;
+               net->stats.tx_bytes += skb_length;
                net->stats.tx_packets++;
        } else {
                kfree(packet);
index 155ef4bbde91c71cf38a97ee50b893960a61958c..9be91cb4f4a305c64996ab6e4aeb31d94af547b6 100644 (file)
@@ -500,6 +500,7 @@ static int macvlan_init(struct net_device *dev)
                                  (lowerdev->state & MACVLAN_STATE_MASK);
        dev->features           = lowerdev->features & MACVLAN_FEATURES;
        dev->features           |= NETIF_F_LLTX;
+       dev->vlan_features      = lowerdev->vlan_features & MACVLAN_FEATURES;
        dev->gso_max_size       = lowerdev->gso_max_size;
        dev->iflink             = lowerdev->ifindex;
        dev->hard_header_len    = lowerdev->hard_header_len;
index 9e56eb479a4fcb3b1d7d381b255381db0e07e0c4..2d255ba911d51c52a7b1e038d37e3cc28cbdc0b5 100644 (file)
@@ -625,6 +625,8 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                vnet_hdr->csum_start = skb_checksum_start_offset(skb);
+               if (vlan_tx_tag_present(skb))
+                       vnet_hdr->csum_start += VLAN_HLEN;
                vnet_hdr->csum_offset = skb->csum_offset;
        } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
index 72ff14b811c621c3a6694a2e4941d2a4c41651ed..5a1897d86e9448f9f53abbd40dabd60393e16d0b 100644 (file)
@@ -601,7 +601,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        if (file == ppp->owner)
                                ppp_shutdown_interface(ppp);
                }
-               if (atomic_long_read(&file->f_count) <= 2) {
+               if (atomic_long_read(&file->f_count) < 2) {
                        ppp_release(NULL, file);
                        err = 0;
                } else
index 6839fb07a4c9da817393584ab435f8f780c96ebf..becfa3ef7fdc4a8175cc8a0e1afd57df4021dc57 100644 (file)
@@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
                po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
                                   dev->hard_header_len);
 
-               po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
+               po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
                po->chan.private = sk;
                po->chan.ops = &pppoe_chan_ops;
 
index 7f10588fe6686281158982f71f032721117e578d..8161c3f066a3b797174e7becb4bf876624f716e0 100644 (file)
@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        nf_reset(skb);
 
        skb->ip_summed = CHECKSUM_NONE;
-       ip_select_ident(skb, &rt->dst, NULL);
+       ip_select_ident(skb, NULL);
        ip_send_check(iph);
 
        ip_local_out(skb);
index d33c3ae2fcea8612a848640f994879e21829df0f..3b449c4ecf723887a327e84741040543bc23bbe5 100644 (file)
@@ -695,6 +695,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
 {
        struct usbnet *dev = netdev_priv(net);
        struct sockaddr *addr = p;
+       int ret;
 
        if (netif_running(net))
                return -EBUSY;
@@ -704,8 +705,12 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
        memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
 
        /* Set the MAC address */
-       return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
+       ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
                                 ETH_ALEN, net->dev_addr);
+       if (ret < 0)
+               return ret;
+
+       return 0;
 }
 
 static const struct net_device_ops ax88179_netdev_ops = {
index 6fb0082b33080985a613d31c27dc0a1e55fded5b..6c584f8a22686cc67b1505b524eac29c845627d2 100644 (file)
@@ -647,6 +647,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+       {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
        {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
        {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},    /* Huawei E1820 */
        {QMI_FIXED_INTF(0x16d8, 0x6003, 0)},    /* CMOTech 6003 */
@@ -721,6 +722,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
        {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
        {QMI_FIXED_INTF(0x19d2, 0x1426, 2)},    /* ZTE MF91 */
+       {QMI_FIXED_INTF(0x19d2, 0x1428, 2)},    /* Telewell TW-LTE 4G v2 */
        {QMI_FIXED_INTF(0x19d2, 0x2002, 4)},    /* ZTE (Vodafone) K3765-Z */
        {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
@@ -733,6 +735,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x901f, 8)},    /* Sierra Wireless EM7355 */
        {QMI_FIXED_INTF(0x1199, 0x9041, 8)},    /* Sierra Wireless MC7305/MC7355 */
        {QMI_FIXED_INTF(0x1199, 0x9051, 8)},    /* Netgear AirCard 340U */
+       {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
index fcbd4eee52cc7e1403e098dfffa5ea6a9cc331d1..a1dc186c6f66188b6bb778f4c531f6d0e278395e 100644 (file)
@@ -1093,7 +1093,7 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        iph->daddr      = dst;
        iph->saddr      = fl4.saddr;
        iph->ttl        = ttl ? : ip4_dst_hoplimit(&rt->dst);
-       __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
+       __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
 
        nf_reset(skb);
 
index 9dce106cd6d4a1b963910a8310737be18929867a..95a334f0719caa593b1f0cf2a546818989d7d8ef 100644 (file)
@@ -253,6 +253,7 @@ struct ar9170 {
        atomic_t rx_work_urbs;
        atomic_t rx_pool_urbs;
        kernel_ulong_t features;
+       bool usb_ep_cmd_is_bulk;
 
        /* firmware settings */
        struct completion fw_load_wait;
index 307bc0ddff99091a1f224bb236013a1a90b5fa76..83d20c8b2ad7ee6cd77ef1fc5f5edd6891259b95 100644 (file)
@@ -621,9 +621,16 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
                goto err_free;
        }
 
-       usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
-               AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
-               carl9170_usb_cmd_complete, ar, 1);
+       if (ar->usb_ep_cmd_is_bulk)
+               usb_fill_bulk_urb(urb, ar->udev,
+                                 usb_sndbulkpipe(ar->udev, AR9170_USB_EP_CMD),
+                                 cmd, cmd->hdr.len + 4,
+                                 carl9170_usb_cmd_complete, ar);
+       else
+               usb_fill_int_urb(urb, ar->udev,
+                                usb_sndintpipe(ar->udev, AR9170_USB_EP_CMD),
+                                cmd, cmd->hdr.len + 4,
+                                carl9170_usb_cmd_complete, ar, 1);
 
        if (free_buf)
                urb->transfer_flags |= URB_FREE_BUFFER;
@@ -1032,9 +1039,10 @@ static void carl9170_usb_firmware_step2(const struct firmware *fw,
 static int carl9170_usb_probe(struct usb_interface *intf,
                              const struct usb_device_id *id)
 {
+       struct usb_endpoint_descriptor *ep;
        struct ar9170 *ar;
        struct usb_device *udev;
-       int err;
+       int i, err;
 
        err = usb_reset_device(interface_to_usbdev(intf));
        if (err)
@@ -1050,6 +1058,21 @@ static int carl9170_usb_probe(struct usb_interface *intf,
        ar->intf = intf;
        ar->features = id->driver_info;
 
+       /* We need to remember the type of endpoint 4 because it differs
+        * between high- and full-speed configuration. The high-speed
+        * configuration specifies it as interrupt and the full-speed
+        * configuration as bulk endpoint. This information is required
+        * later when sending urbs to that endpoint.
+        */
+       for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; ++i) {
+               ep = &intf->cur_altsetting->endpoint[i].desc;
+
+               if (usb_endpoint_num(ep) == AR9170_USB_EP_CMD &&
+                   usb_endpoint_dir_out(ep) &&
+                   usb_endpoint_type(ep) == USB_ENDPOINT_XFER_BULK)
+                       ar->usb_ep_cmd_is_bulk = true;
+       }
+
        usb_set_intfdata(intf, ar);
        SET_IEEE80211_DEV(ar->hw, &intf->dev);
 
index 72d2ecce0b8d7614c870cfb41e5d7e4773cd1bd8..d8df1d9b0de3ce3337965fa4626a5f6ed7449b1f 100644 (file)
@@ -489,6 +489,7 @@ enum iwl_trans_state {
  *     Set during transport allocation.
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
  * @pm_support: set to true in start_hw if link pm is supported
+ * @ltr_enabled: set to true if the LTR is enabled
  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
  *     The user should use iwl_trans_{alloc,free}_tx_cmd.
  * @dev_cmd_headroom: room needed for the transport's private use before the
@@ -513,6 +514,7 @@ struct iwl_trans {
        u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
 
        bool pm_support;
+       bool ltr_enabled;
 
        /* The following fields are internal only */
        struct kmem_cache *dev_cmd_pool;
index 81fe45f46be7e97f1d346ada8c457d065b9a377e..ac38ecf13c18103de520b66706877881aa62fd63 100644 (file)
 /* Power Management Commands, Responses, Notifications */
 
 /**
- * enum iwl_scan_flags - masks for power table command flags
+ * enum iwl_ltr_config_flags - masks for LTR config command flags
+ * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
+ * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
+ *     memory access
+ * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
+ *     reg change
+ * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
+ *     D0 to D3
+ * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
+ * @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register
+ * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
+ */
+enum iwl_ltr_config_flags {
+       LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
+       LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
+       LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
+       LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
+       LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
+       LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
+       LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
+};
+
+/**
+ * struct iwl_ltr_config_cmd - configures the LTR
+ * @flags: See %enum iwl_ltr_config_flags
+ */
+struct iwl_ltr_config_cmd {
+       __le32 flags;
+       __le32 static_long;
+       __le32 static_short;
+} __packed;
+
+/**
+ * enum iwl_power_flags - masks for power table command flags
  * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
  *             receiver and transmitter. '0' - does not allow.
  * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
index c6384555aab4de0172c7f1a7c5662cce776e76ed..4b6730db42a5e47c5de9d25df3b7ee005e9f7a46 100644 (file)
@@ -138,6 +138,7 @@ enum {
 
        /* Power */
        POWER_TABLE_CMD = 0x77,
+       LTR_CONFIG = 0xee,
 
        /* Scanning */
        SCAN_REQUEST_CMD = 0x80,
index e18c92dd60ecdd768c86e23fe6d0f6505f396018..d250d451fd015fb645ba49cd3d9048300fea6dc4 100644 (file)
@@ -443,6 +443,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        if (ret)
                goto error;
 
+       if (mvm->trans->ltr_enabled) {
+               struct iwl_ltr_config_cmd cmd = {
+                       .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
+               };
+
+               WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
+                                            sizeof(cmd), &cmd));
+       }
+
        IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
 
        return 0;
index 388c8a91496046e99b9d91d837c195dc2c4f0118..649d301cfa2ac82567b49f68edcf10e1f2428cc7 100644 (file)
@@ -293,6 +293,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(BT_PROFILE_NOTIFICATION),
        CMD(BT_CONFIG),
        CMD(MCAST_FILTER_CMD),
+       CMD(LTR_CONFIG),
 };
 #undef CMD
 
index b53e5c3f403bf86164b31f64fc0dd6f7f27874b1..bb020ad3f76cbbdbc1734039a876cc25fdef55ab 100644 (file)
@@ -269,6 +269,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
@@ -306,6 +308,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
index ff04135d37afa74c5832e0452934bea631ac4304..6a5eb2b29418da1a60b091b162271ddca0fcede3 100644 (file)
@@ -116,11 +116,13 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 
 /* PCI registers */
 #define PCI_CFG_RETRY_TIMEOUT  0x041
+#define PCI_EXP_DEVCTL2_LTR_EN 0x0400
 
 static void iwl_pcie_apm_config(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u16 lctl;
+       u16 cap;
 
        /*
         * HW bug W/A for instability in PCIe bus L0S->L1 transition.
@@ -131,16 +133,17 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
         *    power savings, even without L1.
         */
        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
-       if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
-               /* L1-ASPM enabled; disable(!) L0S */
+       if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
                iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-               dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
-       } else {
-               /* L1-ASPM disabled; enable(!) L0S */
+       else
                iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-               dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
-       }
        trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
+
+       pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
+       trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
+       dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
+                (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
+                trans->ltr_enabled ? "En" : "Dis");
 }
 
 /*
index fc3fe8ddcf62a97a02db5c20aff80349ab764532..83c61964d0823c7ba47b68895819c9ffdca56823 100644 (file)
@@ -501,6 +501,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
+       memset(tx_info, 0, sizeof(*tx_info));
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
 
index 573643039a0feb7e8fb566401af42aead97f0a14..f2c683f9494d2a8887ab1929179900cf57ede9d2 100755 (executable)
@@ -16,6 +16,7 @@
 #include <bcmutils.h>
 #include <linux/delay.h>
 #include <pcicfg.h>
+#include <asm-generic/pci-dma-compat.h>
 
 
 
index 88a517a68d7d246cfa951595f0b1bae3e136071a..54bd47d27322a2a75f5cb09c1af78998b301aef7 100755 (executable)
@@ -49,6 +49,7 @@
 #include <linux/icmpv6.h>
 #include <net/ndisc.h>
 #include <net/checksum.h>
+#include <net/ip6_checksum.h>
 #endif
 #endif
 
index a7630d5ec8921cb9c3024e9fcf07e0bbf1a5071c..a629313dd98a855ff9da7aa07bd8d4cae051a659 100644 (file)
@@ -1920,7 +1920,7 @@ struct mac_iveiv_entry {
  * 2 - drop tx power by 12dBm,
  * 3 - increase tx power by 6dBm
  */
-#define BBP1_TX_POWER_CTRL             FIELD8(0x07)
+#define BBP1_TX_POWER_CTRL             FIELD8(0x03)
 #define BBP1_TX_ANTENNA                        FIELD8(0x18)
 
 /*
index 9ef0711a5cc11a809731f7eb5c7b03d8000e1e97..400b8679796aea95961e5fe76cf3c9f3565273b5 100644 (file)
@@ -1091,6 +1091,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        /* Ovislink */
        { USB_DEVICE(0x1b75, 0x3071) },
        { USB_DEVICE(0x1b75, 0x3072) },
+       { USB_DEVICE(0x1b75, 0xa200) },
        /* Para */
        { USB_DEVICE(0x20b8, 0x8888) },
        /* Pegatron */
index 8188dcb512f0a01851a67810239d10eccfc323db..e7a2af3ad05a8b9e1034fe7abcf6738c087dd97a 100644 (file)
@@ -316,6 +316,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
        {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+       {RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */
        {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
        {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
        {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
index 3420d833db170e2e308e48d3bedab2b988dba29f..384ab8ca4b37fa77229ecd3a2939962033f40281 100644 (file)
@@ -501,9 +501,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
                targets->sens_res =
                         be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]);
                targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK];
-               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
-                      skb->data[MICROREAD_EMCF_A_LEN]);
                targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN];
+               if (targets->nfcid1_len > sizeof(targets->nfcid1)) {
+                       r = -EINVAL;
+                       goto exit_free;
+               }
+               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
+                      targets->nfcid1_len);
                break;
        case MICROREAD_GATE_ID_MREAD_ISO_A_3:
                targets->supported_protocols =
@@ -511,9 +515,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
                targets->sens_res =
                         be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]);
                targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK];
-               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
-                      skb->data[MICROREAD_EMCF_A3_LEN]);
                targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN];
+               if (targets->nfcid1_len > sizeof(targets->nfcid1)) {
+                       r = -EINVAL;
+                       goto exit_free;
+               }
+               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
+                      targets->nfcid1_len);
                break;
        case MICROREAD_GATE_ID_MREAD_ISO_B:
                targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
index c748175675012614578dbdc8f059d21953dbe4a4..90733929f4f0cdce09831889dbc8840042e83cf9 100644 (file)
@@ -1076,52 +1076,6 @@ int of_property_read_string(struct device_node *np, const char *propname,
 }
 EXPORT_SYMBOL_GPL(of_property_read_string);
 
-/**
- * of_property_read_string_index - Find and read a string from a multiple
- * strings property.
- * @np:                device node from which the property value is to be read.
- * @propname:  name of the property to be searched.
- * @index:     index of the string in the list of strings
- * @out_string:        pointer to null terminated return string, modified only if
- *             return value is 0.
- *
- * Search for a property in a device tree node and retrieve a null
- * terminated string value (pointer to data, not a copy) in the list of strings
- * contained in that property.
- * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
- * property does not have a value, and -EILSEQ if the string is not
- * null-terminated within the length of the property data.
- *
- * The out_string pointer is modified only if a valid string can be decoded.
- */
-int of_property_read_string_index(struct device_node *np, const char *propname,
-                                 int index, const char **output)
-{
-       struct property *prop = of_find_property(np, propname, NULL);
-       int i = 0;
-       size_t l = 0, total = 0;
-       const char *p;
-
-       if (!prop)
-               return -EINVAL;
-       if (!prop->value)
-               return -ENODATA;
-       if (strnlen(prop->value, prop->length) >= prop->length)
-               return -EILSEQ;
-
-       p = prop->value;
-
-       for (i = 0; total < prop->length; total += l, p += l) {
-               l = strlen(p) + 1;
-               if (i++ == index) {
-                       *output = p;
-                       return 0;
-               }
-       }
-       return -ENODATA;
-}
-EXPORT_SYMBOL_GPL(of_property_read_string_index);
-
 /**
  * of_property_match_string() - Find string in a list and return index
  * @np: pointer to node containing string list property
@@ -1148,7 +1102,7 @@ int of_property_match_string(struct device_node *np, const char *propname,
        end = p + prop->length;
 
        for (i = 0; p < end; i++, p += l) {
-               l = strlen(p) + 1;
+               l = strnlen(p, end - p) + 1;
                if (p + l > end)
                        return -EILSEQ;
                pr_debug("comparing %s with %s\n", string, p);
@@ -1160,39 +1114,41 @@ int of_property_match_string(struct device_node *np, const char *propname,
 EXPORT_SYMBOL_GPL(of_property_match_string);
 
 /**
- * of_property_count_strings - Find and return the number of strings from a
- * multiple strings property.
+ * of_property_read_string_util() - Utility helper for parsing string properties
  * @np:                device node from which the property value is to be read.
  * @propname:  name of the property to be searched.
+ * @out_strs:  output array of string pointers.
+ * @sz:                number of array elements to read.
+ * @skip:      Number of strings to skip over at beginning of list.
  *
- * Search for a property in a device tree node and retrieve the number of null
- * terminated string contain in it. Returns the number of strings on
- * success, -EINVAL if the property does not exist, -ENODATA if property
- * does not have a value, and -EILSEQ if the string is not null-terminated
- * within the length of the property data.
+ * Don't call this function directly. It is a utility helper for the
+ * of_property_read_string*() family of functions.
  */
-int of_property_count_strings(struct device_node *np, const char *propname)
+int of_property_read_string_helper(struct device_node *np, const char *propname,
+                                  const char **out_strs, size_t sz, int skip)
 {
        struct property *prop = of_find_property(np, propname, NULL);
-       int i = 0;
-       size_t l = 0, total = 0;
-       const char *p;
+       int l = 0, i = 0;
+       const char *p, *end;
 
        if (!prop)
                return -EINVAL;
        if (!prop->value)
                return -ENODATA;
-       if (strnlen(prop->value, prop->length) >= prop->length)
-               return -EILSEQ;
-
        p = prop->value;
+       end = p + prop->length;
 
-       for (i = 0; total < prop->length; total += l, p += l, i++)
-               l = strlen(p) + 1;
-
-       return i;
+       for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
+               l = strnlen(p, end - p) + 1;
+               if (p + l > end)
+                       return -EILSEQ;
+               if (out_strs && i >= skip)
+                       *out_strs++ = p;
+       }
+       i -= skip;
+       return i <= 0 ? -ENODATA : i;
 }
-EXPORT_SYMBOL_GPL(of_property_count_strings);
+EXPORT_SYMBOL_GPL(of_property_read_string_helper);
 
 /**
  * of_parse_phandle - Resolve a phandle property to a device_node pointer
index 0eb5c38b4e07ab2bf1f653292d142d59c643e197..f5e8dc7a725c201cd8c9a13b79ad8257c1428f14 100644 (file)
@@ -126,8 +126,9 @@ static void __init of_selftest_parse_phandle_with_args(void)
        selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
 }
 
-static void __init of_selftest_property_match_string(void)
+static void __init of_selftest_property_string(void)
 {
+       const char *strings[4];
        struct device_node *np;
        int rc;
 
@@ -145,13 +146,66 @@ static void __init of_selftest_property_match_string(void)
        rc = of_property_match_string(np, "phandle-list-names", "third");
        selftest(rc == 2, "third expected:0 got:%i\n", rc);
        rc = of_property_match_string(np, "phandle-list-names", "fourth");
-       selftest(rc == -ENODATA, "unmatched string; rc=%i", rc);
+       selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
        rc = of_property_match_string(np, "missing-property", "blah");
-       selftest(rc == -EINVAL, "missing property; rc=%i", rc);
+       selftest(rc == -EINVAL, "missing property; rc=%i\n", rc);
        rc = of_property_match_string(np, "empty-property", "blah");
-       selftest(rc == -ENODATA, "empty property; rc=%i", rc);
+       selftest(rc == -ENODATA, "empty property; rc=%i\n", rc);
        rc = of_property_match_string(np, "unterminated-string", "blah");
-       selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc);
+       selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
+
+       /* of_property_count_strings() tests */
+       rc = of_property_count_strings(np, "string-property");
+       selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
+       rc = of_property_count_strings(np, "phandle-list-names");
+       selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
+       rc = of_property_count_strings(np, "unterminated-string");
+       selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
+       rc = of_property_count_strings(np, "unterminated-string-list");
+       selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
+
+       /* of_property_read_string_index() tests */
+       rc = of_property_read_string_index(np, "string-property", 0, strings);
+       selftest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc);
+       strings[0] = NULL;
+       rc = of_property_read_string_index(np, "string-property", 1, strings);
+       selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+       rc = of_property_read_string_index(np, "phandle-list-names", 0, strings);
+       selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
+       rc = of_property_read_string_index(np, "phandle-list-names", 1, strings);
+       selftest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc);
+       rc = of_property_read_string_index(np, "phandle-list-names", 2, strings);
+       selftest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc);
+       strings[0] = NULL;
+       rc = of_property_read_string_index(np, "phandle-list-names", 3, strings);
+       selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+       strings[0] = NULL;
+       rc = of_property_read_string_index(np, "unterminated-string", 0, strings);
+       selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+       rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings);
+       selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
+       strings[0] = NULL;
+       rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */
+       selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+       strings[1] = NULL;
+
+       /* of_property_read_string_array() tests */
+       rc = of_property_read_string_array(np, "string-property", strings, 4);
+       selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
+       rc = of_property_read_string_array(np, "phandle-list-names", strings, 4);
+       selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
+       rc = of_property_read_string_array(np, "unterminated-string", strings, 4);
+       selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
+       /* -- An incorrectly formed string should cause a failure */
+       rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4);
+       selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
+       /* -- parsing the correctly formed strings should still work: */
+       strings[2] = NULL;
+       rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2);
+       selftest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc);
+       strings[1] = NULL;
+       rc = of_property_read_string_array(np, "phandle-list-names", strings, 1);
+       selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]);
 }
 
 static int __init of_selftest(void)
@@ -167,7 +221,7 @@ static int __init of_selftest(void)
 
        pr_info("start of selftest - you will see error messages\n");
        of_selftest_parse_phandle_with_args();
-       of_selftest_property_match_string();
+       of_selftest_property_string();
        pr_info("end of selftest - %s\n", selftest_passed ? "PASS" : "FAIL");
        return 0;
 }
index 5b4a9d9cd200dd5a6ee32c3d343bbae0cf2ff8ac..689f3c87ee5ce42fe0623cf1dc959e7597524abc 100644 (file)
@@ -175,7 +175,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
 
-       return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n",
+       return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
                       pci_dev->vendor, pci_dev->device,
                       pci_dev->subsystem_vendor, pci_dev->subsystem_device,
                       (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
index 4510279e28dcc52e568cf4c868de955031d50afb..910339c0791fd64724adfe2796b26d4ee25f2038 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/ktime.h>
+#include <linux/mm.h>
 #include <asm/dma.h>   /* isa_dma_bridge_buggy */
 #include "pci.h"
 
@@ -291,6 +292,25 @@ static void quirk_citrine(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM,    PCI_DEVICE_ID_IBM_CITRINE,      quirk_citrine);
 
+/*  On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
+static void quirk_extend_bar_to_page(struct pci_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+               struct resource *r = &dev->resource[i];
+
+               if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
+                       r->end = PAGE_SIZE - 1;
+                       r->start = 0;
+                       r->flags |= IORESOURCE_UNSET;
+                       dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
+                                i, r);
+               }
+       }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
+
 /*
  *  S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
  *  If it's needed, re-allocate the region.
index a808bf56e271c2c2014035fe14313c423450fb24..11b829a939629363d572d0ea1ab8637ff3cd2460 100644 (file)
@@ -156,6 +156,13 @@ config PINCTRL_ROCKCHIP
        select GENERIC_PINCONF
        select GENERIC_IRQ_CHIP
 
+config PINCTRL_RK3368
+       bool
+       select PINMUX
+       select GENERIC_PINCONF
+       select GENERIC_IRQ_CHIP
+       select MFD_SYSCON
+
 config PINCTRL_SINGLE
        tristate "One-register-per-pin type device tree based pinctrl driver"
        depends on OF
index ad6ec81f7ba397eeb929bab9746ebca8d4b86ddc..91b0510ad43f6af2589bfd4d07def4437f6546b8 100644 (file)
@@ -31,6 +31,7 @@ obj-$(CONFIG_PINCTRL_STN8815) += pinctrl-nomadik-stn8815.o
 obj-$(CONFIG_PINCTRL_DB8500)   += pinctrl-nomadik-db8500.o
 obj-$(CONFIG_PINCTRL_DB8540)   += pinctrl-nomadik-db8540.o
 obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
+obj-$(CONFIG_PINCTRL_RK3368)  += pinctrl-rk3368.o
 obj-$(CONFIG_PINCTRL_SINGLE)   += pinctrl-single.o
 obj-$(CONFIG_PINCTRL_SIRF)     += pinctrl-sirf.o
 obj-$(CONFIG_PINCTRL_SUNXI)    += pinctrl-sunxi.o
index 596a2522a6b1750b2c4c94c22f7c4177847f5bb4..d2103e546ce3e7a10882ded12232a537210eb300 100644 (file)
@@ -529,7 +529,7 @@ exit:
  * <devicename> <state> <pinname> are values that should match the pinctrl-maps
  * <newvalue> reflects the new config and is driver dependant
  */
-static int pinconf_dbg_config_write(struct file *file,
+static ssize_t pinconf_dbg_config_write(struct file *file,
        const char __user *user_buf, size_t count, loff_t *ppos)
 {
        struct pinctrl_maps *maps_node;
diff --git a/drivers/pinctrl/pinctrl-rk3368.c b/drivers/pinctrl/pinctrl-rk3368.c
new file mode 100755 (executable)
index 0000000..a733dd0
--- /dev/null
@@ -0,0 +1,2307 @@
+/*
+ * Pinctrl driver for Rockchip SoCs
+ *
+ * Copyright (c) 2013 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * With some ideas taken from pinctrl-samsung:
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ * Copyright (c) 2012 Linaro Ltd
+ *             http://www.linaro.org
+ *
+ * and pinctrl-at91:
+ * Copyright (C) 2011-2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+
+#include "core.h"
+#include "pinconf.h"
+
+
+/*
+ * Local debug print helper, compiled out by default (#if 0).  Flip the
+ * condition to 1 to get dev_printk(KERN_INFO)-based tracing instead of
+ * the usual dynamic-debug machinery.
+ */
+#if 0
+#define pinctrl_dbg(dev, format, arg...)            \
+        dev_printk(KERN_INFO , dev , format , ## arg)
+#else
+#define pinctrl_dbg(dev, format, arg...)
+#endif
+
+
+/*
+ * GPIO control registers, offsets relative to each bank's reg_base.
+ * GPIO_SWPORT_DDR holds direction bits: 1 = output, 0 = input (see
+ * rockchip_pmx_gpio_set_direction()).  The remaining registers are the
+ * interrupt/debounce block of the bank.
+ */
+#define GPIO_SWPORT_DR         0x00
+#define GPIO_SWPORT_DDR                0x04
+#define GPIO_INTEN             0x30
+#define GPIO_INTMASK           0x34
+#define GPIO_INTTYPE_LEVEL     0x38
+#define GPIO_INT_POLARITY      0x3c
+#define GPIO_INT_STATUS                0x40
+#define GPIO_INT_RAWSTATUS     0x44
+#define GPIO_DEBOUNCE          0x48
+#define GPIO_PORTS_EOI         0x4c
+#define GPIO_EXT_PORT          0x50
+#define GPIO_LS_SYNC           0x60
+
+/*
+ * Supported Rockchip SoC families.  The type selects the pull/drive
+ * register layout used by rockchip_get_pull()/rockchip_set_pull() and
+ * the *_calc_*_reg_and_bit() helpers.
+ */
+enum rockchip_pinctrl_type {
+       RK2928,
+       RK3066B,
+       RK3188,
+       RK3288,
+       RK3368,
+};
+
+/**
+ * Encode variants of iomux registers into a type variable
+ */
+#define IOMUX_GPIO_ONLY                BIT(0)  /* no alternate functions; mux is GPIO only */
+#define IOMUX_WIDTH_4BIT       BIT(1)  /* 4 bits per pin (default is 2) */
+#define IOMUX_SOURCE_PMU       BIT(2)  /* register lives in the PMU regmap */
+#define IOMUX_UNROUTED         BIT(3)  /* pins not routed out; mux access fails */
+
+/**
+ * @type: iomux variant using IOMUX_* constants
+ * @offset: if initialized to -1 it will be autocalculated, by specifying
+ *         an initial offset value the relevant source offset can be reset
+ *         to a new value for autocalculating the following iomux registers.
+ */
+struct rockchip_iomux {
+       int                             type;
+       int                             offset;
+};
+
+/**
+ * @reg_base: register base of the gpio bank
+ * @regmap_pull: optional separate regmap for additional pull settings
+ * @clk: clock of the gpio bank
+ * @irq: interrupt of the gpio bank
+ * @pin_base: first pin number
+ * @nr_pins: number of pins in this bank
+ * @name: name of the bank
+ * @bank_num: number of the bank, to account for holes
+ * @iomux: array describing the 4 iomux sources of the bank
+ * @valid: are all necessary informations present
+ * @of_node: dt node of this bank
+ * @drvdata: common pinctrl basedata
+ * @domain: irqdomain of the gpio bank
+ * @gpio_chip: gpiolib chip
+ * @grange: gpio range
+ * @slock: spinlock for the gpio bank
+ * @toggle_edge_mode: NOTE(review): not used in this chunk; name suggests a
+ *     per-pin bitmask for emulating both-edge irqs -- verify in irq code
+ * @suspend_wakeup: NOTE(review): not used in this chunk; presumably the
+ *     wakeup-enable mask applied on suspend -- verify
+ * @saved_wakeup: NOTE(review): not used in this chunk; presumably the
+ *     irq-enable state saved across suspend/resume -- verify
+ */
+struct rockchip_pin_bank {
+       void __iomem                    *reg_base;
+       struct regmap                   *regmap_pull;
+       struct clk                      *clk;
+       int                             irq;
+       u32                             pin_base;
+       u8                              nr_pins;
+       char                            *name;
+       u8                              bank_num;
+       struct rockchip_iomux           iomux[4];
+       bool                            valid;
+       struct device_node              *of_node;
+       struct rockchip_pinctrl         *drvdata;
+       struct irq_domain               *domain;
+       struct gpio_chip                gpio_chip;
+       struct pinctrl_gpio_range       grange;
+       spinlock_t                      slock;
+       u32                             toggle_edge_mode;
+       u32                             suspend_wakeup;
+       u32                             saved_wakeup;
+};
+
+/*
+ * Initializer for a bank with default (2-bit, GRF-based) iomuxes.  The -1
+ * offsets request autocalculation of the iomux register offsets.
+ */
+#define PIN_BANK(id, pins, label)                      \
+       {                                               \
+               .bank_num       = id,                   \
+               .nr_pins        = pins,                 \
+               .name           = label,                \
+               .iomux          = {                     \
+                       { .offset = -1 },               \
+                       { .offset = -1 },               \
+                       { .offset = -1 },               \
+                       { .offset = -1 },               \
+               },                                      \
+       }
+
+/*
+ * Like PIN_BANK, but with an explicit IOMUX_* type flag per iomux
+ * quarter (pins 0-7, 8-15, 16-23, 24-31) of the bank.
+ */
+#define PIN_BANK_IOMUX_FLAGS(id, pins, label, iom0, iom1, iom2, iom3)  \
+       {                                                               \
+               .bank_num       = id,                                   \
+               .nr_pins        = pins,                                 \
+               .name           = label,                                \
+               .iomux          = {                                     \
+                       { .type = iom0, .offset = -1 },                 \
+                       { .type = iom1, .offset = -1 },                 \
+                       { .type = iom2, .offset = -1 },                 \
+                       { .type = iom3, .offset = -1 },                 \
+               },                                                      \
+       }
+
+/**
+ * struct rockchip_pin_ctrl: static per-SoC description of a pin controller.
+ * @pin_banks: array of gpio banks
+ * @nr_banks: number of entries in @pin_banks
+ * @nr_pins: total pin count across all banks
+ * @label: human-readable name of the controller
+ * @type: SoC family, selects register layouts (enum rockchip_pinctrl_type)
+ * @grf_mux_offset: per field name, base offset of the iomux registers in
+ *     the GRF (initialisation not visible in this chunk -- verify)
+ * @pmu_mux_offset: per field name, base offset of the iomux registers in
+ *     the PMU (initialisation not visible in this chunk -- verify)
+ * @pull_calc_reg: computes regmap/register/bit holding a pin's pull bits
+ */
+struct rockchip_pin_ctrl {
+       struct rockchip_pin_bank        *pin_banks;
+       u32                             nr_banks;
+       u32                             nr_pins;
+       char                            *label;
+       enum rockchip_pinctrl_type      type;
+       int                             grf_mux_offset;
+       int                             pmu_mux_offset;
+       void    (*pull_calc_reg)(struct rockchip_pin_bank *bank,
+                                   int pin_num, struct regmap **regmap,
+                                   int *reg, u8 *bit);
+};
+
+/**
+ * struct rockchip_pin_config: per-pin settings within a pin group.
+ * @func: mux function number programmed for the pin
+ * @configs: generic pinconf values applied to the pin
+ * @nconfigs: number of entries in @configs
+ */
+struct rockchip_pin_config {
+       unsigned int            func;
+       unsigned long           *configs;
+       unsigned int            nconfigs;
+};
+
+/**
+ * struct rockchip_pin_group: represent group of pins of a pinmux function.
+ * @name: name of the pin group, used to lookup the group.
+ * @npins: number of pins included in this group.
+ * @pins: the pins included in this group.
+ * @data: per-pin mux function and config values, one entry per pin.
+ */
+struct rockchip_pin_group {
+       const char                      *name;
+       unsigned int                    npins;
+       unsigned int                    *pins;
+       struct rockchip_pin_config      *data;
+};
+
+/**
+ * struct rockchip_pmx_func: represent a pin function.
+ * @name: name of the pin function, used to lookup the function.
+ * @groups: one or more names of pin groups that provide this function.
+ * @ngroups: number of groups included in @groups.
+ */
+struct rockchip_pmx_func {
+       const char              *name;
+       const char              **groups;
+       u8                      ngroups;
+};
+
+/**
+ * struct rockchip_pinctrl: runtime state of one pinctrl driver instance.
+ * @regmap_base: regmap of the main (GRF) register space
+ * @reg_size: size of the mapped register area
+ * @regmap_pull: optional separate regmap holding pull registers
+ * @regmap_pmu: optional regmap of the PMU register space
+ * @dev: associated device
+ * @ctrl: matched per-SoC controller description
+ * @pctl: pinctrl descriptor registered with the core
+ * @pctl_dev: handle returned by the pinctrl core
+ * @groups: pin groups parsed from the device tree
+ * @ngroups: number of entries in @groups
+ * @functions: pinmux functions parsed from the device tree
+ * @nfunctions: number of entries in @functions
+ */
+struct rockchip_pinctrl {
+       struct regmap                   *regmap_base;
+       int                             reg_size;
+       struct regmap                   *regmap_pull;
+       struct regmap                   *regmap_pmu;
+       struct device                   *dev;
+       struct rockchip_pin_ctrl        *ctrl;
+       struct pinctrl_desc             pctl;
+       struct pinctrl_dev              *pctl_dev;
+       struct rockchip_pin_group       *groups;
+       unsigned int                    ngroups;
+       struct rockchip_pmx_func        *functions;
+       unsigned int                    nfunctions;
+};
+
+/* Common regmap settings: 32-bit registers at a stride of 4 bytes. */
+static struct regmap_config rockchip_regmap_config = {
+       .reg_bits = 32,
+       .val_bits = 32,
+       .reg_stride = 4,
+};
+/*
+ * Singleton handle to the driver instance.  NOTE(review): not referenced
+ * anywhere in this chunk -- verify it is set and actually needed.
+ */
+static struct rockchip_pinctrl *g_info;
+
+/* Map a gpiolib chip back to its embedding rockchip_pin_bank. */
+static inline struct rockchip_pin_bank *gc_to_pin_bank(struct gpio_chip *gc)
+{
+       return container_of(gc, struct rockchip_pin_bank, gpio_chip);
+}
+
+/* Find the parsed pin group with the given name, or NULL if absent. */
+static const inline struct rockchip_pin_group *pinctrl_name_to_group(
+                                       const struct rockchip_pinctrl *info,
+                                       const char *name)
+{
+       struct rockchip_pin_group *grp = info->groups;
+       int remaining;
+
+       for (remaining = info->ngroups; remaining > 0; remaining--, grp++) {
+               if (strcmp(grp->name, name) == 0)
+                       return grp;
+       }
+
+       return NULL;
+}
+
+/*
+ * given a pin number that is local to a pin controller, find out the pin bank
+ * and the register base of the pin bank.
+ *
+ * NOTE(review): assumes @pin is valid for this controller; an out-of-range
+ * pin makes the loop walk past the end of the bank array -- confirm all
+ * callers pass pins below ctrl->nr_pins.
+ */
+static struct rockchip_pin_bank *pin_to_bank(struct rockchip_pinctrl *info,
+                                                               unsigned pin)
+{
+       struct rockchip_pin_bank *b = info->ctrl->pin_banks;
+
+       while (pin >= (b->pin_base + b->nr_pins))
+               b++;
+
+       return b;
+}
+
+/*
+ * Look a bank up by its bank number.  Bank numbers may contain holes, so
+ * the array is searched instead of indexed.  Returns ERR_PTR(-EINVAL)
+ * when no bank carries the requested number.
+ */
+static struct rockchip_pin_bank *bank_num_to_bank(
+                                       struct rockchip_pinctrl *info,
+                                       unsigned num)
+{
+       struct rockchip_pin_bank *bank;
+       int idx;
+
+       for (idx = 0, bank = info->ctrl->pin_banks;
+            idx < info->ctrl->nr_banks; idx++, bank++) {
+               if (bank->bank_num == num)
+                       return bank;
+       }
+
+       return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Pinctrl_ops handling
+ */
+
+/* Number of pin groups parsed from the device tree. */
+static int rockchip_get_groups_count(struct pinctrl_dev *pctldev)
+{
+       struct rockchip_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+
+       return priv->ngroups;
+}
+
+/* Group name lookup for the pinctrl core; @selector is core-validated. */
+static const char *rockchip_get_group_name(struct pinctrl_dev *pctldev,
+                                                       unsigned selector)
+{
+       struct rockchip_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+
+       return priv->groups[selector].name;
+}
+
+/*
+ * Hand the pinctrl core the pin list of group @selector.  The list is
+ * owned by the driver; no copy is made.
+ */
+static int rockchip_get_group_pins(struct pinctrl_dev *pctldev,
+                                     unsigned selector, const unsigned **pins,
+                                     unsigned *npins)
+{
+       struct rockchip_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+       const struct rockchip_pin_group *grp;
+
+       /* reject out-of-range selectors */
+       if (selector >= priv->ngroups)
+               return -EINVAL;
+
+       grp = &priv->groups[selector];
+       *pins = grp->pins;
+       *npins = grp->npins;
+
+       return 0;
+}
+
+/*
+ * Translate a device-tree pin configuration node into a pinctrl map:
+ * one MUX_GROUP entry for the whole group followed by one CONFIGS_PIN
+ * entry per pin.  The map array is devm-allocated, so dt_free_map()
+ * has nothing to release.  Returns 0 or a negative errno.
+ */
+static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
+                                struct device_node *np,
+                                struct pinctrl_map **map, unsigned *num_maps)
+{
+       struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+       const struct rockchip_pin_group *grp;
+       struct pinctrl_map *new_map;
+       struct device_node *parent;
+       int map_num = 1;
+       int i;
+
+       /*
+        * first find the group of this node and check if we need to create
+        * config maps for pins
+        */
+       grp = pinctrl_name_to_group(info, np->name);
+       if (!grp) {
+               dev_err(info->dev, "unable to find group for node %s\n",
+                       np->name);
+               return -EINVAL;
+       }
+
+       map_num += grp->npins;
+       new_map = devm_kzalloc(pctldev->dev, sizeof(*new_map) * map_num,
+                                                               GFP_KERNEL);
+       if (!new_map)
+               return -ENOMEM;
+
+       *map = new_map;
+       *num_maps = map_num;
+
+       /* create mux map; the parent node's name is the function name */
+       parent = of_get_parent(np);
+       if (!parent) {
+               devm_kfree(pctldev->dev, new_map);
+               return -EINVAL;
+       }
+       new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
+       new_map[0].data.mux.function = parent->name;
+       new_map[0].data.mux.group = np->name;
+       of_node_put(parent);
+
+       /* create config map: one CONFIGS_PIN entry per pin in the group */
+       new_map++;
+       for (i = 0; i < grp->npins; i++) {
+               new_map[i].type = PIN_MAP_TYPE_CONFIGS_PIN;
+               new_map[i].data.configs.group_or_pin =
+                               pin_get_name(pctldev, grp->pins[i]);
+               new_map[i].data.configs.configs = grp->data[i].configs;
+               new_map[i].data.configs.num_configs = grp->data[i].nconfigs;
+       }
+
+       pinctrl_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
+               (*map)->data.mux.function, (*map)->data.mux.group, map_num);
+
+       return 0;
+}
+
+/*
+ * Nothing to do here: the maps created in rockchip_dt_node_to_map() are
+ * devm-allocated and released together with the device.
+ */
+static void rockchip_dt_free_map(struct pinctrl_dev *pctldev,
+                                   struct pinctrl_map *map, unsigned num_maps)
+{
+}
+
+/* pinctrl_ops callbacks handed to the pinctrl core. */
+static const struct pinctrl_ops rockchip_pctrl_ops = {
+       .get_groups_count       = rockchip_get_groups_count,
+       .get_group_name         = rockchip_get_group_name,
+       .get_group_pins         = rockchip_get_group_pins,
+       .dt_node_to_map         = rockchip_dt_node_to_map,
+       .dt_free_map            = rockchip_dt_free_map,
+};
+
+/*
+ * Hardware access
+ */
+
+/*
+ * rockchip_get_mux() - read the mux function currently programmed for @pin.
+ *
+ * Each bank has up to 4 iomux registers covering 8 pins each.  Fields are
+ * 2 bits wide by default; IOMUX_WIDTH_4BIT iomuxes use 4-bit fields split
+ * over two consecutive registers.  Returns the function number,
+ * RK_FUNC_GPIO for gpio-only iomuxes, or a negative errno.
+ */
+static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin)
+{
+       struct rockchip_pinctrl *info = bank->drvdata;
+       int iomux_num = (pin / 8);
+       struct regmap *regmap;
+       unsigned int val;
+       int reg, ret, mask;
+       u8 bit;
+
+       if (iomux_num > 3)
+               return -EINVAL;
+
+       if (bank->iomux[iomux_num].type & IOMUX_UNROUTED) {
+               dev_err(info->dev, "pin %d is unrouted\n", pin);
+               return -EINVAL;
+       }
+
+       if (bank->iomux[iomux_num].type & IOMUX_GPIO_ONLY)
+               return RK_FUNC_GPIO;
+
+       /* iomux registers may live in the PMU or the main (GRF) regmap */
+       regmap = (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
+                               ? info->regmap_pmu : info->regmap_base;
+
+       /* get basic quadrupel of mux registers and the correct reg inside */
+       mask = (bank->iomux[iomux_num].type & IOMUX_WIDTH_4BIT) ? 0xf : 0x3;
+       reg = bank->iomux[iomux_num].offset;
+       if (bank->iomux[iomux_num].type & IOMUX_WIDTH_4BIT) {
+               /* 4-bit fields: second half of the 8 pins sits in reg + 4 */
+               if ((pin % 8) >= 4)
+                       reg += 0x4;
+               bit = (pin % 4) * 4;
+       } else {
+               bit = (pin % 8) * 2;
+       }
+
+       ret = regmap_read(regmap, reg, &val);
+       if (ret)
+               return ret;
+
+       return ((val >> bit) & mask);
+}
+
+/*
+ * Set a new mux function for a pin.
+ *
+ * The register is divided into the upper and lower 16 bit. When changing
+ * a value, the previous register value is not read and changed. Instead
+ * it seems the changed bits are marked in the upper 16 bit, while the
+ * changed value gets set in the same offset in the lower 16 bit.
+ * All pin settings seem to be 2 bit wide in both the upper and lower
+ * parts.
+ * @bank: pin bank to change
+ * @pin: pin to change
+ * @mux: new mux function to set
+ *
+ * Returns 0 on success, a negative errno otherwise.
+ */
+static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
+{
+       struct rockchip_pinctrl *info = bank->drvdata;
+       int iomux_num = (pin / 8);
+       struct regmap *regmap;
+       int reg, ret, mask;
+       unsigned long flags;
+       u8 bit;
+       u32 data, rmask;
+
+       if (iomux_num > 3)
+               return -EINVAL;
+
+       if (bank->iomux[iomux_num].type & IOMUX_UNROUTED) {
+               dev_err(info->dev, "pin %d is unrouted\n", pin);
+               return -EINVAL;
+       }
+
+       /* gpio-only iomuxes accept only the GPIO function (a no-op) */
+       if (bank->iomux[iomux_num].type & IOMUX_GPIO_ONLY) {
+               if (mux != RK_FUNC_GPIO) {
+                       dev_err(info->dev,
+                               "pin %d only supports a gpio mux\n", pin);
+                       return -ENOTSUPP;
+               } else {
+                       return 0;
+               }
+       }
+
+       pinctrl_dbg(info->dev, "setting mux of GPIO%d-%d to %d\n",
+                                               bank->bank_num, pin, mux);
+
+       regmap = (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
+                               ? info->regmap_pmu : info->regmap_base;
+
+       /* get basic quadrupel of mux registers and the correct reg inside */
+       mask = (bank->iomux[iomux_num].type & IOMUX_WIDTH_4BIT) ? 0xf : 0x3;
+       reg = bank->iomux[iomux_num].offset;
+       if (bank->iomux[iomux_num].type & IOMUX_WIDTH_4BIT) {
+               if ((pin % 8) >= 4)
+                       reg += 0x4;
+               bit = (pin % 4) * 4;
+       } else {
+               bit = (pin % 8) * 2;
+       }
+
+       spin_lock_irqsave(&bank->slock, flags);
+
+       /* write-enable bits in the upper halfword gate the lower halfword */
+       data = (mask << (bit + 16));
+       rmask = data | (data >> 16);
+       data |= (mux & mask) << bit;
+       ret = regmap_update_bits(regmap, reg, rmask, data);
+
+       spin_unlock_irqrestore(&bank->slock, flags);
+
+       return ret;
+}
+
+#define RK2928_PULL_OFFSET             0x118
+#define RK2928_PULL_PINS_PER_REG       16
+#define RK2928_PULL_BANK_STRIDE                8
+
+/*
+ * rk2928 pull lookup: one bit per pin, 16 pins per 32-bit register,
+ * 8 bytes of pull registers per bank, all in the main regmap.
+ * (Fix: dropped the stray ';' after the function body.)
+ */
+static void rk2928_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+                                   int pin_num, struct regmap **regmap,
+                                   int *reg, u8 *bit)
+{
+       struct rockchip_pinctrl *info = bank->drvdata;
+
+       *regmap = info->regmap_base;
+       *reg = RK2928_PULL_OFFSET;
+       *reg += bank->bank_num * RK2928_PULL_BANK_STRIDE;
+       *reg += (pin_num / RK2928_PULL_PINS_PER_REG) * 4;
+
+       *bit = pin_num % RK2928_PULL_PINS_PER_REG;
+}
+
+#define RK3188_PULL_OFFSET             0x164
+#define RK3188_PULL_BITS_PER_PIN       2
+#define RK3188_PULL_PINS_PER_REG       8
+#define RK3188_PULL_BANK_STRIDE                16
+#define RK3188_PULL_PMU_OFFSET         0x64
+
+/*
+ * rk3188 pull lookup: 2 bits per pin, 8 pins per register.  Pins 0-11 of
+ * bank 0 live in the PMU space (or the dedicated pull regmap when no PMU
+ * regmap is present); all other pins are in the GRF (or the dedicated
+ * pull regmap).
+ */
+static void rk3188_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+                                   int pin_num, struct regmap **regmap,
+                                   int *reg, u8 *bit)
+{
+       struct rockchip_pinctrl *info = bank->drvdata;
+
+       /* The first 12 pins of the first bank are located elsewhere */
+       if (bank->bank_num == 0 && pin_num < 12) {
+               *regmap = info->regmap_pmu ? info->regmap_pmu
+                                          : bank->regmap_pull;
+               *reg = info->regmap_pmu ? RK3188_PULL_PMU_OFFSET : 0;
+               *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+               *bit = pin_num % RK3188_PULL_PINS_PER_REG;
+               *bit *= RK3188_PULL_BITS_PER_PIN;
+       } else {
+               *regmap = info->regmap_pull ? info->regmap_pull
+                                           : info->regmap_base;
+               *reg = info->regmap_pull ? 0 : RK3188_PULL_OFFSET;
+
+               /* correct the offset, as it is the 2nd pull register */
+               *reg -= 4;
+               *reg += bank->bank_num * RK3188_PULL_BANK_STRIDE;
+               *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+
+               /*
+                * The bits in these registers have an inverse ordering
+                * with the lowest pin being in bits 15:14 and the highest
+                * pin in bits 1:0
+                */
+               *bit = 7 - (pin_num % RK3188_PULL_PINS_PER_REG);
+               *bit *= RK3188_PULL_BITS_PER_PIN;
+       }
+}
+
+#define RK3288_PULL_OFFSET             0x140
+#define RK3368_PULL_PMU_OFFSET 0x10
+#define RK3368_PULL_OFFSET             0x100
+
+/*
+ * Shared pull-register lookup for rk3288 and rk3368: bank 0 lives in the
+ * PMU register space, all other banks in the GRF.  Field layout (2 bits
+ * per pin, 8 pins per register) matches rk3188.
+ * Fix: *reg is now always initialized before being incremented; the old
+ * if/else-if chain left it uninitialized for any other controller type.
+ */
+static void rk3288_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+                                   int pin_num, struct regmap **regmap,
+                                   int *reg, u8 *bit)
+{
+       struct rockchip_pinctrl *info = bank->drvdata;
+       struct rockchip_pin_ctrl *ctrl = info->ctrl;
+
+       /* The first 24 pins of the first bank are located in PMU */
+       if (bank->bank_num == 0) {
+               *regmap = info->regmap_pmu;
+               /* default to the rk3288 layout so *reg is always defined */
+               *reg = RK3188_PULL_PMU_OFFSET;
+               if (ctrl->type == RK3368)
+                       *reg = RK3368_PULL_PMU_OFFSET;
+
+               *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+               *bit = pin_num % RK3188_PULL_PINS_PER_REG;
+               *bit *= RK3188_PULL_BITS_PER_PIN;
+       } else {
+               *regmap = info->regmap_base;
+               *reg = RK3288_PULL_OFFSET;
+               if (ctrl->type == RK3368)
+                       *reg = RK3368_PULL_OFFSET;
+
+               /* correct the offset, as we're starting with the 2nd bank */
+               *reg -= 0x10;
+               *reg += bank->bank_num * RK3188_PULL_BANK_STRIDE;
+               *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+
+               *bit = (pin_num % RK3188_PULL_PINS_PER_REG);
+               *bit *= RK3188_PULL_BITS_PER_PIN;
+       }
+}
+
+#define RK3288_DRV_PMU_OFFSET          0x70
+#define RK3288_DRV_GRF_OFFSET          0x1c0
+#define RK3288_DRV_BITS_PER_PIN                2
+#define RK3288_DRV_PINS_PER_REG                8
+#define RK3288_DRV_BANK_STRIDE         16
+/* Selectable drive strengths indexed by the 2-bit register field
+ * (presumably mA -- confirm against the SoC TRM). */
+static int rk3288_drv_list[] = { 2, 4, 8, 12 };
+
+#define RK3368_DRV_PMU_OFFSET          0x20
+#define RK3368_DRV_GRF_OFFSET          0x200
+
+/*
+ * Shared drive-strength register lookup for rk3288 and rk3368: bank 0
+ * lives in the PMU space, all other banks in the GRF.
+ * Fix: *reg is now always initialized before being incremented; the old
+ * if/else-if chain left it uninitialized for any other controller type.
+ */
+static void rk3288_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+                                   int pin_num, struct regmap **regmap,
+                                   int *reg, u8 *bit)
+{
+       struct rockchip_pinctrl *info = bank->drvdata;
+       struct rockchip_pin_ctrl *ctrl = info->ctrl;
+
+       /* The first 24 pins of the first bank are located in PMU */
+       if (bank->bank_num == 0) {
+               *regmap = info->regmap_pmu;
+               /* default to the rk3288 layout so *reg is always defined */
+               *reg = RK3288_DRV_PMU_OFFSET;
+               if (ctrl->type == RK3368)
+                       *reg = RK3368_DRV_PMU_OFFSET;
+
+               *reg += ((pin_num / RK3288_DRV_PINS_PER_REG) * 4);
+               *bit = pin_num % RK3288_DRV_PINS_PER_REG;
+               *bit *= RK3288_DRV_BITS_PER_PIN;
+       } else {
+               *regmap = info->regmap_base;
+               *reg = RK3288_DRV_GRF_OFFSET;
+               if (ctrl->type == RK3368)
+                       *reg = RK3368_DRV_GRF_OFFSET;
+
+               /* correct the offset, as we're starting with the 2nd bank */
+               *reg -= 0x10;
+               *reg += bank->bank_num * RK3288_DRV_BANK_STRIDE;
+               *reg += ((pin_num / RK3288_DRV_PINS_PER_REG) * 4);
+
+               *bit = (pin_num % RK3288_DRV_PINS_PER_REG);
+               *bit *= RK3288_DRV_BITS_PER_PIN;
+       }
+}
+
+/* Read back the currently programmed drive strength for one pin. */
+static int rk3288_get_drive(struct rockchip_pin_bank *bank, int pin_num)
+{
+       struct regmap *regmap;
+       u32 val, field;
+       int reg, err;
+       u8 bit;
+
+       rk3288_calc_drv_reg_and_bit(bank, pin_num, &regmap, &reg, &bit);
+
+       err = regmap_read(regmap, reg, &val);
+       if (err)
+               return err;
+
+       /* isolate the 2-bit field for this pin and map it to a strength */
+       field = (val >> bit) & ((1 << RK3288_DRV_BITS_PER_PIN) - 1);
+
+       return rk3288_drv_list[field];
+}
+
+/*
+ * rk3288_set_drive() - program the drive strength for one pin.
+ * @strength must be one of the values in rk3288_drv_list; its list index
+ * is what gets written into the 2-bit register field.  Returns 0 on
+ * success, -EINVAL for unsupported strengths, or a regmap errno.
+ */
+static int rk3288_set_drive(struct rockchip_pin_bank *bank, int pin_num,
+                           int strength)
+{
+       struct rockchip_pinctrl *info = bank->drvdata;
+       struct regmap *regmap;
+       unsigned long flags;
+       int reg, ret, i;
+       u32 data, rmask;
+       u8 bit;
+
+       rk3288_calc_drv_reg_and_bit(bank, pin_num, &regmap, &reg, &bit);
+
+       /* map the requested strength to its register field value */
+       ret = -EINVAL;
+       for (i = 0; i < ARRAY_SIZE(rk3288_drv_list); i++) {
+               if (rk3288_drv_list[i] == strength) {
+                       ret = i;
+                       break;
+               }
+       }
+
+       if (ret < 0) {
+               dev_err(info->dev, "unsupported driver strength %d\n",
+                       strength);
+               return ret;
+       }
+
+       spin_lock_irqsave(&bank->slock, flags);
+
+       /* enable the write to the equivalent lower bits */
+       data = ((1 << RK3288_DRV_BITS_PER_PIN) - 1) << (bit + 16);
+       rmask = data | (data >> 16);
+       data |= (ret << bit);
+
+       ret = regmap_update_bits(regmap, reg, rmask, data);
+       spin_unlock_irqrestore(&bank->slock, flags);
+
+       return ret;
+}
+
+/*
+ * rockchip_get_pull() - read the pull bias configured for one pin.
+ * Returns a PIN_CONFIG_BIAS_* value, or a negative errno on failure.
+ * (Fixes: comment was missing "not"; stray ';' after the switch removed.)
+ */
+static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
+{
+       struct rockchip_pinctrl *info = bank->drvdata;
+       struct rockchip_pin_ctrl *ctrl = info->ctrl;
+       struct regmap *regmap;
+       int reg, ret;
+       u8 bit;
+       u32 data;
+
+       /* rk3066b does not support any pulls */
+       if (ctrl->type == RK3066B)
+               return PIN_CONFIG_BIAS_DISABLE;
+
+       ctrl->pull_calc_reg(bank, pin_num, &regmap, &reg, &bit);
+
+       ret = regmap_read(regmap, reg, &data);
+       if (ret)
+               return ret;
+
+       switch (ctrl->type) {
+       case RK2928:
+               /* one bit per pin: 0 = pull at pin default, 1 = disabled */
+               return !(data & BIT(bit))
+                               ? PIN_CONFIG_BIAS_PULL_PIN_DEFAULT
+                               : PIN_CONFIG_BIAS_DISABLE;
+       case RK3188:
+       case RK3288:
+       case RK3368:
+               data >>= bit;
+               data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
+
+               switch (data) {
+               case 0:
+                       return PIN_CONFIG_BIAS_DISABLE;
+               case 1:
+                       return PIN_CONFIG_BIAS_PULL_UP;
+               case 2:
+                       return PIN_CONFIG_BIAS_PULL_DOWN;
+               case 3:
+                       return PIN_CONFIG_BIAS_BUS_HOLD;
+               }
+
+               dev_err(info->dev, "unknown pull setting\n");
+               return -EIO;
+       default:
+               dev_err(info->dev, "unsupported pinctrl type\n");
+               return -EINVAL;
+       }
+}
+
+/*
+ * rockchip_set_pull() - program the pull bias for one pin.
+ * @pull is a PIN_CONFIG_BIAS_* value.  Returns 0 on success or a
+ * negative errno.
+ */
+static int rockchip_set_pull(struct rockchip_pin_bank *bank,
+                                       int pin_num, int pull)
+{
+       struct rockchip_pinctrl *info = bank->drvdata;
+       struct rockchip_pin_ctrl *ctrl = info->ctrl;
+       struct regmap *regmap;
+       int reg, ret;
+       unsigned long flags;
+       u8 bit;
+       u32 data, rmask;
+
+       pinctrl_dbg(info->dev, "setting pull of GPIO%d-%d to %d\n",
+                bank->bank_num, pin_num, pull);
+
+       /* rk3066b does not support any pulls */
+       if (ctrl->type == RK3066B)
+               return pull ? -EINVAL : 0;
+
+       ctrl->pull_calc_reg(bank, pin_num, &regmap, &reg, &bit);
+
+       switch (ctrl->type) {
+       case RK2928:
+               spin_lock_irqsave(&bank->slock, flags);
+
+               /* upper-halfword bit is the write enable for the lower bit */
+               data = BIT(bit + 16);
+               if (pull == PIN_CONFIG_BIAS_DISABLE)
+                       data |= BIT(bit);
+               ret = regmap_write(regmap, reg, data);
+
+               spin_unlock_irqrestore(&bank->slock, flags);
+               break;
+       case RK3188:
+       case RK3288:
+       case RK3368:
+               spin_lock_irqsave(&bank->slock, flags);
+
+               /* enable the write to the equivalent lower bits */
+               data = ((1 << RK3188_PULL_BITS_PER_PIN) - 1) << (bit + 16);
+               rmask = data | (data >> 16);
+
+               switch (pull) {
+               case PIN_CONFIG_BIAS_DISABLE:
+                       break;
+               case PIN_CONFIG_BIAS_PULL_UP:
+                       data |= (1 << bit);
+                       break;
+               case PIN_CONFIG_BIAS_PULL_DOWN:
+                       data |= (2 << bit);
+                       break;
+               case PIN_CONFIG_BIAS_BUS_HOLD:
+                       data |= (3 << bit);
+                       break;
+               default:
+                       spin_unlock_irqrestore(&bank->slock, flags);
+                       dev_err(info->dev, "unsupported pull setting %d\n",
+                               pull);
+                       return -EINVAL;
+               }
+
+               ret = regmap_update_bits(regmap, reg, rmask, data);
+
+               spin_unlock_irqrestore(&bank->slock, flags);
+               break;
+       default:
+               dev_err(info->dev, "unsupported pinctrl type\n");
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
+/*
+ * Pinmux_ops handling
+ */
+
+/* Number of pinmux functions parsed from the device tree. */
+static int rockchip_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+       struct rockchip_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+
+       return priv->nfunctions;
+}
+
+/* Function name lookup for the pinctrl core; @selector is core-validated. */
+static const char *rockchip_pmx_get_func_name(struct pinctrl_dev *pctldev,
+                                         unsigned selector)
+{
+       struct rockchip_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+
+       return priv->functions[selector].name;
+}
+
+/*
+ * Hand out the group list of function @selector.  The list is owned by
+ * the driver; no copy is made.
+ */
+static int rockchip_pmx_get_groups(struct pinctrl_dev *pctldev,
+                               unsigned selector, const char * const **groups,
+                               unsigned * const num_groups)
+{
+       struct rockchip_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+       struct rockchip_pmx_func *func = &priv->functions[selector];
+
+       *groups = func->groups;
+       *num_groups = func->ngroups;
+
+       return 0;
+}
+
+/*
+ * Apply the mux function of every pin in @group.  On failure, the pins
+ * already programmed are reverted to mux 0.
+ * Fix: the revert loop now looks the bank up per pin; it previously
+ * reused the bank of the failing pin, reverting earlier pins against the
+ * wrong register base when a group spans multiple banks.
+ */
+static int rockchip_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
+                                                           unsigned group)
+{
+       struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+       const unsigned int *pins = info->groups[group].pins;
+       const struct rockchip_pin_config *data = info->groups[group].data;
+       struct rockchip_pin_bank *bank;
+       int cnt, ret = 0;
+
+       pinctrl_dbg(info->dev, "enable function %s group %s\n",
+               info->functions[selector].name, info->groups[group].name);
+
+       /*
+        * for each pin in the pin group selected, program the correspoding pin
+        * pin function number in the config register.
+        */
+       for (cnt = 0; cnt < info->groups[group].npins; cnt++) {
+               bank = pin_to_bank(info, pins[cnt]);
+               ret = rockchip_set_mux(bank, pins[cnt] - bank->pin_base,
+                                      data[cnt].func);
+               if (ret)
+                       break;
+       }
+
+       if (ret) {
+               /* revert the already done pin settings */
+               for (cnt--; cnt >= 0; cnt--) {
+                       bank = pin_to_bank(info, pins[cnt]);
+                       rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0);
+               }
+
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Reset every pin of @group back to mux function 0. */
+static void rockchip_pmx_disable(struct pinctrl_dev *pctldev,
+                                       unsigned selector, unsigned group)
+{
+       struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+       const unsigned int *pins = info->groups[group].pins;
+       struct rockchip_pin_bank *bank;
+       int cnt;
+
+       pinctrl_dbg(info->dev, "disable function %s group %s\n",
+               info->functions[selector].name, info->groups[group].name);
+
+       for (cnt = 0; cnt < info->groups[group].npins; cnt++) {
+               bank = pin_to_bank(info, pins[cnt]);
+               rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0);
+       }
+}
+
+/*
+ * The calls to gpio_direction_output() and gpio_direction_input()
+ * leads to this function call (via the pinctrl_gpio_direction_{input|output}()
+ * function called from the gpiolib interface).
+ *
+ * Muxes the pin to GPIO, then sets its direction bit in GPIO_SWPORT_DDR.
+ * Returns 0 on success or a negative errno from the mux change.
+ */
+static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+                                             struct pinctrl_gpio_range *range,
+                                             unsigned offset, bool input)
+{
+       struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+       struct rockchip_pin_bank *bank;
+       struct gpio_chip *chip;
+       int pin, ret;
+       u32 data;
+
+       chip = range->gc;
+       bank = gc_to_pin_bank(chip);
+       pin = offset - chip->base;
+
+       dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n",
+                offset, range->name, pin, input ? "input" : "output");
+
+       ret = rockchip_set_mux(bank, pin, RK_FUNC_GPIO);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * NOTE(review): this read-modify-write of GPIO_SWPORT_DDR is not
+        * protected by bank->slock -- confirm whether concurrent direction
+        * changes on the same bank can race here.
+        */
+       data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
+       /* set bit to 1 for output, 0 for input */
+       if (!input)
+               data |= BIT(pin);
+       else
+               data &= ~BIT(pin);
+       writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
+
+       return 0;
+}
+
/* pinmux operations: function/group enumeration plus group and GPIO muxing */
static const struct pinmux_ops rockchip_pmx_ops = {
	.get_functions_count	= rockchip_pmx_get_funcs_count,
	.get_function_name	= rockchip_pmx_get_func_name,
	.get_function_groups	= rockchip_pmx_get_groups,
	.enable			= rockchip_pmx_enable,
	.disable		= rockchip_pmx_disable,
	.gpio_set_direction	= rockchip_pmx_gpio_set_direction,
};
+
+/*
+ * Pinconf_ops handling
+ */
+
+static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
+                                       enum pin_config_param pull)
+{
+       switch (ctrl->type) {
+       case RK2928:
+               return (pull == PIN_CONFIG_BIAS_PULL_PIN_DEFAULT ||
+                                       pull == PIN_CONFIG_BIAS_DISABLE);
+       case RK3066B:
+               return pull ? false : true;
+       case RK3188:
+       case RK3288:
+       case RK3368:
+               return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
+       }
+
+       return false;
+}
+
+static int rockchip_gpio_direction_output(struct gpio_chip *gc,
+                                         unsigned offset, int value);
+static int rockchip_gpio_get(struct gpio_chip *gc, unsigned offset);
+
+/* set the pin config settings for a specified pin */
+static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+                               unsigned long configs)
+{
+       struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+       struct rockchip_pin_bank *bank = pin_to_bank(info, pin);
+       enum pin_config_param param;
+       u16 arg;
+       //int i;
+       int rc;
+
+       //for (i = 0; i < num_configs; i++) {
+               param = pinconf_to_config_param(configs);
+               arg = pinconf_to_config_argument(configs);
+
+               switch (param) {
+               case PIN_CONFIG_BIAS_DISABLE:
+                       rc =  rockchip_set_pull(bank, pin - bank->pin_base,
+                               param);
+                       if (rc)
+                               return rc;
+                       break;
+               case PIN_CONFIG_BIAS_PULL_UP:
+               case PIN_CONFIG_BIAS_PULL_DOWN:
+               case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+               case PIN_CONFIG_BIAS_BUS_HOLD:
+                       if (!rockchip_pinconf_pull_valid(info->ctrl, param))
+                               return -ENOTSUPP;
+
+                       if (!arg)
+                               return -EINVAL;
+
+                       rc = rockchip_set_pull(bank, pin - bank->pin_base,
+                               param);
+                       if (rc)
+                               return rc;
+                       break;
+               case PIN_CONFIG_OUTPUT:
+                       rc = rockchip_gpio_direction_output(&bank->gpio_chip,
+                                                           pin - bank->pin_base,
+                                                           arg);
+                       if (rc)
+                               return rc;
+                       break;
+               case PIN_CONFIG_DRIVE_STRENGTH:
+                       /* rk3288 RK3368 is the first with per-pin drive-strength */
+                       if ((info->ctrl->type != RK3288) && ((info->ctrl->type != RK3368)))
+                               return -ENOTSUPP;
+
+                       rc = rk3288_set_drive(bank, pin - bank->pin_base, arg);
+                       if (rc < 0)
+                               return rc;
+                       break;
+               default:
+                       return -ENOTSUPP;
+                       break;
+               }
+       //} /* for each config */
+
+       return 0;
+}
+
+/* get the pin config settings for a specified pin */
+static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+                                                       unsigned long *config)
+{
+       struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+       struct rockchip_pin_bank *bank = pin_to_bank(info, pin);
+       enum pin_config_param param = pinconf_to_config_param(*config);
+       u16 arg;
+       int rc;
+
+       switch (param) {
+       case PIN_CONFIG_BIAS_DISABLE:
+               if (rockchip_get_pull(bank, pin - bank->pin_base) != param)
+                       return -EINVAL;
+
+               arg = 0;
+               break;
+       case PIN_CONFIG_BIAS_PULL_UP:
+       case PIN_CONFIG_BIAS_PULL_DOWN:
+       case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+       case PIN_CONFIG_BIAS_BUS_HOLD:
+               if (!rockchip_pinconf_pull_valid(info->ctrl, param))
+                       return -ENOTSUPP;
+
+               if (rockchip_get_pull(bank, pin - bank->pin_base) != param)
+                       return -EINVAL;
+
+               arg = 1;
+               break;
+       case PIN_CONFIG_OUTPUT:
+               rc = rockchip_get_mux(bank, pin - bank->pin_base);
+               if (rc != RK_FUNC_GPIO)
+                       return -EINVAL;
+
+               rc = rockchip_gpio_get(&bank->gpio_chip, pin - bank->pin_base);
+               if (rc < 0)
+                       return rc;
+
+               arg = rc ? 1 : 0;
+               break;
+       case PIN_CONFIG_DRIVE_STRENGTH:
+               /* rk3288 RK3368 is the first with per-pin drive-strength */
+               if ((info->ctrl->type != RK3288) && ((info->ctrl->type != RK3368)))
+                       return -ENOTSUPP;
+
+               rc = rk3288_get_drive(bank, pin - bank->pin_base);
+               if (rc < 0)
+                       return rc;
+
+               arg = rc;
+               break;
+       default:
+               return -ENOTSUPP;
+               break;
+       }
+
+       *config = pinconf_to_config_packed(param, arg);
+
+       return 0;
+}
+
/* generic pinconf: DT parsing/printing is handled by the pinconf core */
static const struct pinconf_ops rockchip_pinconf_ops = {
	.pin_config_get			= rockchip_pinconf_get,
	.pin_config_set			= rockchip_pinconf_set,
	.is_generic			= true,
};
+
/* child nodes matching these compatibles are GPIO banks, not pinmux functions */
static const struct of_device_id rockchip_bank_match[] = {
	{ .compatible = "rockchip,gpio-bank" },
	{ .compatible = "rockchip,rk3188-gpio-bank0" },
	{},
};
+
+static void rockchip_pinctrl_child_count(struct rockchip_pinctrl *info,
+                                               struct device_node *np)
+{
+       struct device_node *child;
+
+       for_each_child_of_node(np, child) {
+               if (of_match_node(rockchip_bank_match, child))
+                       continue;
+
+               info->nfunctions++;
+               info->ngroups += of_get_child_count(child);
+       }
+}
+
+static int rockchip_pinctrl_parse_groups(struct device_node *np,
+                                             struct rockchip_pin_group *grp,
+                                             struct rockchip_pinctrl *info,
+                                             u32 index)
+{
+       struct rockchip_pin_bank *bank;
+       int size;
+       const __be32 *list;
+       int num;
+       int i, j;
+       int ret;
+
+       dev_dbg(info->dev, "group(%d): %s\n", index, np->name);
+
+       /* Initialise group */
+       grp->name = np->name;
+
+       /*
+        * the binding format is rockchip,pins = <bank pin mux CONFIG>,
+        * do sanity check and calculate pins number
+        */
+       list = of_get_property(np, "rockchip,pins", &size);
+       /* we do not check return since it's safe node passed down */
+       size /= sizeof(*list);
+       if (!size || size % 4) {
+               dev_err(info->dev, "wrong pins number or pins and configs should be by 4\n");
+               return -EINVAL;
+       }
+
+       grp->npins = size / 4;
+
+       grp->pins = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int),
+                                               GFP_KERNEL);
+       grp->data = devm_kzalloc(info->dev, grp->npins *
+                                         sizeof(struct rockchip_pin_config),
+                                       GFP_KERNEL);
+       if (!grp->pins || !grp->data)
+               return -ENOMEM;
+
+       for (i = 0, j = 0; i < size; i += 4, j++) {
+               const __be32 *phandle;
+               struct device_node *np_config;
+
+               num = be32_to_cpu(*list++);
+               bank = bank_num_to_bank(info, num);
+               if (IS_ERR(bank))
+                       return PTR_ERR(bank);
+
+               grp->pins[j] = bank->pin_base + be32_to_cpu(*list++);
+               grp->data[j].func = be32_to_cpu(*list++);
+
+               phandle = list++;
+               if (!phandle)
+                       return -EINVAL;
+
+               np_config = of_find_node_by_phandle(be32_to_cpup(phandle));
+               ret = pinconf_generic_parse_dt_config(np_config,
+                               &grp->data[j].configs, &grp->data[j].nconfigs);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
/*
 * Parse one function node: each child of 'np' is a pin group belonging
 * to this function. Group data lands in info->groups, function data in
 * info->functions[index].
 *
 * NOTE(review): 'grp_index' below is function-local *static* state, so
 * group slots are assigned in overall call order across all functions.
 * This only works because the parse runs exactly once per controller -
 * verify before reusing this path for a re-parse.
 */
static int rockchip_pinctrl_parse_functions(struct device_node *np,
						struct rockchip_pinctrl *info,
						u32 index)
{
	struct device_node *child;
	struct rockchip_pmx_func *func;
	struct rockchip_pin_group *grp;
	int ret;
	/* running index into info->groups, shared across invocations */
	static u32 grp_index;
	u32 i = 0;

	dev_dbg(info->dev, "parse function(%d): %s\n", index, np->name);

	func = &info->functions[index];

	/* Initialise function */
	func->name = np->name;
	func->ngroups = of_get_child_count(np);
	if (func->ngroups <= 0)
		return 0;

	func->groups = devm_kzalloc(info->dev,
			func->ngroups * sizeof(char *), GFP_KERNEL);
	if (!func->groups)
		return -ENOMEM;

	for_each_child_of_node(np, child) {
		func->groups[i] = child->name;
		grp = &info->groups[grp_index++];
		ret = rockchip_pinctrl_parse_groups(child, grp, info, i++);
		if (ret)
			return ret;
	}

	return 0;
}
+
+static int rockchip_pinctrl_parse_dt(struct platform_device *pdev,
+                                             struct rockchip_pinctrl *info)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct device_node *child;
+       int ret;
+       int i;
+
+       rockchip_pinctrl_child_count(info, np);
+
+       dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
+       dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
+
+       info->functions = devm_kzalloc(dev, info->nfunctions *
+                                             sizeof(struct rockchip_pmx_func),
+                                             GFP_KERNEL);
+       if (!info->functions) {
+               dev_err(dev, "failed to allocate memory for function list\n");
+               return -EINVAL;
+       }
+
+       info->groups = devm_kzalloc(dev, info->ngroups *
+                                           sizeof(struct rockchip_pin_group),
+                                           GFP_KERNEL);
+       if (!info->groups) {
+               dev_err(dev, "failed allocate memory for ping group list\n");
+               return -EINVAL;
+       }
+
+       i = 0;
+
+       for_each_child_of_node(np, child) {
+               if (of_match_node(rockchip_bank_match, child))
+                       continue;
+
+               ret = rockchip_pinctrl_parse_functions(child, info, i++);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to parse function\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int rockchip_pinctrl_register(struct platform_device *pdev,
+                                       struct rockchip_pinctrl *info)
+{
+       struct pinctrl_desc *ctrldesc = &info->pctl;
+       struct pinctrl_pin_desc *pindesc, *pdesc;
+       struct rockchip_pin_bank *pin_bank;
+       int pin, bank, ret;
+       int k;
+
+       ctrldesc->name = "rockchip-pinctrl";
+       ctrldesc->owner = THIS_MODULE;
+       ctrldesc->pctlops = &rockchip_pctrl_ops;
+       ctrldesc->pmxops = &rockchip_pmx_ops;
+       ctrldesc->confops = &rockchip_pinconf_ops;
+
+       pindesc = devm_kzalloc(&pdev->dev, sizeof(*pindesc) *
+                       info->ctrl->nr_pins, GFP_KERNEL);
+       if (!pindesc) {
+               dev_err(&pdev->dev, "mem alloc for pin descriptors failed\n");
+               return -ENOMEM;
+       }
+       ctrldesc->pins = pindesc;
+       ctrldesc->npins = info->ctrl->nr_pins;
+
+       pdesc = pindesc;
+       for (bank = 0 , k = 0; bank < info->ctrl->nr_banks; bank++) {
+               pin_bank = &info->ctrl->pin_banks[bank];
+               for (pin = 0; pin < pin_bank->nr_pins; pin++, k++) {
+                       pdesc->number = k;
+                       pdesc->name = kasprintf(GFP_KERNEL, "%s-%d",
+                                               pin_bank->name, pin);
+                       pdesc++;
+               }
+       }
+
+       info->pctl_dev = pinctrl_register(ctrldesc, &pdev->dev, info);
+       if (!info->pctl_dev) {
+               dev_err(&pdev->dev, "could not register pinctrl driver\n");
+               return -EINVAL;
+       }
+
+       for (bank = 0; bank < info->ctrl->nr_banks; ++bank) {
+               pin_bank = &info->ctrl->pin_banks[bank];
+               pin_bank->grange.name = pin_bank->name;
+               pin_bank->grange.id = bank;
+               pin_bank->grange.pin_base = pin_bank->pin_base;
+               pin_bank->grange.base = pin_bank->gpio_chip.base;
+               pin_bank->grange.npins = pin_bank->gpio_chip.ngpio;
+               pin_bank->grange.gc = &pin_bank->gpio_chip;
+               pinctrl_add_gpio_range(info->pctl_dev, &pin_bank->grange);
+       }
+
+       ret = rockchip_pinctrl_parse_dt(pdev, info);
+       if (ret) {
+               pinctrl_unregister(info->pctl_dev);
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * GPIO handling
+ */
+
/* gpiolib .request hook: route ownership through the pinctrl subsystem */
static int rockchip_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	return pinctrl_request_gpio(chip->base + offset);
}
+
/* gpiolib .free hook: release the pin back to the pinctrl subsystem */
static void rockchip_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	pinctrl_free_gpio(chip->base + offset);
}
+
+static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+       struct rockchip_pin_bank *bank = gc_to_pin_bank(gc);
+       void __iomem *reg = bank->reg_base + GPIO_SWPORT_DR;
+       unsigned long flags;
+       u32 data;
+
+       spin_lock_irqsave(&bank->slock, flags);
+
+       data = readl(reg);
+       data &= ~BIT(offset);
+       if (value)
+               data |= BIT(offset);
+       writel(data, reg);
+
+       spin_unlock_irqrestore(&bank->slock, flags);
+}
+
+/*
+ * Returns the level of the pin for input direction and setting of the DR
+ * register for output gpios.
+ */
+static int rockchip_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+       struct rockchip_pin_bank *bank = gc_to_pin_bank(gc);
+       u32 data;
+
+       data = readl(bank->reg_base + GPIO_EXT_PORT);
+       data >>= offset;
+       data &= 1;
+       return data;
+}
+
/*
 * gpiolib gpio_direction_input callback function. The setting of the pin
 * mux function as 'gpio input' will be handled by the pinctrl subsystem
 * interface (which ends up in rockchip_pmx_gpio_set_direction()).
 */
static int rockchip_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
{
	return pinctrl_gpio_direction_input(gc->base + offset);
}
+
/*
 * gpiolib gpio_direction_output callback function. The setting of the pin
 * mux function as 'gpio output' will be handled by the pinctrl subsystem
 * interface. The output value is latched first so the pin does not glitch
 * when the direction flips.
 */
static int rockchip_gpio_direction_output(struct gpio_chip *gc,
					  unsigned offset, int value)
{
	rockchip_gpio_set(gc, offset, value);
	return pinctrl_gpio_direction_output(gc->base + offset);
}
+
+/*
+ * gpiolib gpio_to_irq callback function. Creates a mapping between a GPIO pin
+ * and a virtual IRQ, if not already present.
+ */
+static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+       struct rockchip_pin_bank *bank = gc_to_pin_bank(gc);
+       unsigned int virq;
+
+       if (!bank->domain)
+               return -ENXIO;
+
+       virq = irq_create_mapping(bank->domain, offset);
+
+       return (virq) ? : -ENXIO;
+}
+
/* template gpio_chip, copied into each bank and filled in per bank */
static const struct gpio_chip rockchip_gpiolib_chip = {
	.request = rockchip_gpio_request,
	.free = rockchip_gpio_free,
	.set = rockchip_gpio_set,
	.get = rockchip_gpio_get,
	.direction_input = rockchip_gpio_direction_input,
	.direction_output = rockchip_gpio_direction_output,
	.to_irq = rockchip_gpio_to_irq,
	.owner = THIS_MODULE,
};
+
+/*
+ * Interrupt handling
+ */
+
/*
 * Chained handler for a bank's summary interrupt: dispatch every pending
 * bit in GPIO_INT_STATUS to its mapped virq. For lines configured as
 * both-edge (see toggle_edge_mode), the hardware can only trigger on one
 * edge, so the polarity is flipped here after every event.
 */
static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	struct rockchip_pin_bank *bank = irq_get_handler_data(irq);
	u32 polarity = 0, data = 0;
	u32 pend;
	bool edge_changed = false;

	pinctrl_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);

	chained_irq_enter(chip, desc);

	pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);

	/* snapshot polarity and pin levels only when emulation is active */
	if (bank->toggle_edge_mode) {
		polarity = readl_relaxed(bank->reg_base +
					 GPIO_INT_POLARITY);
		data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
	}

	while (pend) {
		unsigned int virq;

		/* note: 'irq' is reused as the bank-local bit number here */
		irq = __ffs(pend);
		pend &= ~BIT(irq);
		virq = irq_linear_revmap(bank->domain, irq);

		if (!virq) {
			dev_err(bank->drvdata->dev, "unmapped irq %d\n", irq);
			continue;
		}

		pinctrl_dbg(bank->drvdata->dev, "handling irq %d\n", irq);

		/*
		 * Triggering IRQ on both rising and falling edge
		 * needs manual intervention.
		 */
		if (bank->toggle_edge_mode & BIT(irq)) {
			if (data & BIT(irq))
				polarity &= ~BIT(irq);
			else
				polarity |= BIT(irq);

			edge_changed = true;
		}

		generic_handle_irq(virq);
	}

	if (bank->toggle_edge_mode && edge_changed) {
		/* Interrupt params should only be set with ints disabled */
		data = readl_relaxed(bank->reg_base + GPIO_INTEN);
		writel_relaxed(0, bank->reg_base + GPIO_INTEN);
		writel(polarity, bank->reg_base + GPIO_INT_POLARITY);
		writel(data, bank->reg_base + GPIO_INTEN);
	}

	chained_irq_exit(chip, desc);
}
+
+static int rockchip_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+       struct rockchip_pin_bank *bank = irq_data_get_irq_chip_data(d); 
+       //struct rockchip_pinctrl *info = bank->drvdata;
+       u32 mask = BIT(d->hwirq);
+       u32 polarity;
+       u32 level;
+       u32 data;
+       int ret;
+       unsigned long flags;
+
+       /* make sure the pin is configured as gpio input */
+       ret = rockchip_set_mux(bank, d->hwirq, RK_FUNC_GPIO);
+       if (ret < 0)
+               return ret;
+
+       data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
+       data &= ~mask;
+       writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
+
+       if (type & IRQ_TYPE_EDGE_BOTH)
+               __irq_set_handler_locked(d->irq, handle_edge_irq);
+       else
+               __irq_set_handler_locked(d->irq, handle_level_irq);
+       
+       spin_lock_irqsave(&bank->slock, flags);
+       
+       level = readl_relaxed(bank->reg_base + GPIO_INTTYPE_LEVEL);
+       polarity = readl_relaxed(bank->reg_base + GPIO_INT_POLARITY);
+
+       switch (type) {
+       case IRQ_TYPE_EDGE_BOTH:
+               bank->toggle_edge_mode |= mask;
+               level |= mask;
+
+               /*
+                * Determine gpio state. If 1 next interrupt should be falling
+                * otherwise rising.
+                */
+               data = readl(bank->reg_base + GPIO_EXT_PORT);
+               if (data & mask)
+                       polarity &= ~mask;
+               else
+                       polarity |= mask;
+               break;
+       case IRQ_TYPE_EDGE_RISING:
+               bank->toggle_edge_mode &= ~mask;
+               level |= mask;
+               polarity |= mask;
+               break;
+       case IRQ_TYPE_EDGE_FALLING:
+               bank->toggle_edge_mode &= ~mask;
+               level |= mask;
+               polarity &= ~mask;
+               break;
+       case IRQ_TYPE_LEVEL_HIGH:
+               bank->toggle_edge_mode &= ~mask;
+               level &= ~mask;
+               polarity |= mask;
+               break;
+       case IRQ_TYPE_LEVEL_LOW:
+               bank->toggle_edge_mode &= ~mask;
+               level &= ~mask;
+               polarity &= ~mask;
+               break;
+       default:
+               //irq_gc_unlock(gc);
+               return -EINVAL;
+       }
+
+       writel_relaxed(level, bank->reg_base + GPIO_INTTYPE_LEVEL);
+       writel_relaxed(polarity, bank->reg_base + GPIO_INT_POLARITY);
+       
+       spin_unlock_irqrestore(&bank->slock, flags);
+       
+       //DBG_PINCTRL("%s:type=%d,irq=%d,hwirq=%d,ok\n",__func__,type, d->irq, (int)d->hwirq);
+       return 0;
+}
+
+static inline void rockchip_gpio_bit_op(void __iomem *reg_base
+       , unsigned int offset, u32 bit, unsigned char flag)
+{
+       u32 val = __raw_readl(reg_base + offset);
+       if (flag)
+               val |= BIT(bit);
+       else
+               val &= ~BIT(bit);
+
+       
+       __raw_writel(val, reg_base + offset);
+}
+
/*
 * Translate a global gpio number into its bit offset within the owning
 * bank's registers.
 * NOTE(review): this assumes 'bank' points into a contiguous array of
 * banks ordered by ascending pin_base, so advancing the pointer reaches
 * the owning bank - verify callers pass the first bank of that array.
 */
static inline unsigned gpio_to_bit(struct rockchip_pin_bank *bank, unsigned gpio)
{
	while (gpio >= (bank->pin_base + bank->nr_pins))
		bank++;

	return gpio - bank->pin_base;
}
+
/* Convert a bit index within a bank register into its single-bit mask. */
static inline unsigned offset_to_bit(unsigned offset)
{
	unsigned mask = 1u;

	return mask << offset;
}
+
/* Unmask one interrupt line in the bank's GPIO_INTEN register. */
static void GPIOEnableIntr(void __iomem *reg_base, unsigned int bit)
{
	rockchip_gpio_bit_op(reg_base, GPIO_INTEN, bit, 1);
}
+
/* Mask one interrupt line in the bank's GPIO_INTEN register. */
static void GPIODisableIntr(void __iomem *reg_base, unsigned int bit)
{
	rockchip_gpio_bit_op(reg_base, GPIO_INTEN, bit, 0);
}
+
/* Acknowledge (clear) one pending interrupt via the EOI register. */
static void GPIOAckIntr(void __iomem *reg_base, unsigned int bit)
{
	rockchip_gpio_bit_op(reg_base, GPIO_PORTS_EOI, bit, 1);
}
+
+static int rockchip_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+       struct rockchip_pin_bank *bank = irq_data_get_irq_chip_data(d); 
+       //struct rockchip_pinctrl *info = bank->drvdata;
+       u32 bit = d->hwirq;
+       unsigned long flags;
+       //int pin = d->hwirq;
+
+       spin_lock_irqsave(&bank->slock, flags);
+       
+       if (on)
+       {
+               bank->suspend_wakeup |= BIT(bit);
+       }
+       else
+       {
+               bank->suspend_wakeup &= ~BIT(bit);                      
+       }
+       spin_unlock_irqrestore(&bank->slock, flags);
+       
+       //DBG_PINCTRL("%s:irq=%d,hwirq=%d,bank->reg_base=0x%x,bit=%d\n"
+               //, __func__,d->irq, (int)d->hwirq, (int)bank->reg_base,bit);
+       return 0;
+}
+
+static void rockchip_gpio_irq_unmask(struct irq_data *d)
+{
+       struct rockchip_pin_bank *bank = irq_data_get_irq_chip_data(d);
+       //struct rockchip_pinctrl *info = bank->drvdata;
+       u32 bit = d->hwirq;
+       unsigned long flags;
+       //int pin = d->hwirq;
+
+       spin_lock_irqsave(&bank->slock, flags);
+       GPIOEnableIntr(bank->reg_base, bit);
+       spin_unlock_irqrestore(&bank->slock, flags);
+
+       //DBG_PINCTRL("%s:irq=%d,hwirq=%d,bank->reg_base=0x%x,bit=%d\n"
+               //, __func__,d->irq, (int)d->hwirq, (int)bank->reg_base,bit);
+}
+
+static void rockchip_gpio_irq_mask(struct irq_data *d)
+{
+       struct rockchip_pin_bank *bank = irq_data_get_irq_chip_data(d); 
+       //struct rockchip_pinctrl *info = bank->drvdata;
+       //u32 bit = gpio_to_bit(bank, d->irq);
+       u32 bit = d->hwirq;
+       unsigned long flags;    
+       //int pin = d->hwirq;
+
+       spin_lock_irqsave(&bank->slock, flags);
+       GPIODisableIntr(bank->reg_base, bit);
+       spin_unlock_irqrestore(&bank->slock, flags);
+       
+       //DBG_PINCTRL("%s:irq=%d,hwirq=%d,bank->reg_base=0x%x,bit=%d\n"
+               //, __func__,d->irq, (int)d->hwirq, (int)bank->reg_base,bit);
+}
+
+static void rockchip_gpio_irq_ack(struct irq_data *d)
+{
+       struct rockchip_pin_bank *bank = irq_data_get_irq_chip_data(d); 
+       //struct rockchip_pinctrl *info = bank->drvdata;
+       //u32 bit = gpio_to_bit(bank, d->irq);
+       u32 bit = d->hwirq;     
+       //int pin = d->hwirq;
+
+       GPIOAckIntr(bank->reg_base, bit);
+
+       //DBG_PINCTRL("%s:irq=%d,hwirq=%d,bank->reg_base=0x%x,bit=%d\n"
+               //, __func__,d->irq, (int)d->hwirq, (int)bank->reg_base,bit);
+}
+
/* irq_chip for per-bank GPIO interrupt lines; irq_disable reuses mask */
static struct irq_chip rockchip_gpio_irq_chip = {
	.name		= "ROCKCHIP_GPIO_CHIP",
	.irq_ack		= rockchip_gpio_irq_ack,
	.irq_disable	= rockchip_gpio_irq_mask,
	.irq_mask	= rockchip_gpio_irq_mask,
	.irq_unmask	= rockchip_gpio_irq_unmask,
	.irq_set_type	= rockchip_gpio_irq_set_type,
	.irq_set_wake	= rockchip_gpio_irq_set_wake,
};
+
+static int rockchip_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+                               irq_hw_number_t hwirq)
+{
+       struct rockchip_pin_bank *bank = d->host_data;
+       //struct rockchip_pinctrl *info = bank->drvdata;
+       struct irq_data *irq_data = irq_get_irq_data(irq);      
+       //int pin = hwirq;
+       
+       if (!bank)
+       {
+               printk("%s:bank=0x%p,irq=%d\n",__func__,bank, irq);
+               return -EINVAL;
+       }
+       
+       irq_set_chip_and_handler(irq, &rockchip_gpio_irq_chip, handle_level_irq);
+       irq_set_chip_data(irq, bank);
+       set_irq_flags(irq, IRQF_VALID);
+       
+       irq_data->hwirq = hwirq;
+       irq_data->irq = irq;
+               
+       pinctrl_dbg(bank->drvdata->dev, "%s:irq = %d, hwirq =%ld\n",__func__,irq, hwirq);
+       return 0;
+}
+
/* irq domain ops: only .map is custom; DT interrupt cells are two-cell */
static const struct irq_domain_ops rockchip_gpio_irq_ops = {
	.map = rockchip_gpio_irq_map,
	.xlate = irq_domain_xlate_twocell,
};
+
+static int rockchip_interrupts_register(struct platform_device *pdev,
+                                               struct rockchip_pinctrl *info)
+{
+       struct rockchip_pin_ctrl *ctrl = info->ctrl;
+       struct rockchip_pin_bank *bank = ctrl->pin_banks;
+       //unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+       int i;
+
+       for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
+               if (!bank->valid) {
+                       dev_warn(&pdev->dev, "bank %s is not valid\n",
+                                bank->name);
+                       continue;
+               }
+               
+               __raw_writel(0, bank->reg_base + GPIO_INTEN);
+               
+               bank->drvdata = info;
+               bank->domain = irq_domain_add_linear(bank->of_node, 32,
+                               &rockchip_gpio_irq_ops, bank);
+               if (!bank->domain) {
+                       dev_warn(&pdev->dev, "could not initialize irq domain for bank %s\n",
+                                bank->name);
+                       continue;
+               }
+
+               //if(atomic_read(&info->bank_debug_flag) == (bank->bank_num + 1))
+                       //printk("%s:bank_num=%d\n",__func__,bank->bank_num);
+
+               irq_set_handler_data(bank->irq, bank);
+               irq_set_chained_handler(bank->irq, rockchip_irq_demux);
+       }
+
+       return 0;
+}
+
/*
 * Register one gpio_chip per valid bank (cloned from the template
 * rockchip_gpiolib_chip), then hook up the bank interrupts. On failure
 * the chips registered so far are removed in reverse order.
 */
static int rockchip_gpiolib_register(struct platform_device *pdev,
						struct rockchip_pinctrl *info)
{
	struct rockchip_pin_ctrl *ctrl = info->ctrl;
	struct rockchip_pin_bank *bank = ctrl->pin_banks;
	struct gpio_chip *gc;
	int ret;
	int i;

	for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
		if (!bank->valid) {
			dev_warn(&pdev->dev, "bank %s is not valid\n",
				 bank->name);
			continue;
		}

		/* copy the template, then fill in the per-bank fields */
		bank->gpio_chip = rockchip_gpiolib_chip;

		gc = &bank->gpio_chip;
		gc->base = bank->pin_base;
		gc->ngpio = bank->nr_pins;
		gc->dev = &pdev->dev;
		gc->of_node = bank->of_node;
		gc->label = bank->name;

		ret = gpiochip_add(gc);
		if (ret) {
			dev_err(&pdev->dev, "failed to register gpio_chip %s, error code: %d\n",
							gc->label, ret);
			goto fail;
		}
	}

	rockchip_interrupts_register(pdev, info);

	return 0;

fail:
	/* unwind: step back over the banks whose chips were already added */
	for (--i, --bank; i >= 0; --i, --bank) {
		if (!bank->valid)
			continue;

		if (gpiochip_remove(&bank->gpio_chip))
			dev_err(&pdev->dev, "gpio chip %s remove failed\n",
							bank->gpio_chip.label);
	}
	return ret;
}
+
+static int rockchip_gpiolib_unregister(struct platform_device *pdev,
+                                               struct rockchip_pinctrl *info)
+{
+       struct rockchip_pin_ctrl *ctrl = info->ctrl;
+       struct rockchip_pin_bank *bank = ctrl->pin_banks;
+       int ret = 0;
+       int i;
+
+       for (i = 0; !ret && i < ctrl->nr_banks; ++i, ++bank) {
+               if (!bank->valid)
+                       continue;
+
+               ret = gpiochip_remove(&bank->gpio_chip);
+       }
+
+       if (ret)
+               dev_err(&pdev->dev, "gpio chip remove failed\n");
+
+       return ret;
+}
+
+/*
+ * Map the per-bank MMIO resource, irq and clock described by the bank's
+ * DT node.  On success bank->reg_base, bank->irq and bank->clk are valid
+ * and the bank clock is prepared and enabled.
+ *
+ * Returns 0 or a negative errno.
+ */
+static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
+                                 struct rockchip_pinctrl *info)
+{
+       struct resource res;
+       void __iomem *base;
+
+       if (of_address_to_resource(bank->of_node, 0, &res)) {
+               dev_err(info->dev, "cannot find IO resource for bank\n");
+               return -ENOENT;
+       }
+
+       bank->reg_base = devm_ioremap_resource(info->dev, &res);
+       if (IS_ERR(bank->reg_base))
+               return PTR_ERR(bank->reg_base);
+
+       /*
+        * special case, where parts of the pull setting-registers are
+        * part of the PMU register space
+        */
+       if (of_device_is_compatible(bank->of_node,
+                                   "rockchip,rk3188-gpio-bank0")) {
+               struct device_node *node;
+
+               node = of_parse_phandle(bank->of_node->parent,
+                                       "rockchip,pmu", 0);
+               if (!node) {
+                       /*
+                        * No PMU syscon referenced: fall back to the bank's
+                        * second memory resource for the pull registers.
+                        */
+                       if (of_address_to_resource(bank->of_node, 1, &res)) {
+                               dev_err(info->dev, "cannot find IO resource for bank\n");
+                               return -ENOENT;
+                       }
+
+                       base = devm_ioremap_resource(info->dev, &res);
+                       if (IS_ERR(base))
+                               return PTR_ERR(base);
+                       rockchip_regmap_config.max_register =
+                                                   resource_size(&res) - 4;
+                       rockchip_regmap_config.name =
+                                           "rockchip,rk3188-gpio-bank0-pull";
+                       bank->regmap_pull = devm_regmap_init_mmio(info->dev,
+                                                   base,
+                                                   &rockchip_regmap_config);
+                       /* previously unchecked: a failed regmap was used later */
+                       if (IS_ERR(bank->regmap_pull))
+                               return PTR_ERR(bank->regmap_pull);
+               } else {
+                       /*
+                        * NOTE(review): a present "rockchip,pmu" phandle is not
+                        * mapped here - presumably covered by the driver-wide
+                        * PMU regmap; confirm against the pull-register users.
+                        * Drop the reference of_parse_phandle() took.
+                        */
+                       of_node_put(node);
+               }
+       }
+
+       bank->irq = irq_of_parse_and_map(bank->of_node, 0);
+
+       bank->clk = of_clk_get(bank->of_node, 0);
+       if (IS_ERR(bank->clk))
+               return PTR_ERR(bank->clk);
+
+       return clk_prepare_enable(bank->clk);
+}
+
+static const struct of_device_id rockchip_pinctrl_dt_match[];
+
+/*
+ * Retrieve the SoC specific data selected by the DT compatible, bind the
+ * gpio-controller child nodes to their banks (by node name), and compute
+ * each bank's global pin base and iomux register offsets.
+ *
+ * Returns the matched rockchip_pin_ctrl (never NULL here: probe only runs
+ * for compatibles present in rockchip_pinctrl_dt_match).
+ */
+static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
+                                               struct rockchip_pinctrl *d,
+                                               struct platform_device *pdev)
+{
+       const struct of_device_id *match;
+       struct device_node *node = pdev->dev.of_node;
+       struct device_node *np;
+       struct rockchip_pin_ctrl *ctrl;
+       struct rockchip_pin_bank *bank;
+       int grf_offs, pmu_offs, i, j;
+
+       match = of_match_node(rockchip_pinctrl_dt_match, node);
+       ctrl = (struct rockchip_pin_ctrl *)match->data;
+
+       /* pair every gpio-controller child node with the bank of the same name */
+       for_each_child_of_node(node, np) {
+               if (!of_find_property(np, "gpio-controller", NULL))
+                       continue;
+
+               bank = ctrl->pin_banks;
+               for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
+                       if (!strcmp(bank->name, np->name)) {
+                               bank->of_node = np;
+
+                               /* a bank is usable only once its resources mapped */
+                               if (!rockchip_get_bank_data(bank, d))
+                                       bank->valid = true;
+
+                               break;
+                       }
+               }
+       }
+
+       /* walk banks in order, accumulating pin bases and register offsets */
+       grf_offs = ctrl->grf_mux_offset;
+       pmu_offs = ctrl->pmu_mux_offset;
+       bank = ctrl->pin_banks;
+       for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
+               int bank_pins = 0;
+
+               spin_lock_init(&bank->slock);
+               bank->drvdata = d;
+               bank->pin_base = ctrl->nr_pins;
+               ctrl->nr_pins += bank->nr_pins;
+
+               /* calculate iomux offsets */
+               for (j = 0; j < 4; j++) {
+                       struct rockchip_iomux *iom = &bank->iomux[j];
+                       int inc;
+
+                       /* each iomux slot covers 8 pins; stop past the bank end */
+                       if (bank_pins >= bank->nr_pins)
+                               break;
+
+                       /* preset offset value, set new start value */
+                       if (iom->offset >= 0) {
+                               if (iom->type & IOMUX_SOURCE_PMU)
+                                       pmu_offs = iom->offset;
+                               else
+                                       grf_offs = iom->offset;
+                       } else { /* set current offset */
+                               iom->offset = (iom->type & IOMUX_SOURCE_PMU) ?
+                                                       pmu_offs : grf_offs;
+                       }
+
+                       pinctrl_dbg(d->dev, "bank %d, iomux %d has offset 0x%x\n",
+                                i, j, iom->offset);
+
+                       /*
+                        * Increase offset according to iomux width.
+                        * 4bit iomux'es are spread over two registers.
+                        */
+                       inc = (iom->type & IOMUX_WIDTH_4BIT) ? 8 : 4;
+                       if (iom->type & IOMUX_SOURCE_PMU)
+                               pmu_offs += inc;
+                       else
+                               grf_offs += inc;
+
+                       bank_pins += 8;
+               }
+       }
+
+       return ctrl;
+}
+
+/*
+ * Main probe: resolve the SoC data, set up the GRF regmap (syscon phandle
+ * for new bindings, raw MMIO resource for old ones), the optional PMU
+ * regmap, then register the gpio chips and the pinctrl device.
+ */
+static int rockchip_pinctrl_probe(struct platform_device *pdev)
+{
+       struct rockchip_pinctrl *info;
+       struct device *dev = &pdev->dev;
+       struct rockchip_pin_ctrl *ctrl;
+       struct device_node *np = pdev->dev.of_node, *node;
+       struct resource *res;
+       void __iomem *base;
+       int ret;
+
+       if (!dev->of_node) {
+               dev_err(dev, "device tree node not found\n");
+               return -ENODEV;
+       }
+
+       info = devm_kzalloc(dev, sizeof(struct rockchip_pinctrl), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->dev = dev;
+
+       ctrl = rockchip_pinctrl_get_soc_data(info, pdev);
+       if (!ctrl) {
+               dev_err(dev, "driver data not available\n");
+               return -EINVAL;
+       }
+       info->ctrl = ctrl;
+       g_info = info;
+
+       /* newer bindings reference the GRF through a syscon phandle */
+       node = of_parse_phandle(np, "rockchip,grf", 0);
+       if (node) {
+               info->regmap_base = syscon_node_to_regmap(node);
+               if (IS_ERR(info->regmap_base))
+                       return PTR_ERR(info->regmap_base);
+       } else {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+               base = devm_ioremap_resource(&pdev->dev, res);
+               if (IS_ERR(base))
+                       return PTR_ERR(base);
+
+               rockchip_regmap_config.max_register = resource_size(res) - 4;
+               rockchip_regmap_config.name = "rockchip,pinctrl";
+               info->regmap_base = devm_regmap_init_mmio(&pdev->dev, base,
+                                                   &rockchip_regmap_config);
+               /* previously unchecked: an ERR_PTR regmap would be used later */
+               if (IS_ERR(info->regmap_base))
+                       return PTR_ERR(info->regmap_base);
+
+               /* to check for the old dt-bindings */
+               info->reg_size = resource_size(res);
+
+               /* Honor the old binding, with pull registers as 2nd resource */
+               if (ctrl->type == RK3188 && info->reg_size < 0x200) {
+                       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+                       base = devm_ioremap_resource(&pdev->dev, res);
+                       if (IS_ERR(base))
+                               return PTR_ERR(base);
+
+                       rockchip_regmap_config.max_register =
+                                                       resource_size(res) - 4;
+                       rockchip_regmap_config.name = "rockchip,pinctrl-pull";
+                       info->regmap_pull = devm_regmap_init_mmio(&pdev->dev,
+                                                   base,
+                                                   &rockchip_regmap_config);
+                       if (IS_ERR(info->regmap_pull))
+                               return PTR_ERR(info->regmap_pull);
+               }
+       }
+
+       /* try to find the optional reference to the pmu syscon */
+       node = of_parse_phandle(np, "rockchip,pmu", 0);
+       if (node) {
+               info->regmap_pmu = syscon_node_to_regmap(node);
+               if (IS_ERR(info->regmap_pmu))
+                       return PTR_ERR(info->regmap_pmu);
+       }
+
+       ret = rockchip_gpiolib_register(pdev, info);
+       if (ret)
+               return ret;
+
+       ret = rockchip_pinctrl_register(pdev, info);
+       if (ret) {
+               rockchip_gpiolib_unregister(pdev, info);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, info);
+       /* was a raw printk without log level */
+       dev_info(dev, "init ok\n");
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * Syscore suspend hook: save each bank's interrupt-enable mask, arm only
+ * the configured wakeup sources, and gate the clock of banks that have
+ * no wakeup source.
+ */
+static int rockchip_pinctrl_suspend(void)
+{
+       struct rockchip_pinctrl *info = g_info;
+       struct rockchip_pin_ctrl *ctrl = info->ctrl;
+       struct rockchip_pin_bank *bank = ctrl->pin_banks;
+       int n;
+
+       /*
+        * NOTE(review): the walk stops at nr_banks - 1, leaving the last
+        * bank untouched, and does not check bank->valid - confirm both
+        * are intentional before changing.
+        */
+       for (n = 0; n < ctrl->nr_banks - 1; n++) {
+               /* save the live irq mask, then enable only wakeup sources */
+               bank->saved_wakeup = __raw_readl(bank->reg_base + GPIO_INTEN);
+               __raw_writel(bank->suspend_wakeup, bank->reg_base + GPIO_INTEN);
+
+               /* banks with no wakeup source can have their clock gated */
+               if (!bank->suspend_wakeup)
+                       clk_disable_unprepare(bank->clk);
+
+               bank++;
+       }
+
+       return 0;
+}
+
+/*
+ * Syscore resume hook: re-enable the clocks gated at suspend and restore
+ * each bank's saved interrupt-enable mask, additionally keeping any
+ * wakeup source that is still pending enabled so its irq is delivered.
+ */
+static void rockchip_pinctrl_resume(void)
+{
+       struct rockchip_pinctrl *info = g_info;
+       struct rockchip_pin_ctrl *ctrl = info->ctrl;
+       struct rockchip_pin_bank *bank = ctrl->pin_banks;
+       int n;
+       u32 isr;
+
+       /* NOTE(review): mirrors suspend - the last bank is skipped */
+       for (n = 0; n < ctrl->nr_banks - 1; n++) {
+               /* undo the clock gating done in suspend */
+               if (!bank->suspend_wakeup)
+                       clk_prepare_enable(bank->clk);
+
+               /* keep enable for resume irq */
+               isr = __raw_readl(bank->reg_base + GPIO_INT_STATUS);
+               __raw_writel(bank->saved_wakeup | (bank->suspend_wakeup & isr),
+                            bank->reg_base + GPIO_INTEN);
+
+               bank++;
+       }
+}
+#endif
+
+/* RK2928: four fully routed 32-pin banks, iomux in the GRF at 0xa8 */
+static struct rockchip_pin_bank rk2928_pin_banks[] = {
+       PIN_BANK(0, 32, "gpio0"),
+       PIN_BANK(1, 32, "gpio1"),
+       PIN_BANK(2, 32, "gpio2"),
+       PIN_BANK(3, 32, "gpio3"),
+};
+
+static struct rockchip_pin_ctrl rk2928_pin_ctrl = {
+               .pin_banks              = rk2928_pin_banks,
+               .nr_banks               = ARRAY_SIZE(rk2928_pin_banks),
+               .label                  = "RK2928-GPIO",
+               .type                   = RK2928,
+               .grf_mux_offset         = 0xa8,
+               .pull_calc_reg          = rk2928_calc_pull_reg_and_bit,
+};
+
+/* RK3066a: RK2928-style registers, plus gpio4 and a 16-pin gpio6 */
+static struct rockchip_pin_bank rk3066a_pin_banks[] = {
+       PIN_BANK(0, 32, "gpio0"),
+       PIN_BANK(1, 32, "gpio1"),
+       PIN_BANK(2, 32, "gpio2"),
+       PIN_BANK(3, 32, "gpio3"),
+       PIN_BANK(4, 32, "gpio4"),
+       PIN_BANK(6, 16, "gpio6"),
+};
+
+static struct rockchip_pin_ctrl rk3066a_pin_ctrl = {
+               .pin_banks              = rk3066a_pin_banks,
+               .nr_banks               = ARRAY_SIZE(rk3066a_pin_banks),
+               .label                  = "RK3066a-GPIO",
+               .type                   = RK2928,
+               .grf_mux_offset         = 0xa8,
+               .pull_calc_reg          = rk2928_calc_pull_reg_and_bit,
+};
+
+/* RK3066b: no pull_calc_reg callback - no software pull register support */
+static struct rockchip_pin_bank rk3066b_pin_banks[] = {
+       PIN_BANK(0, 32, "gpio0"),
+       PIN_BANK(1, 32, "gpio1"),
+       PIN_BANK(2, 32, "gpio2"),
+       PIN_BANK(3, 32, "gpio3"),
+};
+
+static struct rockchip_pin_ctrl rk3066b_pin_ctrl = {
+               .pin_banks      = rk3066b_pin_banks,
+               .nr_banks       = ARRAY_SIZE(rk3066b_pin_banks),
+               .label          = "RK3066b-GPIO",
+               .type           = RK3066B,
+               .grf_mux_offset = 0x60,
+};
+
+/* RK3188: bank0 is flagged gpio-only (no iomux slots) */
+static struct rockchip_pin_bank rk3188_pin_banks[] = {
+       PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_GPIO_ONLY, 0, 0, 0),
+       PIN_BANK(1, 32, "gpio1"),
+       PIN_BANK(2, 32, "gpio2"),
+       PIN_BANK(3, 32, "gpio3"),
+};
+
+static struct rockchip_pin_ctrl rk3188_pin_ctrl = {
+               .pin_banks              = rk3188_pin_banks,
+               .nr_banks               = ARRAY_SIZE(rk3188_pin_banks),
+               .label                  = "RK3188-GPIO",
+               .type                   = RK3188,
+               .grf_mux_offset         = 0x60,
+               .pull_calc_reg          = rk3188_calc_pull_reg_and_bit,
+};
+
+/*
+ * RK3288: bank0's iomux lives in the PMU register space; several slots
+ * are 4-bit wide or unrouted (see the per-slot IOMUX_* flags).
+ */
+static struct rockchip_pin_bank rk3288_pin_banks[] = {
+       PIN_BANK_IOMUX_FLAGS(0, 24, "gpio0", IOMUX_SOURCE_PMU,
+                                            IOMUX_SOURCE_PMU,
+                                            IOMUX_SOURCE_PMU,
+                                            IOMUX_UNROUTED
+                           ),
+       PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", IOMUX_UNROUTED,
+                                            IOMUX_UNROUTED,
+                                            IOMUX_UNROUTED,
+                                            0
+                           ),
+       PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0, 0, 0, IOMUX_UNROUTED),
+       PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", 0, 0, 0, IOMUX_WIDTH_4BIT),
+       PIN_BANK_IOMUX_FLAGS(4, 32, "gpio4", IOMUX_WIDTH_4BIT,
+                                            IOMUX_WIDTH_4BIT,
+                                            0,
+                                            0
+                           ),
+       PIN_BANK_IOMUX_FLAGS(5, 32, "gpio5", IOMUX_UNROUTED,
+                                            0,
+                                            0,
+                                            IOMUX_UNROUTED
+                           ),
+       PIN_BANK_IOMUX_FLAGS(6, 32, "gpio6", 0, 0, 0, IOMUX_UNROUTED),
+       PIN_BANK_IOMUX_FLAGS(7, 32, "gpio7", 0,
+                                            0,
+                                            IOMUX_WIDTH_4BIT,
+                                            IOMUX_UNROUTED
+                           ),
+       PIN_BANK(8, 16, "gpio8"),
+};
+
+static struct rockchip_pin_ctrl rk3288_pin_ctrl = {
+               .pin_banks              = rk3288_pin_banks,
+               .nr_banks               = ARRAY_SIZE(rk3288_pin_banks),
+               .label                  = "RK3288-GPIO",
+               .type                   = RK3288,
+               .grf_mux_offset         = 0x0,
+               .pmu_mux_offset         = 0x84,
+               .pull_calc_reg          = rk3288_calc_pull_reg_and_bit,
+};
+
+/* RK3368: bank0 is entirely PMU-muxed, banks 1-3 are muxed in the GRF */
+static struct rockchip_pin_bank rk3368_pin_banks[] = {
+       PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU,
+                                            IOMUX_SOURCE_PMU,
+                                            IOMUX_SOURCE_PMU,
+                                            IOMUX_SOURCE_PMU),
+       PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0),
+       PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0, 0, 0, 0),
+       PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", 0, 0, 0, 0),
+};
+
+static struct rockchip_pin_ctrl rk3368_pin_ctrl = {
+               .pin_banks              = rk3368_pin_banks,
+               .nr_banks               = ARRAY_SIZE(rk3368_pin_banks),
+               .label                  = "RK3368-GPIO",
+               .type                   = RK3368,
+               .grf_mux_offset         = 0x0,
+               .pmu_mux_offset         = 0x0,
+               .pull_calc_reg          = rk3288_calc_pull_reg_and_bit,
+};
+
+/* maps DT compatibles to the per-SoC controller descriptions above */
+static const struct of_device_id rockchip_pinctrl_dt_match[] = {
+       { .compatible = "rockchip,rk2928-pinctrl",
+               .data = (void *)&rk2928_pin_ctrl },
+       { .compatible = "rockchip,rk3066a-pinctrl",
+               .data = (void *)&rk3066a_pin_ctrl },
+       { .compatible = "rockchip,rk3066b-pinctrl",
+               .data = (void *)&rk3066b_pin_ctrl },
+       { .compatible = "rockchip,rk3188-pinctrl",
+               .data = (void *)&rk3188_pin_ctrl },
+       { .compatible = "rockchip,rk3288-pinctrl",
+               .data = (void *)&rk3288_pin_ctrl },
+       { .compatible = "rockchip,rk3368-pinctrl",
+               .data = (void *)&rk3368_pin_ctrl },
+       {},
+};
+MODULE_DEVICE_TABLE(of, rockchip_pinctrl_dt_match);
+
+/* probe-only platform driver, matched via the DT table above */
+static struct platform_driver rockchip_pinctrl_driver = {
+       .probe          = rockchip_pinctrl_probe,
+       .driver = {
+               .name   = "rockchip-pinctrl",
+               .owner  = THIS_MODULE,
+               .of_match_table = rockchip_pinctrl_dt_match,
+       },
+};
+
+#ifdef CONFIG_PM
+/* hooks gpio wakeup save/restore into the syscore suspend/resume path */
+static struct syscore_ops rockchip_gpio_syscore_ops = {
+        .suspend        = rockchip_pinctrl_suspend,
+        .resume         = rockchip_pinctrl_resume,
+};
+#endif
+
+/* early init: hook up the PM syscore ops, then register the driver */
+static int __init rockchip_pinctrl_drv_register(void)
+{
+#ifdef CONFIG_PM
+       register_syscore_ops(&rockchip_gpio_syscore_ops);
+#endif
+       return platform_driver_register(&rockchip_pinctrl_driver);
+}
+postcore_initcall(rockchip_pinctrl_drv_register);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("Rockchip pinctrl driver");
+MODULE_LICENSE("GPL v2");
index c9076bdaf2c18fc9a34c3d3f906cd333971f57e5..59a8d325a69776f7d689972b6bcc058cd38f87b7 100644 (file)
@@ -572,6 +572,17 @@ static const struct dmi_system_id video_vendor_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
                },
        },
+       {
+               /*
+                * Note no video_set_backlight_video_vendor, we must use the
+                * acer interface, as there is no native backlight interface.
+                */
+               .ident = "Acer KAV80",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
+               },
+       },
        {}
 };
 
index fa9a2171cc134b733c837f89a4f1b80aeb0989ea..b264d8fe190812285287c57c4a9b3a3b85f6f53c 100644 (file)
@@ -163,18 +163,24 @@ static void dell_wmi_notify(u32 value, void *context)
                const struct key_entry *key;
                int reported_key;
                u16 *buffer_entry = (u16 *)obj->buffer.pointer;
+               int buffer_size = obj->buffer.length/2;
 
-               if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
+               if (buffer_size >= 2 && dell_new_hk_type && buffer_entry[1] != 0x10) {
                        pr_info("Received unknown WMI event (0x%x)\n",
                                buffer_entry[1]);
                        kfree(obj);
                        return;
                }
 
-               if (dell_new_hk_type || buffer_entry[1] == 0x0)
+               if (buffer_size >= 3 && (dell_new_hk_type || buffer_entry[1] == 0x0))
                        reported_key = (int)buffer_entry[2];
-               else
+               else if (buffer_size >= 2)
                        reported_key = (int)buffer_entry[1] & 0xffff;
+               else {
+                       pr_info("Received unknown WMI event\n");
+                       kfree(obj);
+                       return;
+               }
 
                key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
                                                        reported_key);
index 2a1008b61121ae6cfa49f5a757037ade4d14ed25..7f3d389bd601e7d616c8d528b64a248706c3131a 100644 (file)
@@ -10,3 +10,11 @@ menuconfig POWER_AVS
          AVS is also called SmartReflex on OMAP devices.
 
          Say Y here to enable Adaptive Voltage Scaling class support.
+
+config ROCKCHIP_IODOMAIN
+        tristate "Rockchip IO domain support"
+        depends on ARCH_ROCKCHIP && OF
+        help
+          Say y here to enable support io domains on Rockchip SoCs. It is
+          necessary for the io domain setting of the SoC to match the
+          voltage supplied by the regulators.
index 0843386a6c1951e6fe5b44f59d9a43992409a922..ba4c7bc6922533dcc15627ecaff584a0e3d122e9 100644 (file)
@@ -1 +1,2 @@
 obj-$(CONFIG_POWER_AVS_OMAP)           += smartreflex.o
+obj-$(CONFIG_ROCKCHIP_IODOMAIN)                += rockchip-io-domain.o
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
new file mode 100755 (executable)
index 0000000..ef7967e
--- /dev/null
@@ -0,0 +1,407 @@
+/*
+ * Rockchip IO Voltage Domain driver
+ *
+ * Copyright 2014 MundoReader S.L.
+ * Copyright 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define MAX_SUPPLIES           16
+
+/*
+ * The max voltage for 1.8V and 3.3V come from the Rockchip datasheet under
+ * "Recommended Operating Conditions" for "Digital GPIO".   When the typical
+ * is 3.3V the max is 3.6V.  When the typical is 1.8V the max is 1.98V.
+ *
+ * They are used like this:
+ * - If the voltage on a rail is above the "1.8" voltage (1.98V) we'll tell the
+ *   SoC we're at 3.3.
+ * - If the voltage on a rail is above the "3.3" voltage (3.6V) we'll consider
+ *   that to be an error.
+ */
+#define MAX_VOLTAGE_1_8                1980000
+#define MAX_VOLTAGE_3_3                3600000
+
+#define RK3288_SOC_CON2                        0x24c
+#define RK3288_SOC_CON2_FLASH0         BIT(7)
+#define RK3288_SOC_FLASH_SUPPLY_NUM    2
+
+#define RK3368_GRF_SOC_CON15                   0x43c
+#define RK3368_GRF_SOC_CON15_FLASH0            BIT(14)
+#define RK3368_SOC_FLASH_SUPPLY_NUM    2
+
+
+struct rockchip_iodomain;
+
+/**
+ * struct rockchip_iodomain_soc_data - per-SoC description of the io domains
+ * @grf_offset: offset of the io-domain control register inside the GRF
+ * @supply_names: regulator supply name per register bit; NULL entries mark
+ *                bits that have no associated io domain
+ * @init: optional SoC specific setup, run once after all supplies are set up
+ */
+struct rockchip_iodomain_soc_data {
+       int grf_offset;
+       const char *supply_names[MAX_SUPPLIES];
+       void (*init)(struct rockchip_iodomain *iod);
+};
+
+/* one handled supply: ties a regulator notifier to its register bit (idx) */
+struct rockchip_iodomain_supply {
+       struct rockchip_iodomain *iod;
+       struct regulator *reg;
+       struct notifier_block nb;
+       int idx;
+};
+
+/* per-device driver state */
+struct rockchip_iodomain {
+       struct device *dev;
+       struct regmap *grf;
+       struct rockchip_iodomain_soc_data *soc_data;
+       struct rockchip_iodomain_supply supplies[MAX_SUPPLIES];
+};
+
+/*
+ * Program the io-domain bit for @supply: 1 selects 1.8V mode, 0 selects
+ * 3.3V mode.  The GRF hiword-mask scheme is used so only this supply's
+ * bit is touched by the write.
+ */
+static int rockchip_iodomain_write(struct rockchip_iodomain_supply *supply,
+                                  int uV)
+{
+       struct rockchip_iodomain *iod = supply->iod;
+       u32 val = BIT(supply->idx) << 16;       /* hiword write-enable */
+       int ret;
+
+       /* voltages at or below the 1.8V ceiling select 1.8V mode */
+       if (uV <= MAX_VOLTAGE_1_8)
+               val |= BIT(supply->idx);
+
+       ret = regmap_write(iod->grf, iod->soc_data->grf_offset, val);
+       if (ret)
+               dev_err(iod->dev, "Couldn't write to GRF\n");
+
+       return ret;
+}
+
+/*
+ * Regulator notifier: keep the SoC io-domain setting in sync with the
+ * external supply voltage.
+ *
+ * Only completed voltage changes are handled (this kernel has no
+ * PRE_VOLTAGE_CHANGE event), so the domain is adjusted after the
+ * regulator has moved.  The headroom built into MAX_VOLTAGE_1_8 /
+ * MAX_VOLTAGE_3_3 covers the window where supply and domain disagree.
+ */
+static int rockchip_iodomain_notify(struct notifier_block *nb,
+                                   unsigned long event,
+                                   void *data)
+{
+       struct rockchip_iodomain_supply *supply =
+                       container_of(nb, struct rockchip_iodomain_supply, nb);
+       int uV;
+       int ret;
+
+       if (event & REGULATOR_EVENT_VOLTAGE_CHANGE)
+               uV = (unsigned long)data;       /* new voltage in uV */
+       else
+               return NOTIFY_OK;
+
+       dev_dbg(supply->iod->dev, "Setting to %d\n", uV);
+
+       if (uV > MAX_VOLTAGE_3_3) {
+               dev_err(supply->iod->dev, "Voltage too high: %d\n", uV);
+               return NOTIFY_BAD;
+       }
+
+       /* a failed GRF write is logged by the helper; no recovery possible */
+       ret = rockchip_iodomain_write(supply, uV);
+
+       dev_info(supply->iod->dev, "Setting to %d done\n", uV);
+       return NOTIFY_OK;
+}
+
+/* SoC hook for rk3288: route the flash0 io domain through this driver */
+static void rk3288_iodomain_init(struct rockchip_iodomain *iod)
+{
+       int ret;
+       u32 val;
+
+       /* if no flash supply we should leave things alone */
+       if (!iod->supplies[RK3288_SOC_FLASH_SUPPLY_NUM].reg)
+               return;
+
+       /*
+        * set flash0 iodomain to also use this framework
+        * instead of a special gpio.
+        */
+       val = RK3288_SOC_CON2_FLASH0 | (RK3288_SOC_CON2_FLASH0 << 16);
+       ret = regmap_write(iod->grf, RK3288_SOC_CON2, val);
+       if (ret < 0)
+               dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
+}
+
+/* SoC hook for rk3368: route the flash0 io domain through this driver */
+static void rk3368_iodomain_init(struct rockchip_iodomain *iod)
+{
+       int ret;
+       u32 val;
+
+       /* if no flash supply we should leave things alone */
+       if (!iod->supplies[RK3368_SOC_FLASH_SUPPLY_NUM].reg)
+               return;
+
+       /*
+        * set flash0 iodomain to also use this framework
+        * instead of a special gpio.
+        */
+       val = RK3368_GRF_SOC_CON15_FLASH0 | (RK3368_GRF_SOC_CON15_FLASH0 << 16);
+       ret = regmap_write(iod->grf, RK3368_GRF_SOC_CON15, val);
+       if (ret < 0)
+               dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
+}
+
+
+/*
+ * On the rk3188 the io-domains are handled by a shared register with the
+ * lower 8 bits being still being continuing drive-strength settings.
+ * Array index == register bit index; NULL entries are skipped in probe.
+ */
+static const struct rockchip_iodomain_soc_data soc_data_rk3188 = {
+       .grf_offset = 0x104,
+       .supply_names = {
+               NULL,
+               NULL,
+               NULL,
+               NULL,
+               NULL,
+               NULL,
+               NULL,
+               NULL,
+               "ap0",
+               "ap1",
+               "cif",
+               "flash",
+               "vccio0",
+               "vccio1",
+               "lcdc0",
+               "lcdc1",
+       },
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3288 = {
+       .grf_offset = 0x380,
+       .supply_names = {
+               "lcdc",         /* LCDC_VDD */
+               "dvp",          /* DVPIO_VDD */
+               "flash0",       /* FLASH0_VDD (emmc) */
+               "flash1",       /* FLASH1_VDD (sdio1) */
+               "wifi",         /* APIO3_VDD  (sdio0) */
+               "bb",           /* APIO5_VDD */
+               "audio",        /* APIO4_VDD */
+               "sdcard",       /* SDMMC0_VDD (sdmmc) */
+               "gpio30",       /* APIO1_VDD */
+               "gpio1830",     /* APIO2_VDD */
+       },
+       .init = rk3288_iodomain_init,
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3368 = {
+       .grf_offset = 0x900,
+       .supply_names = {
+               NULL,
+               "dvp_v18sel",           /* DVP IO domain */
+               "flash0_v18sel",                /* FLASH0 IO domain */
+               "wifi_v18sel",  /* WIFI IO domain */
+               NULL,
+               "audio_v18sel", /* AUDIO IO domain */
+               "sdcard_v18sel",                /* SDCARD IO domain */
+               "gpio30_v18sel",                /* GPIO30 IO domain */
+               "gpio1830_v18sel",      /* GPIO1830 IO domain */
+       },
+       .init = rk3368_iodomain_init,
+};
+
+
+/* maps DT compatibles to the per-SoC data consumed in probe */
+static const struct of_device_id rockchip_iodomain_match[] = {
+       {
+               .compatible = "rockchip,rk3188-io-voltage-domain",
+               .data = (void *)&soc_data_rk3188
+       },
+       {
+               .compatible = "rockchip,rk3288-io-voltage-domain",
+               .data = (void *)&soc_data_rk3288
+       },
+       {
+               .compatible = "rockchip,rk3368-io-voltage-domain",
+               .data = (void *)&soc_data_rk3368
+       },
+       { /* sentinel */ },
+};
+/*
+ * Previously missing: the driver is tristate (see the Kconfig hunk) and
+ * without the device table no module alias is emitted, so autoloading
+ * from DT never happens when built as a module.
+ */
+MODULE_DEVICE_TABLE(of, rockchip_iodomain_match);
+
+/*
+ * Probe: look up the GRF regmap and, for every named supply, grab the
+ * regulator named by the matching DT property, program the initial
+ * io-domain setting from its current voltage and register a notifier
+ * that tracks future voltage changes.
+ */
+static int rockchip_iodomain_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       const struct of_device_id *match;
+       struct rockchip_iodomain *iod;
+       int i, ret = 0;
+
+       if (!np)
+               return -ENODEV;
+
+       iod = devm_kzalloc(&pdev->dev, sizeof(*iod), GFP_KERNEL);
+       if (!iod)
+               return -ENOMEM;
+
+       iod->dev = &pdev->dev;
+       platform_set_drvdata(pdev, iod);
+
+       match = of_match_node(rockchip_iodomain_match, np);
+       iod->soc_data = (struct rockchip_iodomain_soc_data *)match->data;
+
+       iod->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+       if (IS_ERR(iod->grf)) {
+               dev_err(&pdev->dev, "couldn't find grf regmap\n");
+               return PTR_ERR(iod->grf);
+       }
+
+       for (i = 0; i < MAX_SUPPLIES; i++) {
+               const char *supply_name = iod->soc_data->supply_names[i];
+               struct rockchip_iodomain_supply *supply = &iod->supplies[i];
+               struct regulator *reg;
+               int uV;
+               const char *regulator_name = NULL;
+
+               if (!supply_name)
+                       continue;
+
+               /* the DT property named after the supply holds the regulator name */
+               of_property_read_string(np, supply_name, &regulator_name);
+               if (!regulator_name)
+                       continue;
+
+               reg = regulator_get(NULL, regulator_name);
+               if (IS_ERR(reg)) {
+                       ret = PTR_ERR(reg);
+
+                       /* If a supply wasn't specified, that's OK */
+                       if (ret == -ENODEV)
+                               continue;
+                       else if (ret != -EPROBE_DEFER)
+                               dev_err(iod->dev, "couldn't get regulator %s\n",
+                                       supply_name);
+                       goto unreg_notify;
+               }
+
+               /* set initial correct value */
+               uV = regulator_get_voltage(reg);
+
+               /* must be a regulator we can get the voltage of */
+               if (uV < 0) {
+                       dev_err(iod->dev, "Can't determine voltage: %s\n",
+                               supply_name);
+                       /* previously ret was left 0 here: probe "succeeded" */
+                       ret = uV;
+                       regulator_put(reg);
+                       goto unreg_notify;
+               }
+
+               if (uV > MAX_VOLTAGE_3_3) {
+                       dev_crit(iod->dev,
+                                "%d uV is too high. May damage SoC!\n",
+                                uV);
+                       ret = -EINVAL;
+                       regulator_put(reg);
+                       goto unreg_notify;
+               }
+
+               /* setup our supply */
+               supply->idx = i;
+               supply->iod = iod;
+               supply->reg = reg;
+               supply->nb.notifier_call = rockchip_iodomain_notify;
+
+               ret = rockchip_iodomain_write(supply, uV);
+               if (ret) {
+                       supply->reg = NULL;
+                       regulator_put(reg);
+                       goto unreg_notify;
+               }
+
+               /* register regulator notifier */
+               ret = regulator_register_notifier(reg, &supply->nb);
+               if (ret) {
+                       dev_err(&pdev->dev,
+                               "regulator notifier request failed\n");
+                       supply->reg = NULL;
+                       regulator_put(reg);
+                       goto unreg_notify;
+               }
+       }
+
+       if (iod->soc_data->init)
+               iod->soc_data->init(iod);
+
+       return 0;
+
+unreg_notify:
+       /* tear down every supply fully set up before the failure */
+       for (i = MAX_SUPPLIES - 1; i >= 0; i--) {
+               struct rockchip_iodomain_supply *io_supply = &iod->supplies[i];
+
+               if (io_supply->reg) {
+                       regulator_unregister_notifier(io_supply->reg,
+                                                     &io_supply->nb);
+                       /* balance regulator_get(); was leaked before */
+                       regulator_put(io_supply->reg);
+                       io_supply->reg = NULL;
+               }
+       }
+
+       return ret;
+}
+
+/* undo probe: drop the notifiers and release the regulator references */
+static int rockchip_iodomain_remove(struct platform_device *pdev)
+{
+       struct rockchip_iodomain *iod = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = MAX_SUPPLIES - 1; i >= 0; i--) {
+               struct rockchip_iodomain_supply *io_supply = &iod->supplies[i];
+
+               if (io_supply->reg) {
+                       regulator_unregister_notifier(io_supply->reg,
+                                                     &io_supply->nb);
+                       /* balance regulator_get() in probe; was leaked before */
+                       regulator_put(io_supply->reg);
+               }
+       }
+
+       return 0;
+}
+
+/* platform driver glue; bound via the DT match table above */
+static struct platform_driver rockchip_iodomain_driver = {
+       .probe   = rockchip_iodomain_probe,
+       .remove  = rockchip_iodomain_remove,
+       .driver  = {
+               .name  = "rockchip-iodomain",
+               .of_match_table = rockchip_iodomain_match,
+       },
+};
+
+module_platform_driver(rockchip_iodomain_driver);
+
+MODULE_DESCRIPTION("Rockchip IO-domain driver");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_AUTHOR("Doug Anderson <dianders@chromium.org>");
+MODULE_LICENSE("GPL v2");
index d1c8cce46b2fe321977918ea992f13047daaa33e..4d025ba6d7e8186a3ba47ad05244adaa03afca75 100755 (executable)
@@ -22,9 +22,6 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <asm/io.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
 #include <linux/adc.h>
 #include <linux/delay.h>
 #include <linux/ktime.h>
index d44f1fa47cda1873823c24d840c0beb62d762f7b..d494cf90e809f8ae7ab924c84a30a9ea1b13398b 100644 (file)
@@ -191,11 +191,11 @@ static int  rk_pwm_config_v1(struct pwm_chip *chip, struct pwm_device *pwm,
        conf |= (prescale << DW_PWM_PRESCALE);
        barrier();
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_CTRL,off);
-       dsb();
+       dsb(sy);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_HRC,dc);//0x1900);// dc);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_LRC, pv);//0x5dc0);//pv);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_CNTR,0);
-       dsb();
+       dsb(sy);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_CTRL,on|conf);
        
        spin_unlock_irqrestore(lock, flags);    
@@ -229,11 +229,11 @@ static void rk_pwm_resume_v1(struct pwm_chip *chip, struct pwm_device *pwm)
        int     off =  PWM_RESET;
 
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_CTRL,off);
-       dsb();
+       dsb(sy);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_HRC,pc->pwm_duty);//0x1900);// dc);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_LRC, pc->pwm_period);//0x5dc0);//pv);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_CNTR,pc->pwm_count);
-       dsb();
+       dsb(sy);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_CTRL,pc->pwm_ctrl);
 }
 /* config for rockchip,pwm*/
@@ -300,11 +300,11 @@ static int  rk_pwm_config_v2(struct pwm_chip *chip, struct pwm_device *pwm,
        conf |= (prescale << RK_PWM_PRESCALE);  
        barrier();
        //rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_CTRL,off);
-       //dsb();
+       //dsb(sy);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_DUTY,dc);//0x1900);// dc);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_PERIOD,pv);//0x5dc0);//pv);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_CNTR,0);
-       dsb();
+       dsb(sy);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_CTRL,on|conf);
        spin_unlock_irqrestore(lock, flags);    
 
@@ -342,7 +342,7 @@ static void rk_pwm_resume_v2(struct pwm_chip *chip, struct pwm_device *pwm)
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_DUTY,    pc->pwm_duty);//0x1900);// dc);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_PERIOD, pc->pwm_period);//0x5dc0);//pv);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_CNTR,pc->pwm_count);
-       dsb();
+       dsb(sy);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_CTRL,pc->pwm_ctrl);
 }
 
@@ -417,11 +417,11 @@ static int  rk_pwm_config_v3(struct pwm_chip *chip, struct pwm_device *pwm,
        barrier();
 //     rk_pwm_writel(pc, pwm->hwpwm, VOP_REG_CTRL,off);
        
-//     dsb();
+//     dsb(sy);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_DUTY,dc);   //   2    0x1900);// dc);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_PERIOD,pv);   // 4 0x5dc0);//pv);
        rk_pwm_writel(pc, pwm->hwpwm, VOP_REG_CNTR,0);
-       dsb();
+       dsb(sy);
        rk_pwm_writel(pc, pwm->hwpwm, VOP_REG_CTRL,on|conf);
 
        spin_unlock_irqrestore(lock, flags);    
@@ -456,7 +456,7 @@ static void rk_pwm_resume_v3(struct pwm_chip *chip, struct pwm_device *pwm)
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_DUTY,    pc->pwm_duty);//0x1900);// dc);
        rk_pwm_writel(pc, pwm->hwpwm, PWM_REG_PERIOD, pc->pwm_period);//0x5dc0);//pv);
        rk_pwm_writel(pc, pwm->hwpwm, VOP_REG_CNTR,pc->pwm_count);
-       dsb();
+       dsb(sy);
        rk_pwm_writel(pc, pwm->hwpwm, VOP_REG_CTRL,pc->pwm_ctrl);
 }
 
index 91245f5dbe81a7d235f13a2dd2edbe741508d584..47257b6eea849bf168123076714d31b7813105b8 100644 (file)
@@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
                        "desc %p not ACKed\n", tx_desc);
        }
 
+       if (ret == NULL) {
+               dev_dbg(bdma_chan->dchan.device->dev,
+                       "%s: unable to obtain tx descriptor\n", __func__);
+               goto err_out;
+       }
+
        i = bdma_chan->wr_count_next % bdma_chan->bd_num;
        if (i == bdma_chan->bd_num - 1) {
                i = 0;
@@ -297,7 +303,7 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
        tx_desc->txd.phys = bdma_chan->bd_phys +
                                i * sizeof(struct tsi721_dma_desc);
        tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
-
+err_out:
        spin_unlock_bh(&bdma_chan->lock);
 
        return ret;
index 81d8681c31959557fa78a7121d64e85c957e719b..b1b35f38d11d5dc80eaa67241000e5ab89a61068 100644 (file)
@@ -141,8 +141,6 @@ static struct regulator_ops arizona_ldo1_ops = {
        .map_voltage = regulator_map_voltage_linear,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
-       .get_bypass = regulator_get_bypass_regmap,
-       .set_bypass = regulator_set_bypass_regmap,
 };
 
 static const struct regulator_desc arizona_ldo1 = {
index 131e9933f2a3add23b6938a950499e7bcbca58f1..d7b9b4dc8a3db12ef7094e54e80b959289fc142c 100644 (file)
@@ -88,7 +88,7 @@ static int _regulator_disable(struct regulator_dev *rdev);
 static int _regulator_get_voltage(struct regulator_dev *rdev);
 static int _regulator_get_current_limit(struct regulator_dev *rdev);
 static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
-static void _notifier_call_chain(struct regulator_dev *rdev,
+static int _notifier_call_chain(struct regulator_dev *rdev,
                                  unsigned long event, void *data);
 static int _regulator_do_set_voltage(struct regulator_dev *rdev,
                                     int min_uV, int max_uV);
@@ -2304,6 +2304,55 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev,
 }
 EXPORT_SYMBOL_GPL(regulator_map_voltage_linear);
 
+static int _regulator_call_set_voltage(struct regulator_dev *rdev,
+                                      int min_uV, int max_uV,
+                                      unsigned *selector)
+{
+       struct pre_voltage_change_data data;
+       int ret;
+
+       data.old_uV = _regulator_get_voltage(rdev);
+       data.min_uV = min_uV;
+       data.max_uV = max_uV;
+       ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE,
+                                  &data);
+       if (ret & NOTIFY_STOP_MASK)
+               return -EINVAL;
+
+       ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, selector);
+       if (ret >= 0)
+               return ret;
+
+       _notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE,
+                            (void *)data.old_uV);
+
+       return ret;
+}
+
+static int _regulator_call_set_voltage_sel(struct regulator_dev *rdev,
+                                          int uV, unsigned selector)
+{
+       struct pre_voltage_change_data data;
+       int ret;
+
+       data.old_uV = _regulator_get_voltage(rdev);
+       data.min_uV = uV;
+       data.max_uV = uV;
+       ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE,
+                                  &data);
+       if (ret & NOTIFY_STOP_MASK)
+               return -EINVAL;
+
+       ret = rdev->desc->ops->set_voltage_sel(rdev, selector);
+       if (ret >= 0)
+               return ret;
+
+       _notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE,
+                            (void *)data.old_uV);
+
+       return ret;
+}
+
 static int _regulator_do_set_voltage(struct regulator_dev *rdev,
                                     int min_uV, int max_uV)
 {
@@ -2331,8 +2380,8 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
        }
 
        if (rdev->desc->ops->set_voltage) {
-               ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
-                                                  &selector);
+               ret = _regulator_call_set_voltage(rdev, min_uV, max_uV,
+                                                 &selector);
 
                if (ret >= 0) {
                        if (rdev->desc->ops->list_voltage)
@@ -2363,8 +2412,8 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
                                if (old_selector == selector)
                                        ret = 0;
                                else
-                                       ret = rdev->desc->ops->set_voltage_sel(
-                                                               rdev, ret);
+                                       ret = _regulator_call_set_voltage_sel(
+                                               rdev, best_val, selector);
                        } else {
                                ret = -EINVAL;
                        }
@@ -3047,11 +3096,11 @@ EXPORT_SYMBOL_GPL(regulator_unregister_notifier);
 /* notify regulator consumers and downstream regulator consumers.
  * Note mutex must be held by caller.
  */
-static void _notifier_call_chain(struct regulator_dev *rdev,
+static int _notifier_call_chain(struct regulator_dev *rdev,
                                  unsigned long event, void *data)
 {
        /* call rdev chain first */
-       blocking_notifier_call_chain(&rdev->notifier, event, data);
+       return blocking_notifier_call_chain(&rdev->notifier, event, data);
 }
 
 /**
index 160e7510aca694913e407e03dd2bb1408c6cf0b7..0787b97561657d1116f48c32ae5efefb31d71c8d 100644 (file)
@@ -452,6 +452,9 @@ static void attach_one_temp(struct bbc_i2c_bus *bp, struct platform_device *op,
        if (!tp)
                return;
 
+       INIT_LIST_HEAD(&tp->bp_list);
+       INIT_LIST_HEAD(&tp->glob_list);
+
        tp->client = bbc_i2c_attach(bp, op);
        if (!tp->client) {
                kfree(tp);
@@ -497,6 +500,9 @@ static void attach_one_fan(struct bbc_i2c_bus *bp, struct platform_device *op,
        if (!fp)
                return;
 
+       INIT_LIST_HEAD(&fp->bp_list);
+       INIT_LIST_HEAD(&fp->glob_list);
+
        fp->client = bbc_i2c_attach(bp, op);
        if (!fp->client) {
                kfree(fp);
index c1441ed282eb911ff67a6363ce5a78f6cbe45199..e0e6cd605cca76062f51cf5f138ee1354518220c 100644 (file)
@@ -301,13 +301,18 @@ static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index
        if (!bp)
                return NULL;
 
+       INIT_LIST_HEAD(&bp->temps);
+       INIT_LIST_HEAD(&bp->fans);
+
        bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs");
        if (!bp->i2c_control_regs)
                goto fail;
 
-       bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
-       if (!bp->i2c_bussel_reg)
-               goto fail;
+       if (op->num_resources == 2) {
+               bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
+               if (!bp->i2c_bussel_reg)
+                       goto fail;
+       }
 
        bp->waiting = 0;
        init_waitqueue_head(&bp->wq);
index 245a9595a93a131449fd660a7b7d8bd0e9e7f3b5..ef0a78b0d730a7ecd0c9f0071d510bf7dadb16a0 100644 (file)
@@ -812,17 +812,20 @@ mgmt_static_ip_modify(struct beiscsi_hba *phba,
 
        if (ip_action == IP_ACTION_ADD) {
                memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
-                      ip_param->len);
+                      sizeof(req->ip_params.ip_record.ip_addr.addr));
 
                if (subnet_param)
                        memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
-                              subnet_param->value, subnet_param->len);
+                              subnet_param->value,
+                              sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
        } else {
                memcpy(req->ip_params.ip_record.ip_addr.addr,
-                      if_info->ip_addr.addr, ip_param->len);
+                      if_info->ip_addr.addr,
+                      sizeof(req->ip_params.ip_record.ip_addr.addr));
 
                memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
-                      if_info->ip_addr.subnet_mask, ip_param->len);
+                      if_info->ip_addr.subnet_mask,
+                      sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
        }
 
        rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
@@ -850,7 +853,7 @@ static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
        req->action = gtway_action;
        req->ip_addr.ip_type = BE2_IPV4;
 
-       memcpy(req->ip_addr.addr, gt_addr, param_len);
+       memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr));
 
        return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
 }
index 23a90e7b71071844c3b37c09eee4e6033cccc473..a119421cb3246f3a340106bf9a155434100f0c0a 100644 (file)
@@ -72,7 +72,7 @@ struct bfa_sge_s {
 } while (0)
 
 #define bfa_swap_words(_x)  (  \
-       ((_x) << 32) | ((_x) >> 32))
+       ((u64)(_x) << 32) | ((u64)(_x) >> 32))
 
 #ifdef __BIG_ENDIAN
 #define bfa_sge_to_be(_x)
index 0353d7f2172baaea5f63fa3e117b25b57e4a8c0d..62ed744bbe06c47487d5af052cfe9a9e46dbd32a 100644 (file)
@@ -1206,8 +1206,8 @@ static void complete_scsi_command(struct CommandList *cp)
        scsi_set_resid(cmd, ei->ResidualCnt);
 
        if (ei->CommandStatus == 0) {
-               cmd->scsi_done(cmd);
                cmd_free(h, cp);
+               cmd->scsi_done(cmd);
                return;
        }
 
@@ -1380,8 +1380,8 @@ static void complete_scsi_command(struct CommandList *cp)
                dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
                                cp, ei->CommandStatus);
        }
-       cmd->scsi_done(cmd);
        cmd_free(h, cp);
+       cmd->scsi_done(cmd);
 }
 
 static void hpsa_pci_unmap(struct pci_dev *pdev,
@@ -3118,7 +3118,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                }
                if (ioc->Request.Type.Direction == XFER_WRITE) {
                        if (copy_from_user(buff[sg_used], data_ptr, sz)) {
-                               status = -ENOMEM;
+                               status = -EFAULT;
                                goto cleanup1;
                        }
                } else
index 5de946984500692b51705b9f6a42640b3b456e4c..f91d41788ce4b3ef330cb11f437dca3bf4952ffc 100644 (file)
@@ -717,11 +717,21 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                        return NULL;
                }
 
+               if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
+                       iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
+                       return NULL;
+               }
+
                task = conn->login_task;
        } else {
                if (session->state != ISCSI_STATE_LOGGED_IN)
                        return NULL;
 
+               if (data_size != 0) {
+                       iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
+                       return NULL;
+               }
+
                BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
                BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
 
index f033b191a022d882a77174675d66b1fce29a99e2..e6884940d1070250971f8263316098e43b218f5d 100644 (file)
@@ -1514,12 +1514,10 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
 static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
        uint32_t req_cnt)
 {
-       struct qla_hw_data *ha = vha->hw;
-       device_reg_t __iomem *reg = ha->iobase;
        uint32_t cnt;
 
        if (vha->req->cnt < (req_cnt + 2)) {
-               cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
+               cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
 
                ql_dbg(ql_dbg_tgt, vha, 0xe00a,
                    "Request ring circled: cnt=%d, vha->->ring_index=%d, "
index 66b0b26a1381e4ecb17806c935ca05c1b62cfe70..cfd49eca67aae1b5bc8d1845f89defc5612ea4d5 100644 (file)
@@ -762,7 +762,16 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
        pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
 
        node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
-       WARN_ON(node && (node != se_nacl));
+       if (WARN_ON(node && (node != se_nacl))) {
+               /*
+                * The nacl no longer matches what we think it should be.
+                * Most likely a new dynamic acl has been added while
+                * someone dropped the hardware lock.  It clearly is a
+                * bug elsewhere, but this bit can't make things worse.
+                */
+               btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
+                              node, GFP_ATOMIC);
+       }
 
        pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
            se_nacl, nacl->nport_wwnn, nacl->nport_id);
index f43de1e56420ac7916ca99c83281b81b417f4fb9..3668b1b23b5a3fd6a27d61fd4637e4a1904ad611 100644 (file)
@@ -1689,8 +1689,10 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
         * is no point trying to lock the door of an off-line device.
         */
        shost_for_each_device(sdev, shost) {
-               if (scsi_device_online(sdev) && sdev->locked)
+               if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
                        scsi_eh_lock_door(sdev);
+                       sdev->was_reset = 0;
+               }
        }
 
        /*
index 75574da08d13e80129afa12120bb8d6deeb0adff..26b898182e8f1939056c6f7cc8715b03e79a674e 100644 (file)
@@ -815,6 +815,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                        scsi_next_command(cmd);
                        return;
                }
+       } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
+               /*
+                * Certain non BLOCK_PC requests are commands that don't
+                * actually transfer anything (FLUSH), so cannot use
+                * good_bytes != blk_rq_bytes(req) as the signal for an error.
+                * This sets the error explicitly for the problem case.
+                */
+               error = __scsi_error_from_host_byte(cmd, result);
        }
 
        /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
index 91b76cea3e3cb94f9bea8229379d96fbcbfeb2c1..87ca72d36d5b31ffb21ec9330580d2dbf97f713b 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/device.h>
 #include <linux/hyperv.h>
 #include <linux/mempool.h>
+#include <linux/blkdev.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
@@ -803,6 +804,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
                case ATA_12:
                        set_host_byte(scmnd, DID_PASSTHROUGH);
                        break;
+               /*
+                * On Some Windows hosts TEST_UNIT_READY command can return
+                * SRB_STATUS_ERROR, let the upper level code deal with it
+                * based on the sense information.
+                */
+               case TEST_UNIT_READY:
+                       break;
                default:
                        set_host_byte(scmnd, DID_TARGET_FAILURE);
                }
@@ -1285,6 +1293,16 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
        return SUCCESS;
 }
 
+/*
+ * The host guarantees to respond to each command, although I/O latencies might
+ * be unbounded on Azure.  Reset the timer unconditionally to give the host a
+ * chance to perform EH.
+ */
+static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
+{
+       return BLK_EH_RESET_TIMER;
+}
+
 static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
 {
        bool allowed = true;
@@ -1444,6 +1462,7 @@ static struct scsi_host_template scsi_driver = {
        .bios_param =           storvsc_get_chs,
        .queuecommand =         storvsc_queuecommand,
        .eh_host_reset_handler =        storvsc_host_reset_handler,
+       .eh_timed_out =         storvsc_eh_timed_out,
        .slave_alloc =          storvsc_device_alloc,
        .slave_destroy =        storvsc_device_destroy,
        .slave_configure =      storvsc_device_configure,
index b9f0192758d6d929aab86d087c443adc46154e66..0791c92e8c505cc9a35158bcdcdf0a7612ae92de 100644 (file)
@@ -89,7 +89,13 @@ err_exit:
 
 static void mid_spi_dma_exit(struct dw_spi *dws)
 {
+       if (!dws->dma_inited)
+               return;
+
+       dmaengine_terminate_all(dws->txchan);
        dma_release_channel(dws->txchan);
+
+       dmaengine_terminate_all(dws->rxchan);
        dma_release_channel(dws->rxchan);
 }
 
@@ -136,7 +142,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
        txconf.dst_addr = dws->dma_addr;
        txconf.dst_maxburst = LNW_DMA_MSIZE_16;
        txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+       txconf.dst_addr_width = dws->dma_width;
        txconf.device_fc = false;
 
        txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
@@ -159,7 +165,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
        rxconf.src_addr = dws->dma_addr;
        rxconf.src_maxburst = LNW_DMA_MSIZE_16;
        rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+       rxconf.src_addr_width = dws->dma_width;
        rxconf.device_fc = false;
 
        rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
index 86d2158946bbf3de879cc459e53aa00005e6768f..798729eb66894dce93764449c15e2c3e3fb85736 100644 (file)
@@ -136,6 +136,7 @@ struct omap2_mcspi_cs {
        void __iomem            *base;
        unsigned long           phys;
        int                     word_len;
+       u16                     mode;
        struct list_head        node;
        /* Context save and restore shadow register */
        u32                     chconf0;
@@ -801,6 +802,8 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
 
        mcspi_write_chconf0(spi, l);
 
+       cs->mode = spi->mode;
+
        dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
                        OMAP2_MCSPI_MAX_FREQ >> div,
                        (spi->mode & SPI_CPHA) ? "trailing" : "leading",
@@ -871,6 +874,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
                        return -ENOMEM;
                cs->base = mcspi->base + spi->chip_select * 0x14;
                cs->phys = mcspi->phys + spi->chip_select * 0x14;
+               cs->mode = 0;
                cs->chconf0 = 0;
                spi->controller_state = cs;
                /* Link this to context save list */
@@ -1043,6 +1047,16 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
                        mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
        }
 
+       /*
+        * The slave driver could have changed spi->mode in which case
+        * it will be different from cs->mode (the current hardware setup).
+        * If so, set par_override (even though its not a parity issue) so
+        * omap2_mcspi_setup_transfer will be called to configure the hardware
+        * with the correct mode on the first iteration of the loop below.
+        */
+       if (spi->mode != cs->mode)
+               par_override = 1;
+
        omap2_mcspi_set_enable(spi, 0);
 
        m->status = status;
index 66a5f82cf138d86b4fa84d5235e1086022ca6b08..183aa80c90176f86a860756237205e685e661598 100644 (file)
@@ -403,8 +403,6 @@ static int orion_spi_probe(struct platform_device *pdev)
        struct resource *r;
        unsigned long tclk_hz;
        int status = 0;
-       const u32 *iprop;
-       int size;
 
        master = spi_alloc_master(&pdev->dev, sizeof *spi);
        if (master == NULL) {
@@ -415,10 +413,10 @@ static int orion_spi_probe(struct platform_device *pdev)
        if (pdev->id != -1)
                master->bus_num = pdev->id;
        if (pdev->dev.of_node) {
-               iprop = of_get_property(pdev->dev.of_node, "cell-index",
-                                       &size);
-               if (iprop && size == sizeof(*iprop))
-                       master->bus_num = *iprop;
+               u32 cell_index;
+               if (!of_property_read_u32(pdev->dev.of_node, "cell-index",
+                                         &cell_index))
+                       master->bus_num = cell_index;
        }
 
        /* we support only mode 0, and no options */
index 371cc66f1a0e9d4abfce97b8ee5fe3466cb28ad3..5266c89fc9896e854a8008c8fa1f7cd6bf102556 100644 (file)
@@ -1080,7 +1080,7 @@ err_rxdesc:
                     pl022->sgt_tx.nents, DMA_TO_DEVICE);
 err_tx_sgmap:
        dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
-                    pl022->sgt_tx.nents, DMA_FROM_DEVICE);
+                    pl022->sgt_rx.nents, DMA_FROM_DEVICE);
 err_rx_sgmap:
        sg_free_table(&pl022->sgt_tx);
 err_alloc_tx_sg:
index 48b396fced0acdde9fe6f28cb518a5b3024247c0..d26a2d195d217ca5f5c5181e68ec732ebae4f767 100644 (file)
@@ -1324,7 +1324,9 @@ static int pxa2xx_spi_suspend(struct device *dev)
        if (status != 0)
                return status;
        write_SSCR0(0, drv_data->ioaddr);
-       clk_disable_unprepare(ssp->clk);
+
+       if (!pm_runtime_suspended(dev))
+               clk_disable_unprepare(ssp->clk);
 
        return 0;
 }
@@ -1338,7 +1340,8 @@ static int pxa2xx_spi_resume(struct device *dev)
        pxa2xx_spi_dma_resume(drv_data);
 
        /* Enable the SSP clock */
-       clk_prepare_enable(ssp->clk);
+       if (!pm_runtime_suspended(dev))
+               clk_prepare_enable(ssp->clk);
 
        /* Start the queue running */
        status = spi_master_resume(drv_data->master);
index 93cb1803d2af3e544ecfa06f7b1bdaa068528254..dbe542cc11d51a4055c9c0ae378463545b425904 100755 (executable)
@@ -428,7 +428,7 @@ static void giveback(struct dw_spi *dws)
        dws->cur_chip = NULL;
        spi_finalize_current_message(dws->master);
        
-       DBG_SPI("%s:line=%d,tx_left=%d\n",__func__,__LINE__, (dws->tx_end - dws->tx) / dws->n_bytes);
+       DBG_SPI("%s:line=%d,tx_left=%ld\n",__func__,__LINE__, (long)(dws->tx_end - dws->tx) / dws->n_bytes);
 }
 
 
@@ -540,7 +540,7 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id)
 /* Must be called inside pump_transfers() */
 static void poll_transfer(struct dw_spi *dws)
 {      
-       DBG_SPI("%s:len=%d\n",__func__, dws->len);
+       DBG_SPI("%s:len=%ld\n",__func__, (long)dws->len);
        
        do {
                dw_writer(dws);
@@ -645,7 +645,7 @@ static void pump_transfers(unsigned long data)
                        chip->clk_div = clk_div;
                }
        }
-       DBG_SPI("%s:len=%d,clk_div=%d,speed_hz=%d\n",__func__,dws->len,chip->clk_div,chip->speed_hz);
+       DBG_SPI("%s:len=%ld,clk_div=%d,speed_hz=%d\n",__func__, (long)dws->len,chip->clk_div,chip->speed_hz);
        if (transfer->bits_per_word) {
                bits = transfer->bits_per_word;
 
index cbd613a0c62999d67736a28ba437641575174427..7c224da9c1295225ba3c2c4ece8a407f14569211 100755 (executable)
@@ -276,7 +276,8 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
                txdesc->callback = dw_spi_dma_txcb;
                txdesc->callback_param = dws;
 
-               DBG_SPI("%s:dst_addr=0x%p,tx_dma=0x%p,len=%d,burst=%d,width=%d\n",__func__,(int *)dws->tx_dma_addr, (int *)dws->tx_dma, dws->len,txconf.dst_maxburst, width);
+               DBG_SPI("%s:dst_addr=0x%p,tx_dma=0x%p,len=%ld,burst=%d,width=%d\n"
+                       ,__func__,(int *)dws->tx_dma_addr, (int *)dws->tx_dma, (long)dws->len,txconf.dst_maxburst, width);
        }
 
        if (dws->rx)
@@ -307,7 +308,8 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
                rxdesc->callback = dw_spi_dma_rxcb;
                rxdesc->callback_param = dws;
                
-               DBG_SPI("%s:src_addr=0x%p,rx_dma=0x%p,len=%d,burst=%d,width=%d\n",__func__, (int *)dws->rx_dma_addr, (int *)dws->rx_dma, dws->len, rxconf.src_maxburst, width);
+               DBG_SPI("%s:src_addr=0x%p,rx_dma=0x%p,len=%ld,burst=%d,width=%d\n"
+                       ,__func__, (int *)dws->rx_dma_addr, (int *)dws->rx_dma, (long)dws->len, rxconf.src_maxburst, width);
        }
 
        if(!dws->tx)
index 3511b0840362b88c878abcd1d2de885d005f053c..ccaef8b48ebaaafc72d244edf6c1063270367902 100644 (file)
@@ -363,7 +363,9 @@ static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
        if (!sc->nr_to_scan)
                return lru_count;
 
-       mutex_lock(&ashmem_mutex);
+       if (!mutex_trylock(&ashmem_mutex))
+               return -1;
+
        list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
                loff_t start = range->pgstart * PAGE_SIZE;
                loff_t end = (range->pgend + 1) * PAGE_SIZE;
index b0204f398357c8d764f9149ae8b10f92f3409322..b02cfe89eb0c9fca4e760a41e97e53085342c724 100755 (executable)
@@ -361,6 +361,8 @@ static void fiq_debugger_dump_irqs(struct fiq_debugger_state *state)
                }
        }
        for (n = 0; n < NR_IPI; n++) {
+#define S(x,s) [x] = s
+#ifdef CONFIG_ARM
                enum ipi_msg_type {
                        IPI_WAKEUP,
                        IPI_TIMER,
@@ -372,7 +374,6 @@ static void fiq_debugger_dump_irqs(struct fiq_debugger_state *state)
                        IPI_CPU_BACKTRACE,
                };
                static const char *ipi_types[NR_IPI] = {
-#define S(x,s) [x] = s
                        S(IPI_WAKEUP, "CPU wakeup"),
                        S(IPI_TIMER, "Timer broadcast"),
                        S(IPI_RESCHEDULE, "Rescheduling"),
@@ -381,8 +382,24 @@ static void fiq_debugger_dump_irqs(struct fiq_debugger_state *state)
                        S(IPI_CPU_STOP, "CPU stop"),
                        S(IPI_COMPLETION, "Completion"),
                        S(IPI_CPU_BACKTRACE, "CPU backtrace"),
-#undef S
                };
+#elif defined(CONFIG_ARM64)
+               enum ipi_msg_type {
+                       IPI_RESCHEDULE,
+                       IPI_CALL_FUNC,
+                       IPI_CALL_FUNC_SINGLE,
+                       IPI_CPU_STOP,
+                       IPI_TIMER,
+               };
+               static const char *ipi_types[NR_IPI] = {
+                       S(IPI_RESCHEDULE, "Rescheduling"),
+                       S(IPI_CALL_FUNC, "Function call"),
+                       S(IPI_CALL_FUNC_SINGLE, "Single function call"),
+                       S(IPI_CPU_STOP, "CPU stop"),
+                       S(IPI_TIMER, "Timer broadcast"),
+               };
+#endif
+#undef S
                for (cpu = 0; cpu < NR_CPUS; cpu++) {
                        unsigned int irqs = __get_irq_stat(cpu, ipi_irqs[n]);
                        if (irqs == 0)
index bde5f6eeb0873fd29dc5dde999205987a1c5464c..662c9c47468e9be73ef92930a7bffe01ec06fb03 100755 (executable)
@@ -123,7 +123,7 @@ struct ion_handle {
 static void ion_iommu_force_unmap(struct ion_buffer *buffer);
 #endif
 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
-extern char *rockchip_ion_snapshot_get(unsigned *size);
+extern char *rockchip_ion_snapshot_get(size_t *size);
 extern int rockchip_ion_snapshot_debugfs(struct dentry* root);
 static int ion_snapshot_save(struct ion_device *idev, size_t len);
 #endif
@@ -285,7 +285,7 @@ err2:
 
 void ion_buffer_destroy(struct ion_buffer *buffer)
 {
-       trace_ion_buffer_destroy("", (unsigned int)buffer, buffer->size);
+       trace_ion_buffer_destroy("", (void*)buffer, buffer->size);
 
        if (WARN_ON(buffer->kmap_cnt > 0))
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
@@ -551,7 +551,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
                handle = ERR_PTR(ret);
        }
 
-       trace_ion_buffer_alloc(client->display_name, (unsigned int)buffer,
+       trace_ion_buffer_alloc(client->display_name, (void*)buffer,
                buffer->size);
 
        return handle;
@@ -573,7 +573,7 @@ void ion_free(struct ion_client *client, struct ion_handle *handle)
                return;
        }
        mutex_unlock(&client->lock);
-       trace_ion_buffer_free(client->display_name, (unsigned int)handle->buffer,
+       trace_ion_buffer_free(client->display_name, (void*)handle->buffer,
                        handle->buffer->size);
        ion_handle_put(handle);
 }
@@ -684,8 +684,8 @@ void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
        vaddr = ion_handle_kmap_get(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
-       trace_ion_kernel_map(client->display_name, (unsigned int)buffer,
-                       buffer->size, (unsigned int)vaddr);
+       trace_ion_kernel_map(client->display_name, (void*)buffer,
+                       buffer->size, (void*)vaddr);
        return vaddr;
 }
 EXPORT_SYMBOL(ion_map_kernel);
@@ -697,7 +697,7 @@ void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
        mutex_lock(&client->lock);
        buffer = handle->buffer;
        mutex_lock(&buffer->lock);
-       trace_ion_kernel_unmap(client->display_name, (unsigned int)buffer,
+       trace_ion_kernel_unmap(client->display_name, (void*)buffer,
                        buffer->size);
        ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);
@@ -722,7 +722,7 @@ static void ion_iommu_add(struct ion_buffer *buffer,
                } else if (iommu->key > entry->key) {
                        p = &(*p)->rb_right;
                } else {
-                       pr_err("%s: buffer %p already has mapping for domainid %x\n",
+                       pr_err("%s: buffer %p already has mapping for domainid %lx\n",
                                __func__,
                                buffer,
                                iommu->key);
@@ -735,7 +735,7 @@ static void ion_iommu_add(struct ion_buffer *buffer,
 }
 
 static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
-                                               uint32_t key)
+                                               unsigned long key)
 {
        struct rb_node **p = &buffer->iommu_maps.rb_node;
        struct rb_node *parent = NULL;
@@ -768,7 +768,7 @@ static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
                return ERR_PTR(-ENOMEM);
 
        data->buffer = buffer;
-       data->key = (uint32_t)iommu_dev;
+       data->key = (unsigned long)iommu_dev;
 
        ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
                                                buffer->size, buffer->flags);
@@ -821,13 +821,13 @@ int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
        }
 
        if (buffer->size & ~PAGE_MASK) {
-               pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
+               pr_debug("%s: buffer size %zu is not aligned to %lx", __func__,
                        buffer->size, PAGE_SIZE);
                ret = -EINVAL;
                goto out;
        }
 
-       iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
+       iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
        if (!iommu_map) {
                pr_debug("%s: create new map for buffer(%p)\n", __func__, buffer);
                iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
@@ -837,7 +837,7 @@ int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
                pr_debug("%s: buffer(%p) already mapped\n", __func__, buffer);
                if (iommu_map->mapped_size != buffer->size) {
                        pr_err("%s: handle %p is already mapped with length"
-                                       " %x, trying to map with length %x\n",
+                                       " %d, trying to map with length %zu\n",
                                __func__, handle, iommu_map->mapped_size, buffer->size);
                        ret = -EINVAL;
                } else {
@@ -848,7 +848,7 @@ int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
        if (!ret)
                buffer->iommu_map_cnt++;
        *size = buffer->size;
-       trace_ion_iommu_map(client->display_name, (unsigned int)buffer, buffer->size,
+       trace_ion_iommu_map(client->display_name, (void*)buffer, buffer->size,
                dev_name(iommu_dev), *iova, *size, buffer->iommu_map_cnt);
 out:
        mutex_unlock(&buffer->lock);
@@ -863,7 +863,7 @@ static void ion_iommu_release(struct kref *kref)
                                                ref);
        struct ion_buffer *buffer = map->buffer;
 
-       trace_ion_iommu_release("", (unsigned int)buffer, buffer->size,
+       trace_ion_iommu_release("", (void*)buffer, buffer->size,
                "", map->iova_addr, map->mapped_size, buffer->iommu_map_cnt);
 
        rb_erase(&map->node, &buffer->iommu_maps);
@@ -906,7 +906,7 @@ void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
 
        mutex_lock(&buffer->lock);
 
-       iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
+       iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
 
        if (!iommu_map) {
                WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
@@ -918,7 +918,7 @@ void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
 
        buffer->iommu_map_cnt--;
 
-       trace_ion_iommu_unmap(client->display_name, (unsigned int)buffer, buffer->size,
+       trace_ion_iommu_unmap(client->display_name, (void*)buffer, buffer->size,
                dev_name(iommu_dev), iommu_map->iova_addr,
                iommu_map->mapped_size, buffer->iommu_map_cnt);
 out:
@@ -942,7 +942,8 @@ static int ion_debug_client_show_buffer_map(struct seq_file *s, struct ion_buffe
        while (node != NULL) {
                iommu_map = rb_entry(node, struct ion_iommu_map, node);
                seq_printf(s, "%16.16s:   0x%08lx   0x%08x   0x%08x %8zuKB %4d\n",
-                       "<iommu>", iommu_map->iova_addr, 0, 0, iommu_map->mapped_size>>10,
+                       "<iommu>", iommu_map->iova_addr, 0, 0,
+                       (size_t)iommu_map->mapped_size>>10,
                        atomic_read(&iommu_map->ref.refcount));
 
                node = rb_next(node);
@@ -1481,7 +1482,7 @@ int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
        if (fd < 0)
                dma_buf_put(dmabuf);
 
-       trace_ion_buffer_share(client->display_name, (unsigned int)handle->buffer,
+       trace_ion_buffer_share(client->display_name, (void*)handle->buffer,
                                handle->buffer->size, fd);
        return fd;
 }
@@ -1529,7 +1530,7 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
                handle = ERR_PTR(ret);
        }
 
-       trace_ion_buffer_import(client->display_name, (unsigned int)buffer,
+       trace_ion_buffer_import(client->display_name, (void*)buffer,
                                buffer->size);
 end:
        dma_buf_put(dmabuf);
@@ -1870,8 +1871,8 @@ static int ion_cma_heap_debug_show(struct seq_file *s, void *unused)
        seq_printf(s, "%s Heap bitmap:\n", heap->name);
 
        for(i = rows - 1; i>= 0; i--){
-               seq_printf(s, "%.4uM@0x%08x: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
-                               i+1, base+(i)*SZ_1M,
+               seq_printf(s, "%.4uM@0x%lx: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+                               i+1, (unsigned long)base+(i)*SZ_1M,
                                cma->bitmap[i*8 + 7],
                                cma->bitmap[i*8 + 6],
                                cma->bitmap[i*8 + 5],
@@ -1881,8 +1882,8 @@ static int ion_cma_heap_debug_show(struct seq_file *s, void *unused)
                                cma->bitmap[i*8 + 1],
                                cma->bitmap[i*8]);
        }
-       seq_printf(s, "Heap size: %luM, Heap base: 0x%08x\n",
-               (cma->count)>>8, base);
+       seq_printf(s, "Heap size: %luM, Heap base: 0x%lx\n",
+               (cma->count)>>8, (unsigned long)base);
 
        return 0;
 }
@@ -2112,10 +2113,10 @@ static int ion_snapshot_save(struct ion_device *idev, size_t len)
        }
        memset(seqf.buf, 0, seqf.size);
        seqf.count = 0;
-       pr_debug("%s: save snapshot 0x%x@0x%lx\n", __func__, seqf.size,
-               __pa(seqf.buf));
+       pr_debug("%s: save snapshot 0x%zx@0x%lx\n", __func__, seqf.size,
+               (unsigned long)__pa(seqf.buf));
 
-       seq_printf(&seqf, "call by comm: %s pid: %d, alloc: %uKB\n",
+       seq_printf(&seqf, "call by comm: %s pid: %d, alloc: %zuKB\n",
                current->comm, current->pid, len>>10);
 
        down_read(&idev->lock);
index d957a22ba218d7a6db2b053d147a8a6f204f6923..081905d1bc431b9e487a5185e5077a9f2a0f0be8 100644 (file)
@@ -158,7 +158,7 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
        page = pfn_to_page(PFN_DOWN(heap_data->base));
        size = heap_data->size;
 
-       printk("%s: %x@%lx\n", __func__, size, heap_data->base);
+       printk("%s: %zx@%lx\n", __func__, size, heap_data->base);
 
        ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
 
index 508c44e2627227a51b7a29eb6c07fe4246f787f0..67a737fa39c29f042cdca94ad74fe77e3a98e89d 100755 (executable)
@@ -205,7 +205,7 @@ static int ion_cma_map_iommu(struct ion_buffer *buffer,
        struct ion_cma_buffer_info *info = buffer->priv_virt;
 
        data->iova_addr = rockchip_iovmm_map(iommu_dev, info->table->sgl, 0, iova_length);
-       pr_debug("%s: map %x -> %lx\n", __func__, info->table->sgl->dma_address,
+       pr_debug("%s: map %lx -> %lx\n", __func__, (unsigned long)info->table->sgl->dma_address,
                data->iova_addr);
        if (IS_ERR_VALUE(data->iova_addr)) {
                pr_err("%s: rockchip_iovmm_map() failed: %lx\n", __func__, data->iova_addr);
index b1c8a1829c44336b2bc974886eed40789c8979a5..ee113db3042e86c09ef53af481564a3a7d59022b 100644 (file)
@@ -174,7 +174,7 @@ static int ion_drm_heap_map_iommu(struct ion_buffer *buffer,
        struct sg_table *table = (struct sg_table*)buffer->priv_virt;
 
        data->iova_addr = rockchip_iovmm_map(iommu_dev, table->sgl, 0, iova_length);
-       pr_debug("%s: map %x -> %lx\n", __func__, table->sgl->dma_address,
+       pr_debug("%s: map %lx -> %lx\n", __func__, (unsigned long)table->sgl->dma_address,
                data->iova_addr);
        if (IS_ERR_VALUE(data->iova_addr)) {
                pr_err("%s: rockchip_iovmm_map() failed: %lx\n", __func__,
@@ -226,7 +226,7 @@ struct ion_heap *ion_drm_heap_create(struct ion_platform_heap *heap_data)
        page = pfn_to_page(PFN_DOWN(heap_data->base));
        size = heap_data->size;
 
-       printk("%s: %x@%lx\n", __func__, size, heap_data->base);
+       printk("%s: %zx@%lx\n", __func__, size, heap_data->base);
 
        ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
 
index 12e615d1cd513c2bdc9d1a32e8848af4100a722a..671b05248edd0077c51c626e5454df411e211169 100755 (executable)
@@ -49,7 +49,7 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
 struct ion_iommu_map {
        unsigned long iova_addr;
        struct rb_node node;
-       uint32_t key;
+       unsigned long key;
        struct ion_buffer *buffer;
        struct kref ref;
        int mapped_size;
index 4593046b9a6f7867241b79891498dfecc015e623..e53e0456787eada34ee292fea146b3b0a679cde4 100755 (executable)
@@ -261,7 +261,7 @@ static int ion_system_map_iommu(struct ion_buffer *buffer,
        struct sg_table *table = (struct sg_table*)buffer->priv_virt;
 
        data->iova_addr = rockchip_iovmm_map(iommu_dev, table->sgl, 0, iova_length);
-       pr_debug("%s: map %x -> %lx\n", __func__, table->sgl->dma_address, data->iova_addr);
+       pr_debug("%s: map %lx -> %lx\n", __func__, (unsigned long)table->sgl->dma_address, data->iova_addr);
        if (IS_ERR_VALUE(data->iova_addr)) {
                pr_err("%s: rockchip_iovmm_map() failed: %lx\n", __func__, data->iova_addr);
                ret = data->iova_addr;
index cb957f9506a0c83b542b86d6faa1e0d7b9a707a7..31394da72882e215f502cb338d975378ddf28085 100755 (executable)
@@ -152,7 +152,7 @@ static long rockchip_custom_ioctl (struct ion_client *client, unsigned int cmd,
                if (IS_ERR(dmabuf))
                        return PTR_ERR(dmabuf);
 
-               data.id = (unsigned int)dmabuf;
+               data.id = (unsigned long)dmabuf;
 //             dma_buf_put(dmabuf);
 
                if (copy_to_user((void __user *)arg, &data, sizeof(struct ion_share_id_data)))
@@ -270,7 +270,7 @@ int __init rockchip_ion_find_heap(unsigned long node, const char *uname,
                return 0;
 
        prop = of_get_flat_dt_prop(node, "rockchip,ion_heap", &len);
-       if (!prop || (len != sizeof(unsigned long)))
+       if (!prop || (len != sizeof(__be32)))
                return 0;
 
        heap = &pdata->heaps[pdata->nr++];
@@ -279,14 +279,14 @@ int __init rockchip_ion_find_heap(unsigned long node, const char *uname,
        rockchip_ion_populate_heap(heap);
 
        prop = of_get_flat_dt_prop(node, "reg", &len);
-       if (prop && (len >= 2*sizeof(unsigned long))) {
+       if (prop && (len >= 2*sizeof(__be32))) {
                heap->base = be32_to_cpu(prop[0]);
                heap->size = be32_to_cpu(prop[1]);
-               if (len==3*sizeof(unsigned long))
+               if (len==3*sizeof(__be32))
                        heap->align = be32_to_cpu(prop[2]);
        }
 
-       pr_info("ion heap(%s): base(%lx) size(%x) align(%lx)\n", heap->name,
+       pr_info("ion heap(%s): base(%lx) size(%zx) align(%lx)\n", heap->name,
                        heap->base, heap->size, heap->align);
        return 0;
 }
index e163032d931ca48c782e9ca410bdee677b46d829..4814b42cde0443f8180a84ef22152acbb4e9de4d 100755 (executable)
@@ -68,7 +68,7 @@ static const struct file_operations ion_snapshot_fops = {
        .read = ion_snapshot_read,
 };
 
-char *rockchip_ion_snapshot_get(unsigned *size)
+char *rockchip_ion_snapshot_get(size_t *size)
 {
        *size = LOG_BUF_LEN;
        return ion_snapshot_buf;
@@ -127,12 +127,14 @@ static int __init rockchip_ion_snapshot_init(void)
 
        ion_snapshot_buf = last_ion_vmap(virt_to_phys(log_buf), 1 << LOG_BUF_PAGE_ORDER);
        if (!ion_snapshot_buf) {
-               pr_err("failed to map %d pages at 0x%08x\n", 1 << LOG_BUF_PAGE_ORDER, virt_to_phys(log_buf));
+               pr_err("failed to map %d pages at 0x%lx\n", 1 << LOG_BUF_PAGE_ORDER,
+                       (unsigned long)virt_to_phys(log_buf));
                return 0;
        }
 
-       pr_info("0x%08x map to 0x%p and copy to 0x%p (version 0.1)\n", 
-                       virt_to_phys(log_buf), ion_snapshot_buf, last_ion_buf);
+       pr_info("0x%lx map to 0x%p and copy to 0x%p (version 0.1)\n", 
+                       (unsigned long)virt_to_phys(log_buf), ion_snapshot_buf,
+                       last_ion_buf);
 
        memcpy(last_ion_buf, ion_snapshot_buf, LOG_BUF_LEN);
        memset(ion_snapshot_buf, 0, LOG_BUF_LEN);
index 2dcc34cf2a815094f48e0e6f86282245df2d2917..67bf9d322d46cae973efb655d244b71a6a52abc3 100644 (file)
@@ -8,11 +8,11 @@
 #include <linux/tracepoint.h>
 
 DECLARE_EVENT_CLASS(ion_buffer_op,
-       TP_PROTO(const char* client, unsigned int buf, unsigned int size),
+       TP_PROTO(const char* client, void* buf, unsigned int size),
        TP_ARGS(client, buf, size),
        TP_STRUCT__entry(
                __string(client, client)
-               __field(unsigned int, buf)
+               __field(void*, buf)
                __field(unsigned int, size)
        ),
        TP_fast_assign(
@@ -20,35 +20,35 @@ DECLARE_EVENT_CLASS(ion_buffer_op,
                __entry->buf = buf;
                __entry->size = size;
        ),
-       TP_printk("client=%s,buffer=%08x:%d",
+       TP_printk("client=%s,buffer=%p:%d",
                  __get_str(client), __entry->buf, __entry->size)
 );
 DEFINE_EVENT(ion_buffer_op, ion_buffer_alloc,
-       TP_PROTO(const char* client, unsigned int buffer, unsigned int size),
+       TP_PROTO(const char* client, void* buffer, unsigned int size),
        TP_ARGS(client, buffer, size));
 
 DEFINE_EVENT(ion_buffer_op, ion_buffer_free,
-       TP_PROTO(const char* client, unsigned int buffer, unsigned int size),
+       TP_PROTO(const char* client, void* buffer, unsigned int size),
        TP_ARGS(client, buffer, size));
 
 DEFINE_EVENT(ion_buffer_op, ion_buffer_import,
-       TP_PROTO(const char* client, unsigned int buffer, unsigned int size),
+       TP_PROTO(const char* client, void* buffer, unsigned int size),
        TP_ARGS(client, buffer, size));
 
 DEFINE_EVENT(ion_buffer_op, ion_buffer_destroy,
-       TP_PROTO(const char* client, unsigned int buffer, unsigned int size),
+       TP_PROTO(const char* client, void* buffer, unsigned int size),
        TP_ARGS(client, buffer, size));
 
 DEFINE_EVENT(ion_buffer_op, ion_kernel_unmap,
-       TP_PROTO(const char* client, unsigned int buffer, unsigned int size),
+       TP_PROTO(const char* client, void* buffer, unsigned int size),
        TP_ARGS(client, buffer, size));
 
 TRACE_EVENT(ion_buffer_share,
-       TP_PROTO(const char* client, unsigned int buf, unsigned int size, int fd),
+       TP_PROTO(const char* client, void* buf, unsigned int size, int fd),
        TP_ARGS(client, buf, size, fd),
        TP_STRUCT__entry(
                __string(client, client)
-               __field(unsigned int, buf)
+               __field(void*, buf)
                __field(unsigned int, size)
                __field(int, fd)
        ),
@@ -58,7 +58,7 @@ TRACE_EVENT(ion_buffer_share,
                __entry->size = size;
                __entry->fd = fd;
        ),
-       TP_printk("client=%s,buffer=%08x:%d,fd=%d",
+       TP_printk("client=%s,buffer=%p:%d,fd=%d",
                  __get_str(client), __entry->buf, __entry->size, __entry->fd)
 );
 
@@ -81,13 +81,13 @@ DEFINE_EVENT(ion_client_op, ion_client_destroy,
        TP_ARGS(client));
 
 DECLARE_EVENT_CLASS(ion_iommu_op,
-       TP_PROTO(const char* client, unsigned int buf, unsigned int size,
+       TP_PROTO(const char* client, void* buf, unsigned int size,
                const char* iommu_dev, unsigned int iommu_addr,
                unsigned int iommu_size, unsigned int map_cnt),
        TP_ARGS(client, buf, size, iommu_dev, iommu_addr, iommu_size, map_cnt),
        TP_STRUCT__entry(
                __string(client, client)
-               __field(unsigned int, buf)
+               __field(void*, buf)
                __field(unsigned int, size)
                __string(iommu_dev, iommu_dev)
                __field(unsigned int, iommu_addr)
@@ -103,35 +103,35 @@ DECLARE_EVENT_CLASS(ion_iommu_op,
                __entry->iommu_size = iommu_size;
                __entry->map_cnt = map_cnt;
        ),
-       TP_printk("client=%s,buffer=%08x:%d,iommu=%s,map=%08x:%d,map_count=%d",
+       TP_printk("client=%s,buffer=%p:%d,iommu=%s,map=%08x:%d,map_count=%d",
                  __get_str(client), __entry->buf, __entry->size,
                  __get_str(iommu_dev), __entry->iommu_addr, __entry->iommu_size,
                  __entry->map_cnt)
 );
 DEFINE_EVENT(ion_iommu_op, ion_iommu_map,
-       TP_PROTO(const char* client, unsigned int buf, unsigned int size,
+       TP_PROTO(const char* client, void* buf, unsigned int size,
                const char* iommu_dev, unsigned int iommu_addr,
                unsigned int iommu_size, unsigned int map_cnt),
        TP_ARGS(client, buf, size, iommu_dev, iommu_addr, iommu_size, map_cnt));
 DEFINE_EVENT(ion_iommu_op, ion_iommu_unmap,
-       TP_PROTO(const char* client, unsigned int buf, unsigned int size,
+       TP_PROTO(const char* client, void* buf, unsigned int size,
                const char* iommu_dev, unsigned int iommu_addr,
                unsigned int iommu_size, unsigned int map_cnt),
        TP_ARGS(client, buf, size, iommu_dev, iommu_addr, iommu_size, map_cnt));
 DEFINE_EVENT(ion_iommu_op, ion_iommu_release,
-       TP_PROTO(const char* client, unsigned int buf, unsigned int size,
+       TP_PROTO(const char* client, void* buf, unsigned int size,
                const char* iommu_dev, unsigned int iommu_addr,
                unsigned int iommu_size, unsigned int map_cnt),
        TP_ARGS(client, buf, size, iommu_dev, iommu_addr, iommu_size, map_cnt));
 
 DECLARE_EVENT_CLASS(ion_kmap_op,
-       TP_PROTO(const char* client, unsigned int buf, unsigned int size, unsigned int kaddr),
+       TP_PROTO(const char* client, void* buf, unsigned int size, void* kaddr),
        TP_ARGS(client, buf, size, kaddr),
        TP_STRUCT__entry(
                __string(client, client)
-               __field(unsigned int, buf)
+               __field(void*, buf)
                __field(unsigned int, size)
-               __field(unsigned int, kaddr)
+               __field(void*, kaddr)
        ),
        TP_fast_assign(
                __assign_str(client, client);
@@ -139,11 +139,11 @@ DECLARE_EVENT_CLASS(ion_kmap_op,
                __entry->size = size;
                __entry->kaddr = kaddr;
        ),
-       TP_printk("client=%s,buffer=%08x:%d,kaddr=%08x",
+       TP_printk("client=%s,buffer=%p:%d,kaddr=%p",
                  __get_str(client), __entry->buf, __entry->size, __entry->kaddr)
 );
 DEFINE_EVENT(ion_kmap_op, ion_kernel_map,
-       TP_PROTO(const char* client, unsigned int buffer, unsigned int size, unsigned int kaddr),
+       TP_PROTO(const char* client, void* buffer, unsigned int size, void* kaddr),
        TP_ARGS(client, buffer, size, kaddr));
 
 DECLARE_EVENT_CLASS(ion_mmap_op,
index 6330af656a0f536d6a56580a5503eac75bdd3e53..bc23d66a7a1e87c98ed1d746347cb9d7520b952b 100644 (file)
@@ -115,6 +115,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
                .channel = 0,
                .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
                .address = AD5933_REG_TEMP_DATA,
+               .scan_index = -1,
                .scan_type = {
                        .sign = 's',
                        .realbits = 14,
@@ -124,9 +125,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
                .type = IIO_VOLTAGE,
                .indexed = 1,
                .channel = 0,
-               .extend_name = "real_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
-               BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "real",
                .address = AD5933_REG_REAL_DATA,
                .scan_index = 0,
                .scan_type = {
@@ -138,9 +137,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
                .type = IIO_VOLTAGE,
                .indexed = 1,
                .channel = 0,
-               .extend_name = "imag_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
-               BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "imag",
                .address = AD5933_REG_IMAG_DATA,
                .scan_index = 1,
                .scan_type = {
@@ -746,14 +743,14 @@ static int ad5933_probe(struct i2c_client *client,
        indio_dev->name = id->name;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->channels = ad5933_channels;
-       indio_dev->num_channels = 1; /* only register temp0_input */
+       indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
 
        ret = ad5933_register_ring_funcs_and_init(indio_dev);
        if (ret)
                goto error_disable_reg;
 
-       /* skip temp0_input, register in0_(real|imag)_raw */
-       ret = iio_buffer_register(indio_dev, &ad5933_channels[1], 2);
+       ret = iio_buffer_register(indio_dev, ad5933_channels,
+               ARRAY_SIZE(ad5933_channels));
        if (ret)
                goto error_unreg_ring;
 
index 07318203a836e86155760c7ec16c595094b37489..e8c98cf570701d4c32a51269c844862aea3c5f70 100644 (file)
@@ -119,7 +119,6 @@ struct ade7758_state {
        u8                      *tx;
        u8                      *rx;
        struct mutex            buf_lock;
-       const struct iio_chan_spec *ade7758_ring_channels;
        struct spi_transfer     ring_xfer[4];
        struct spi_message      ring_msg;
        /*
index 8f5bcfab3563a6675d1dd912d3461f83d6d3c9ee..75d9fe6a1bc1b84deacf6ad66201c3d3a218aa3f 100644 (file)
@@ -648,9 +648,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_VOLTAGE,
                .indexed = 1,
                .channel = 0,
-               .extend_name = "raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
                .scan_index = 0,
                .scan_type = {
@@ -662,9 +659,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_CURRENT,
                .indexed = 1,
                .channel = 0,
-               .extend_name = "raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
                .scan_index = 1,
                .scan_type = {
@@ -676,9 +670,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 0,
-               .extend_name = "apparent_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "apparent",
                .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
                .scan_index = 2,
                .scan_type = {
@@ -690,9 +682,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 0,
-               .extend_name = "active_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "active",
                .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
                .scan_index = 3,
                .scan_type = {
@@ -704,9 +694,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 0,
-               .extend_name = "reactive_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "reactive",
                .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
                .scan_index = 4,
                .scan_type = {
@@ -718,9 +706,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_VOLTAGE,
                .indexed = 1,
                .channel = 1,
-               .extend_name = "raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
                .scan_index = 5,
                .scan_type = {
@@ -732,9 +717,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_CURRENT,
                .indexed = 1,
                .channel = 1,
-               .extend_name = "raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
                .scan_index = 6,
                .scan_type = {
@@ -746,9 +728,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 1,
-               .extend_name = "apparent_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "apparent",
                .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
                .scan_index = 7,
                .scan_type = {
@@ -760,9 +740,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 1,
-               .extend_name = "active_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "active",
                .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
                .scan_index = 8,
                .scan_type = {
@@ -774,9 +752,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 1,
-               .extend_name = "reactive_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "reactive",
                .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
                .scan_index = 9,
                .scan_type = {
@@ -788,9 +764,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_VOLTAGE,
                .indexed = 1,
                .channel = 2,
-               .extend_name = "raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
                .scan_index = 10,
                .scan_type = {
@@ -802,9 +775,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_CURRENT,
                .indexed = 1,
                .channel = 2,
-               .extend_name = "raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
                .scan_index = 11,
                .scan_type = {
@@ -816,9 +786,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 2,
-               .extend_name = "apparent_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "apparent",
                .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
                .scan_index = 12,
                .scan_type = {
@@ -830,9 +798,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 2,
-               .extend_name = "active_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "active",
                .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
                .scan_index = 13,
                .scan_type = {
@@ -844,9 +810,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 2,
-               .extend_name = "reactive_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "reactive",
                .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
                .scan_index = 14,
                .scan_type = {
@@ -890,13 +854,14 @@ static int ade7758_probe(struct spi_device *spi)
                goto error_free_rx;
        }
        st->us = spi;
-       st->ade7758_ring_channels = &ade7758_channels[0];
        mutex_init(&st->buf_lock);
 
        indio_dev->name = spi->dev.driver->name;
        indio_dev->dev.parent = &spi->dev;
        indio_dev->info = &ade7758_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->channels = ade7758_channels;
+       indio_dev->num_channels = ARRAY_SIZE(ade7758_channels);
 
        ret = ade7758_configure_ring(indio_dev);
        if (ret)
index b29e2d5d993773c0e5ff4e22e97e8f48f349925f..6a0ef97e9146535514960996313f47a4bde362dc 100644 (file)
@@ -89,11 +89,10 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
  **/
 static int ade7758_ring_preenable(struct iio_dev *indio_dev)
 {
-       struct ade7758_state *st = iio_priv(indio_dev);
        unsigned channel;
        int ret;
 
-       if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
+       if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
                return -EINVAL;
 
        ret = iio_sw_buffer_preenable(indio_dev);
@@ -104,7 +103,7 @@ static int ade7758_ring_preenable(struct iio_dev *indio_dev)
                                 indio_dev->masklength);
 
        ade7758_write_waveform_type(&indio_dev->dev,
-               st->ade7758_ring_channels[channel].address);
+               indio_dev->channels[channel].address);
 
        return 0;
 }
index 7a94ddd42f593e7bc1355cd06ebfcb6c400ff014..8c4f2896cd0d6fbf119a2395f40d58569f2bd39a 100644 (file)
@@ -85,7 +85,7 @@ int ade7758_probe_trigger(struct iio_dev *indio_dev)
        ret = iio_trigger_register(st->trig);
 
        /* select default trigger */
-       indio_dev->trig = st->trig;
+       indio_dev->trig = iio_trigger_get(st->trig);
        if (ret)
                goto error_free_irq;
 
index f67941e78e4a45cc755897a12ba9231b53c1547f..b9359753784eaf9aee8e94d6d10f53fa3f240a38 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/device.h> /* for dev_warn */
 #include <linux/selection.h>
 #include <linux/workqueue.h>
+#include <linux/tty.h>
 #include <asm/cmpxchg.h>
 
 #include "speakup.h"
@@ -135,8 +136,12 @@ static void __speakup_paste_selection(struct work_struct *work)
        struct tty_struct *tty = xchg(&spw->tty, NULL);
        struct vc_data *vc = (struct vc_data *) tty->driver_data;
        int pasted = 0, count;
+       struct tty_ldisc *ld;
        DECLARE_WAITQUEUE(wait, current);
 
+       ld = tty_ldisc_ref_wait(tty);
+
+       /* FIXME: this is completely unsafe */
        add_wait_queue(&vc->paste_wait, &wait);
        while (sel_buffer && sel_buffer_lth > pasted) {
                set_current_state(TASK_INTERRUPTIBLE);
@@ -146,12 +151,13 @@ static void __speakup_paste_selection(struct work_struct *work)
                }
                count = sel_buffer_lth - pasted;
                count = min_t(int, count, tty->receive_room);
-               tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
-                       NULL, count);
+               ld->ops->receive_buf(tty, sel_buffer + pasted, NULL, count);
                pasted += count;
        }
        remove_wait_queue(&vc->paste_wait, &wait);
        current->state = TASK_RUNNING;
+
+       tty_ldisc_deref(ld);
        tty_kref_put(tty);
 }
 
index f983915168b7883c30a435c4561b8571abb77f75..3496a77612bac6b335964ed014a3748415923425 100644 (file)
@@ -1026,7 +1026,7 @@ start:
                pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
        }
 
-       {
+       if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) {
                pDevice->byReAssocCount++;
                if ((pDevice->byReAssocCount > 10) && (pDevice->bLinkPass != true)) {  //10 sec timeout
                        printk("Re-association timeout!!!\n");
index 08b250f01dae6e7fd2f323cf9f7a06920266fe11..d170b6f9db7cf2bd88ee46b85508f1a18565a2e1 100644 (file)
@@ -2434,6 +2434,7 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
        int             handled = 0;
        unsigned char byData = 0;
        int             ii = 0;
+       unsigned long flags;
 //    unsigned char byRSSI;
 
        MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
@@ -2459,7 +2460,8 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
 
        handled = 1;
        MACvIntDisable(pDevice->PortOffset);
-       spin_lock_irq(&pDevice->lock);
+
+       spin_lock_irqsave(&pDevice->lock, flags);
 
        //Make sure current page is 0
        VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel);
@@ -2700,7 +2702,8 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
                MACvSelectPage1(pDevice->PortOffset);
        }
 
-       spin_unlock_irq(&pDevice->lock);
+       spin_unlock_irqrestore(&pDevice->lock, flags);
+
        MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
 
        return IRQ_RETVAL(handled);
index 799f84e686b5d9ecd6938f3dcdba379995d9edec..651b5768862f38c5384438ff5013b07ac9a0cf86 100644 (file)
@@ -4453,6 +4453,7 @@ static void iscsit_logout_post_handler_diffcid(
 {
        struct iscsi_conn *l_conn;
        struct iscsi_session *sess = conn->sess;
+       bool conn_found = false;
 
        if (!sess)
                return;
@@ -4461,12 +4462,13 @@ static void iscsit_logout_post_handler_diffcid(
        list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
                if (l_conn->cid == cid) {
                        iscsit_inc_conn_usage_count(l_conn);
+                       conn_found = true;
                        break;
                }
        }
        spin_unlock_bh(&sess->conn_lock);
 
-       if (!l_conn)
+       if (!conn_found)
                return;
 
        if (l_conn->sock)
index e38222191a33b7c19ef71e827a6d995f31dc8271..30be6c9bdbc6b7f467d9055b16a731aec4bd3520 100644 (file)
@@ -603,7 +603,7 @@ int iscsi_copy_param_list(
        param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
        if (!param_list) {
                pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
-               goto err_out;
+               return -1;
        }
        INIT_LIST_HEAD(&param_list->param_list);
        INIT_LIST_HEAD(&param_list->extra_response_list);
index 68398753eb826b65b92aa5e1a0eddbf1e1e3775e..2be407e22eb499902bd2320e976b0c178fe4bfd3 100644 (file)
@@ -1293,7 +1293,8 @@ int core_dev_add_initiator_node_lun_acl(
         * Check to see if there are any existing persistent reservation APTPL
         * pre-registrations that need to be enabled for this LUN ACL..
         */
-       core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
+       core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
+                                           lacl->mapped_lun);
        return 0;
 }
 
index 04a74938bb43e580f68a22b8745517762ad8970c..27ec6e4d1c7cb744101408b401c2d16c5eab1d51 100644 (file)
@@ -945,10 +945,10 @@ int core_scsi3_check_aptpl_registration(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_lun *lun,
-       struct se_lun_acl *lun_acl)
+       struct se_node_acl *nacl,
+       u32 mapped_lun)
 {
-       struct se_node_acl *nacl = lun_acl->se_lun_nacl;
-       struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
+       struct se_dev_entry *deve = nacl->device_list[mapped_lun];
 
        if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
                return 0;
index b4a004247ab298a6123c0da08c3d588e5a8e0114..ea9220de1dff56172a25000389279f1001e3498b 100644 (file)
@@ -55,7 +55,7 @@ extern int core_scsi3_alloc_aptpl_registration(
                        unsigned char *, u16, u32, int, int, u8);
 extern int core_scsi3_check_aptpl_registration(struct se_device *,
                        struct se_portal_group *, struct se_lun *,
-                       struct se_lun_acl *);
+                       struct se_node_acl *, u32);
 extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
                                             struct se_node_acl *);
 extern void core_scsi3_free_all_registrations(struct se_device *);
index aac9d2727e3c8584a20db563ef5c914bc758de19..8572207e3d4d77cf7a800855043dd26be7362844 100644 (file)
@@ -40,6 +40,7 @@
 #include <target/target_core_fabric.h>
 
 #include "target_core_internal.h"
+#include "target_core_pr.h"
 
 extern struct se_device *g_lun0_dev;
 
@@ -165,6 +166,13 @@ void core_tpg_add_node_to_devs(
 
                core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                lun_access, acl, tpg);
+               /*
+                * Check to see if there are any existing persistent reservation
+                * APTPL pre-registrations that need to be enabled for this dynamic
+                * LUN ACL now..
+                */
+               core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
+                                                   lun->unpacked_lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
index 6866d86e86630bfa8cd15e6c7269af1370f23073..12342695ed7972b76a16b447c64224df1fc36973 100644 (file)
@@ -1788,8 +1788,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
 
        if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
                ret = cmd->se_tfo->queue_status(cmd);
-               if (ret)
-                       goto out;
+               goto out;
        }
 
        switch (cmd->data_direction) {
index 78de7947afac39b7585977088010d407f81a8063..4246262c4bd249f5ee0d3cec5f3712d1fdd9bbc6 100644 (file)
@@ -167,13 +167,20 @@ static int get_property(unsigned int cpu, unsigned long input,
                        continue;
 
                /* get the frequency order */
-               if (freq != CPUFREQ_ENTRY_INVALID && descend != -1)
+               if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
                        descend = !!(freq > table[i].frequency);
 
                freq = table[i].frequency;
                max_level++;
        }
 
+       /* No valid cpu frequency entry */
+       if (max_level == 0)
+               return -EINVAL;
+
+       /* max_level is an index, not a counter */
+       max_level--;
+
        /* get max level */
        if (property == GET_MAXL) {
                *output = (unsigned int)max_level;
@@ -181,7 +188,7 @@ static int get_property(unsigned int cpu, unsigned long input,
        }
 
        if (property == GET_FREQ)
-               level = descend ? input : (max_level - input - 1);
+               level = descend ? input : (max_level - input);
 
        for (i = 0, j = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
                /* ignore invalid entry */
@@ -197,7 +204,7 @@ static int get_property(unsigned int cpu, unsigned long input,
 
                if (property == GET_LEVEL && (unsigned int)input == freq) {
                        /* get level by frequency */
-                       *output = descend ? j : (max_level - j - 1);
+                       *output = descend ? j : (max_level - j);
                        return 0;
                }
                if (property == GET_FREQ && level == j) {
@@ -322,6 +329,8 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 
        if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
                max_freq = notify_device->cpufreq_val;
+       else
+               return 0;
 
        /* Never exceed user_policy.max */
        if (max_freq > policy->user_policy.max)
@@ -473,7 +482,7 @@ __cpufreq_cooling_register(struct device_node *np,
        if (IS_ERR(cool_dev)) {
                release_idr(&cpufreq_idr, cpufreq_dev->id);
                kfree(cpufreq_dev);
-               return ERR_PTR(-EINVAL);
+               return cool_dev;
        }
        cpufreq_dev->cool_dev = cool_dev;
        cpufreq_dev->cpufreq_state = 0;
@@ -540,8 +549,12 @@ EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
  */
 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 {
-       struct cpufreq_cooling_device *cpufreq_dev = cdev->devdata;
+       struct cpufreq_cooling_device *cpufreq_dev;
+
+       if (!cdev)
+               return;
 
+       cpufreq_dev = cdev->devdata;
        mutex_lock(&cooling_cpufreq_lock);
        cpufreq_dev_count--;
 
index 04b1be7fa018ef5caef4ff748d6fb4d1b4d93ddc..4b2b999b7611cb04390dfa1c6c3b5ac208d26069 100644 (file)
@@ -156,8 +156,8 @@ static int of_thermal_bind(struct thermal_zone_device *thermal,
 
                        ret = thermal_zone_bind_cooling_device(thermal,
                                                tbp->trip_id, cdev,
-                                               tbp->min,
-                                               tbp->max);
+                                               tbp->max,
+                                               tbp->min);
                        if (ret)
                                return ret;
                }
@@ -712,11 +712,12 @@ thermal_of_build_thermal_zone(struct device_node *np)
        }
 
        i = 0;
-       for_each_child_of_node(child, gchild)
+       for_each_child_of_node(child, gchild) {
                ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++],
                                                      tz->trips, tz->ntrips);
                if (ret)
                        goto free_tbps;
+       }
 
 finish:
        of_node_put(child);
index 83cc99bb5aeb71076155a05649bb0b397bbf0bd8..edc0cb88f1d0bb4f43270684619c15dc0427ec47 100644 (file)
@@ -56,10 +56,15 @@ static LIST_HEAD(thermal_governor_list);
 static DEFINE_MUTEX(thermal_list_lock);
 static DEFINE_MUTEX(thermal_governor_lock);
 
+static struct thermal_governor *def_governor;
+
 static struct thermal_governor *__find_governor(const char *name)
 {
        struct thermal_governor *pos;
 
+       if (!name || !name[0])
+               return def_governor;
+
        list_for_each_entry(pos, &thermal_governor_list, governor_list)
                if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH))
                        return pos;
@@ -82,17 +87,23 @@ int thermal_register_governor(struct thermal_governor *governor)
        if (__find_governor(governor->name) == NULL) {
                err = 0;
                list_add(&governor->governor_list, &thermal_governor_list);
+               if (!def_governor && !strncmp(governor->name,
+                       DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH))
+                       def_governor = governor;
        }
 
        mutex_lock(&thermal_list_lock);
 
        list_for_each_entry(pos, &thermal_tz_list, node) {
+               /*
+                * only thermal zones with specified tz->tzp->governor_name
+                * may run with tz->govenor unset
+                */
                if (pos->governor)
                        continue;
-               if (pos->tzp)
-                       name = pos->tzp->governor_name;
-               else
-                       name = DEFAULT_THERMAL_GOVERNOR;
+
+               name = pos->tzp->governor_name;
+
                if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH))
                        pos->governor = governor;
        }
@@ -238,10 +249,11 @@ static void bind_cdev(struct thermal_cooling_device *cdev)
                if (!pos->tzp && !pos->ops->bind)
                        continue;
 
-               if (!pos->tzp && pos->ops->bind) {
+               if (pos->ops->bind) {
                        ret = pos->ops->bind(pos, cdev);
                        if (ret)
                                print_bind_err_msg(pos, cdev, ret);
+                       continue;
                }
 
                tzp = pos->tzp;
@@ -272,8 +284,8 @@ static void bind_tz(struct thermal_zone_device *tz)
 
        mutex_lock(&thermal_list_lock);
 
-       /* If there is no platform data, try to use ops->bind */
-       if (!tzp && tz->ops->bind) {
+       /* If there is ops->bind, try to use ops->bind */
+       if (tz->ops->bind) {
                list_for_each_entry(pos, &thermal_cdev_list, node) {
                        ret = tz->ops->bind(tz, pos);
                        if (ret)
@@ -329,8 +341,8 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz)
 static void handle_non_critical_trips(struct thermal_zone_device *tz,
                        int trip, enum thermal_trip_type trip_type)
 {
-       if (tz->governor)
-               tz->governor->throttle(tz, trip);
+       tz->governor ? tz->governor->throttle(tz, trip) :
+                      def_governor->throttle(tz, trip);
 }
 
 static void handle_critical_trips(struct thermal_zone_device *tz,
@@ -765,6 +777,9 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
                ret = tz->ops->set_emul_temp(tz, temperature);
        }
 
+       if (!ret)
+               thermal_zone_device_update(tz);
+
        return ret ? ret : count;
 }
 static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
@@ -1087,7 +1102,7 @@ __thermal_cooling_device_register(struct device_node *np,
        INIT_LIST_HEAD(&cdev->thermal_instances);
        cdev->np = np;
        cdev->ops = ops;
-       cdev->updated = true;
+       cdev->updated = false;
        cdev->device.class = &thermal_class;
        cdev->devdata = devdata;
        dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
@@ -1513,7 +1528,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
        if (tz->tzp)
                tz->governor = __find_governor(tz->tzp->governor_name);
        else
-               tz->governor = __find_governor(DEFAULT_THERMAL_GOVERNOR);
+               tz->governor = def_governor;
 
        mutex_unlock(&thermal_governor_lock);
 
index ab9096dc384976de15c41f0d5f39b6975b72a45d..148ffe4c232f6bd8322d3387d41f227d5d811263 100644 (file)
@@ -192,21 +192,28 @@ int serial8250_request_dma(struct uart_8250_port *p)
 
        dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
                                        &dma->rx_addr, GFP_KERNEL);
-       if (!dma->rx_buf) {
-               dma_release_channel(dma->rxchan);
-               dma_release_channel(dma->txchan);
-               return -ENOMEM;
-       }
+       if (!dma->rx_buf)
+               goto err;
 
        /* TX buffer */
        dma->tx_addr = dma_map_single(dma->txchan->device->dev,
                                        p->port.state->xmit.buf,
                                        UART_XMIT_SIZE,
                                        DMA_TO_DEVICE);
+       if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
+               dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
+                                 dma->rx_buf, dma->rx_addr);
+               goto err;
+       }
 
        dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");
 
        return 0;
+err:
+       dma_release_channel(dma->rxchan);
+       dma_release_channel(dma->txchan);
+
+       return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(serial8250_request_dma);
 
index e83c9db3300cac9525889d437fb4204cf53eb3ef..4858b8a99d3b4fcdbd4702c08a89442c6fb795fa 100644 (file)
@@ -144,8 +144,11 @@ static int __init early_serial8250_setup(struct earlycon_device *device,
        if (!(device->port.membase || device->port.iobase))
                return 0;
 
-       if (!device->baud)
+       if (!device->baud) {
                device->baud = probe_baud(&device->port);
+               snprintf(device->options, sizeof(device->options), "%u",
+                        device->baud);
+       }
 
        init_port(device);
 
@@ -156,6 +159,16 @@ static int __init early_serial8250_setup(struct earlycon_device *device,
 EARLYCON_DECLARE(uart8250, early_serial8250_setup);
 EARLYCON_DECLARE(uart, early_serial8250_setup);
 
+int __init setup_early_serial8250_console(char *cmdline)
+{
+       char match[] = "uart8250";
+
+       if (cmdline && cmdline[4] == ',')
+               match[4] = '\0';
+
+       return setup_earlycon(cmdline, match, early_serial8250_setup);
+}
+
 int serial8250_find_port_for_earlycon(void)
 {
        struct earlycon_device *device = early_device;
index 73bf1e21aae042275088173ed9caa9ae68136947..4f27f788ac6f4008244b022f1aed180c0f8b3736 100644 (file)
@@ -23,7 +23,7 @@
 #include <asm/serial.h>
 
 static struct console early_con = {
-       .name =         "earlycon",
+       .name =         "uart", /* 8250 console switch requires this name */
        .flags =        CON_PRINTBUFFER | CON_BOOT,
        .index =        -1,
 };
index 1f1f61d43a163ef34c048495b1ad025ec7cc9c45..ea915c6a41b4881f4e99789146af681a48af51a5 100644 (file)
@@ -327,7 +327,7 @@ static inline void serial_out(struct uart_rk_port *up, int offset, unsigned char
        dwapb_save_out_value(up, offset, value);\r
        __raw_writel(value, up->port.membase + (offset << 2));\r
        if (offset != UART_TX)\r
-               dsb();\r
+               dsb(sy);\r
        dwapb_check_clear_ier(up, offset);\r
 }\r
 \r
index 7855f3a4ad05ceeda9f96068b45b37e632dda7d1..57977ec53a7a0c782ae120b4c94f5ef71ac2975b 100644 (file)
@@ -244,6 +244,9 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
                /*
                 * Turn off DTR and RTS early.
                 */
+               if (uart_console(uport) && tty)
+                       uport->cons->cflag = tty->termios.c_cflag;
+
                if (!tty || (tty->termios.c_cflag & HUPCL))
                        uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
 
@@ -359,7 +362,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
                 * The spd_hi, spd_vhi, spd_shi, spd_warp kludge...
                 * Die! Die! Die!
                 */
-               if (baud == 38400)
+               if (try == 0 && baud == 38400)
                        baud = altbaud;
 
                /*
index a422c8b55a47b427b34ec09a3bda5b563ea6d8a0..aa53fee1df63c870afb5d407ef47fac42c609e56 100644 (file)
@@ -157,6 +157,15 @@ receive_chars(struct uart_sunsab_port *up,
            (up->port.line == up->port.cons->index))
                saw_console_brk = 1;
 
+       if (count == 0) {
+               if (unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) {
+                       stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR |
+                                            SAB82532_ISR0_FERR);
+                       up->port.icount.brk++;
+                       uart_handle_break(&up->port);
+               }
+       }
+
        for (i = 0; i < count; i++) {
                unsigned char ch = buf[i], flag;
 
index 3723c0ebb3161b6ae03560c44742a259de4f31eb..d35afccdb6c9ea18d0f6fef5f7bb50ee68f3cf84 100644 (file)
@@ -1698,6 +1698,7 @@ int tty_release(struct inode *inode, struct file *filp)
        int     pty_master, tty_closing, o_tty_closing, do_sleep;
        int     idx;
        char    buf[64];
+       long    timeout = 0;
 
        if (tty_paranoia_check(tty, inode, __func__))
                return 0;
@@ -1782,7 +1783,11 @@ int tty_release(struct inode *inode, struct file *filp)
                                __func__, tty_name(tty, buf));
                tty_unlock_pair(tty, o_tty);
                mutex_unlock(&tty_mutex);
-               schedule();
+               schedule_timeout_killable(timeout);
+               if (timeout < 120 * HZ)
+                       timeout = 2 * timeout + 1;
+               else
+                       timeout = MAX_SCHEDULE_TIMEOUT;
        }
 
        /*
index fbf3f11aed2c6c2914e8c7c0ea9fdf2f2e1465a1..1e71f918eb9fbbfbdf979826a4d01f022d0f8101 100644 (file)
@@ -883,11 +883,12 @@ static void acm_tty_set_termios(struct tty_struct *tty,
        /* FIXME: Needs to clear unsupported bits in the termios */
        acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
 
-       if (!newline.dwDTERate) {
+       if (C_BAUD(tty) == B0) {
                newline.dwDTERate = acm->line.dwDTERate;
                newctrl &= ~ACM_CTRL_DTR;
-       } else
+       } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
                newctrl |=  ACM_CTRL_DTR;
+       }
 
        if (newctrl != acm->ctrlout)
                acm_set_control(acm, acm->ctrlout = newctrl);
@@ -1588,6 +1589,7 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
        .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
        },
+       { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
        { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
        },
        /* Motorola H24 HSPA module: */
index 01a518b927ea6cb94bf5dd11c3cf1a87c89bc1a4..b62703b89f7db2d7acd6c94517ced237dea9faae 100755 (executable)
@@ -1951,6 +1951,8 @@ int usb_alloc_streams(struct usb_interface *interface,
                return -EINVAL;
        if (dev->speed != USB_SPEED_SUPER)
                return -EINVAL;
+       if (dev->state < USB_STATE_CONFIGURED)
+               return -ENODEV;
 
        /* Streams only apply to bulk endpoints. */
        for (i = 0; i < num_eps; i++)
index eff3ee6a62e07f36c0e3c965440f20ecde646811..2260781b50af26e5be83c84b7f6f72aa415a939f 100755 (executable)
@@ -887,6 +887,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
        if (!hub_is_superspeed(hub->hdev))
                return -EINVAL;
 
+       ret = hub_port_status(hub, port1, &portstatus, &portchange);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
+        * Controller [1022:7814] will have spurious result making the following
+        * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
+        * as high-speed device if we set the usb 3.0 port link state to
+        * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
+        * check the state here to avoid the bug.
+        */
+       if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                               USB_SS_PORT_LS_RX_DETECT) {
+               dev_dbg(&hub->ports[port1 - 1]->dev,
+                        "Not disabling port; link state is RxDetect\n");
+               return ret;
+       }
+
        ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
        if (ret)
                return ret;
@@ -1146,7 +1165,8 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
                        /* Tell khubd to disconnect the device or
                         * check for a new connection
                         */
-                       if (udev || (portstatus & USB_PORT_STAT_CONNECTION))
+                       if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
+                           (portstatus & USB_PORT_STAT_OVERCURRENT))
                                set_bit(port1, hub->change_bits);
 
                } else if (portstatus & USB_PORT_STAT_ENABLE) {
@@ -1681,8 +1701,14 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
         * - Change autosuspend delay of hub can avoid unnecessary auto
         *   suspend timer for hub, also may decrease power consumption
         *   of USB bus.
+        *
+        * - If user has indicated to prevent autosuspend by passing
+        *   usbcore.autosuspend = -1 then keep autosuspend disabled.
         */
-       pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
+#ifdef CONFIG_PM_RUNTIME
+       if (hdev->dev.power.autosuspend_delay >= 0)
+               pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
+#endif
 
        /*
         * Hubs have proper suspend/resume support, except for root hubs
@@ -1929,8 +1955,10 @@ void usb_set_device_state(struct usb_device *udev,
                                        || new_state == USB_STATE_SUSPENDED)
                                ;       /* No change to wakeup settings */
                        else if (new_state == USB_STATE_CONFIGURED)
-                               wakeup = udev->actconfig->desc.bmAttributes
-                                        & USB_CONFIG_ATT_WAKEUP;
+                               wakeup = (udev->quirks &
+                                       USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 0 :
+                                       udev->actconfig->desc.bmAttributes &
+                                       USB_CONFIG_ATT_WAKEUP;
                        else
                                wakeup = 0;
                }
@@ -3150,6 +3178,43 @@ static int finish_port_resume(struct usb_device *udev)
        return status;
 }
 
+/*
+ * There are some SS USB devices which take longer time for link training.
+ * XHCI specs 4.19.4 says that when Link training is successful, port
+ * sets CSC bit to 1. So if SW reads port status before successful link
+ * training, then it will not find device to be present.
+ * USB Analyzer log with such buggy devices show that in some cases
+ * device switch on the RX termination after long delay of host enabling
+ * the VBUS. In few other cases it has been seen that device fails to
+ * negotiate link training in first attempt. It has been
+ * reported till now that few devices take as long as 2000 ms to train
+ * the link after host enabling its VBUS and termination. Following
+ * routine implements a 2000 ms timeout for link training. If in a case
+ * link trains before timeout, loop will exit earlier.
+ *
+ * FIXME: If a device was connected before suspend, but was removed
+ * while system was asleep, then the loop in the following routine will
+ * only exit at timeout.
+ *
+ * This routine should only be called when persist is enabled for a SS
+ * device.
+ */
+static int wait_for_ss_port_enable(struct usb_device *udev,
+               struct usb_hub *hub, int *port1,
+               u16 *portchange, u16 *portstatus)
+{
+       int status = 0, delay_ms = 0;
+
+       while (delay_ms < 2000) {
+               if (status || *portstatus & USB_PORT_STAT_CONNECTION)
+                       break;
+               msleep(20);
+               delay_ms += 20;
+               status = hub_port_status(hub, *port1, portstatus, portchange);
+       }
+       return status;
+}
+
 /*
  * usb_port_resume - re-activate a suspended usb device's upstream port
  * @udev: device to re-activate, not a root hub
@@ -3252,6 +3317,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 
        clear_bit(port1, hub->busy_bits);
 
+       if (udev->persist_enabled && hub_is_superspeed(hub->hdev))
+               status = wait_for_ss_port_enable(udev, hub, &port1, &portchange,
+                               &portstatus);
+
        status = check_port_resume_type(udev,
                        hub, port1, status, portchange, portstatus);
        if (status == 0)
@@ -4646,9 +4715,10 @@ static void hub_events(void)
 
                hub = list_entry(tmp, struct usb_hub, event_list);
                kref_get(&hub->kref);
+               hdev = hub->hdev;
+               usb_get_dev(hdev);
                spin_unlock_irq(&hub_event_lock);
 
-               hdev = hub->hdev;
                hub_dev = hub->intfdev;
                intf = to_usb_interface(hub_dev);
                dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
@@ -4863,6 +4933,7 @@ static void hub_events(void)
                usb_autopm_put_interface(intf);
  loop_disconnected:
                usb_unlock_device(hdev);
+               usb_put_dev(hdev);
                kref_put(&hub->kref, hub_release);
 
         } /* end while (1) */
index 1053eb651b2f945d2647715a3990a51138fcb0e0..a301b3fa622b6a3e7502b032f5897f1704c8f2bb 100644 (file)
@@ -162,6 +162,10 @@ static const struct usb_device_id usb_interface_quirk_list[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
          .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* ASUS Base Station(T100) */
+       { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
+                       USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
        { }  /* terminating entry must be last */
 };
 
index 358375e0b291098d8be40d65bdfbbff7f97f0826..1d386030d3c41a84ce32021535c6e0fde1c31cc1 100644 (file)
@@ -603,12 +603,6 @@ static int dwc3_remove(struct platform_device *pdev)
 {
        struct dwc3     *dwc = platform_get_drvdata(pdev);
 
-       usb_phy_set_suspend(dwc->usb2_phy, 1);
-       usb_phy_set_suspend(dwc->usb3_phy, 1);
-
-       pm_runtime_put(&pdev->dev);
-       pm_runtime_disable(&pdev->dev);
-
        dwc3_debugfs_exit(dwc);
 
        switch (dwc->mode) {
@@ -629,8 +623,15 @@ static int dwc3_remove(struct platform_device *pdev)
 
        dwc3_event_buffers_cleanup(dwc);
        dwc3_free_event_buffers(dwc);
+
+       usb_phy_set_suspend(dwc->usb2_phy, 1);
+       usb_phy_set_suspend(dwc->usb3_phy, 1);
+
        dwc3_core_exit(dwc);
 
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+
        return 0;
 }
 
index 34638b92500d0606e1718a3e6ae43a7d3d2d0b46..cb5f8c44eb3a75106e5e94996ee98a5a5eaf9aee 100644 (file)
@@ -395,9 +395,9 @@ static int dwc3_omap_remove(struct platform_device *pdev)
        struct dwc3_omap        *omap = platform_get_drvdata(pdev);
 
        dwc3_omap_disable_irqs(omap);
+       device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-       device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
 
        return 0;
 }
index 3cea676ba901f95cb292afd5a0cf9f6980df9bdc..6cd418f6ac071a12305232673ba335e57598dfcc 100644 (file)
@@ -270,7 +270,7 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
 
        /* stall is always issued on EP0 */
        dep = dwc->eps[0];
-       __dwc3_gadget_ep_set_halt(dep, 1);
+       __dwc3_gadget_ep_set_halt(dep, 1, false);
        dep->flags = DWC3_EP_ENABLED;
        dwc->delayed_status = false;
 
@@ -480,7 +480,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
                                return -EINVAL;
                        if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
                                break;
-                       ret = __dwc3_gadget_ep_set_halt(dep, set);
+                       ret = __dwc3_gadget_ep_set_halt(dep, set, true);
                        if (ret)
                                return -EINVAL;
                        break;
index d868b62c1a16586355197dc1f9c7a0191e7fc264..8f8e75e392de0efc47caf4fb401351269b1bac42 100644 (file)
@@ -550,12 +550,11 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
                if (!usb_endpoint_xfer_isoc(desc))
                        return 0;
 
-               memset(&trb_link, 0, sizeof(trb_link));
-
                /* Link TRB for ISOC. The HWO bit is never reset */
                trb_st_hw = &dep->trb_pool[0];
 
                trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
+               memset(trb_link, 0, sizeof(*trb_link));
 
                trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
                trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
@@ -606,7 +605,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
 
        /* make sure HW endpoint isn't stalled */
        if (dep->flags & DWC3_EP_STALL)
-               __dwc3_gadget_ep_set_halt(dep, 0);
+               __dwc3_gadget_ep_set_halt(dep, 0, false);
 
        reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
        reg &= ~DWC3_DALEPENA_EP(dep->number);
@@ -1206,7 +1205,7 @@ out0:
        return ret;
 }
 
-int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
+int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
 {
        struct dwc3_gadget_ep_cmd_params        params;
        struct dwc3                             *dwc = dep->dwc;
@@ -1215,6 +1214,14 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
        memset(&params, 0x00, sizeof(params));
 
        if (value) {
+               if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
+                               (!list_empty(&dep->req_queued) ||
+                                !list_empty(&dep->request_list)))) {
+                       dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
+                                       dep->name);
+                       return -EAGAIN;
+               }
+
                ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
                        DWC3_DEPCMD_SETSTALL, &params);
                if (ret)
@@ -1254,7 +1261,7 @@ static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
                goto out;
        }
 
-       ret = __dwc3_gadget_ep_set_halt(dep, value);
+       ret = __dwc3_gadget_ep_set_halt(dep, value, false);
 out:
        spin_unlock_irqrestore(&dwc->lock, flags);
 
@@ -1274,7 +1281,7 @@ static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
        if (dep->number == 0 || dep->number == 1)
                return dwc3_gadget_ep0_set_halt(ep, 1);
        else
-               return dwc3_gadget_ep_set_halt(ep, 1);
+               return __dwc3_gadget_ep_set_halt(dep, 1, false);
 }
 
 /* -------------------------------------------------------------------------- */
index 99e6d7248820c790aa968c086ac6ef1f70e91c03..b3f25c302e3548b98fbf5a22c4397e4bf2a423db 100644 (file)
@@ -114,7 +114,7 @@ void dwc3_ep0_out_start(struct dwc3 *dwc);
 int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
 int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
                gfp_t gfp_flags);
-int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value);
+int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
                unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param);
index b9da543e8004ea3a63ba0e2ec5e73d044bbc5499..f729139257d69511b09f1a4a687771ae165566b3 100755 (executable)
@@ -594,7 +594,7 @@ uint64_t DWC_READ_REG64(volatile uint64_t *reg)
 void DWC_WRITE_REG32(volatile uint32_t *reg, uint32_t value)
 {
        writel_relaxed(value, reg);
-       dsb();
+       dsb(sy);
 }
 
 #if 0
@@ -606,7 +606,7 @@ void DWC_WRITE_REG64(volatile uint64_t *reg, uint64_t value)
 void DWC_MODIFY_REG32(volatile uint32_t *reg, uint32_t clear_mask, uint32_t set_mask)
 {
        writel_relaxed((readl_relaxed(reg) & ~clear_mask) | set_mask, reg);
-       dsb();
+       dsb(sy);
 }
 
 #if 0
index 62bacae0f17a6930c3fc88544c9b45502ed835ee..e9105afd989acb2c0e3f45ae8c5a259d108e1c70 100755 (executable)
@@ -61,7 +61,6 @@ extern "C" {
 # endif
 # include <linux/errno.h>
 # include <stdarg.h>
-# include <asm/system.h>
 #endif
 
 #if defined(DWC_FREEBSD) || defined(DWC_NETBSD)
index 53e50b5e86128a70d07c044581b3d1a2ae5995f0..a401acdceb4dc0f98dc7cd6c016aa95a7b41ec14 100644 (file)
@@ -662,10 +662,17 @@ static ssize_t acc_write(struct file *fp, const char __user *buf,
                        break;
                }
 
-               if (count > BULK_BUFFER_SIZE)
+               if (count > BULK_BUFFER_SIZE) {
                        xfer = BULK_BUFFER_SIZE;
-               else
+                       /* No ZLP: there will be more TX requests, so not yet. */
+                       req->zero = 0;
+               } else {
                        xfer = count;
+                       /* If the data length is a multiple of the
+                        * maxpacket size then send a zero length packet (ZLP).
+                        */
+                       req->zero = ((xfer % dev->ep_in->maxpacket) == 0);
+               }
                if (copy_from_user(req->buf, buf, xfer)) {
                        r = -EFAULT;
                        break;
index ab1065afbbd0c0cac55788dad3b805f9a240b2a9..3384486c288476ae7f538b0bfe41f346ff4ee414 100644 (file)
@@ -430,11 +430,12 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
                if (acm->notify->driver_data) {
                        VDBG(cdev, "reset acm control interface %d\n", intf);
                        usb_ep_disable(acm->notify);
-               } else {
-                       VDBG(cdev, "init acm ctrl interface %d\n", intf);
+               }
+
+               if (!acm->notify->desc)
                        if (config_ep_by_speed(cdev->gadget, f, acm->notify))
                                return -EINVAL;
-               }
+
                usb_ep_enable(acm->notify);
                acm->notify->driver_data = acm;
 
index 56dcf217cfe5647e592304ef94f8cc70890f0243..21ced13c83d801b39c3bbb564bb5005706d3f42c 100644 (file)
@@ -24,7 +24,7 @@
 #define SAMPLE_RATE 44100
 #define FRAMES_PER_MSEC (SAMPLE_RATE / 1000)
 
-#define IN_EP_MAX_PACKET_SIZE 384
+#define IN_EP_MAX_PACKET_SIZE 256
 
 /* Number of requests to allocate */
 #define IN_EP_REQ_COUNT 4
@@ -580,12 +580,18 @@ audio_bind(struct usb_configuration *c, struct usb_function *f)
                goto fail;
        ac_interface_desc.bInterfaceNumber = status;
 
+       /* AUDIO_AC_INTERFACE */
+       ac_header_desc.baInterfaceNr[0] = status;
+
        status = usb_interface_id(c, f);
        if (status < 0)
                goto fail;
        as_interface_alt_0_desc.bInterfaceNumber = status;
        as_interface_alt_1_desc.bInterfaceNumber = status;
 
+       /* AUDIO_AS_INTERFACE */
+       ac_header_desc.baInterfaceNr[1] = status;
+
        status = -ENODEV;
 
        /* allocate our endpoint */
index 21c5ee2482d6c3f3181fa14a75f8c375d4f971d5..5c274f1d7b7a325e2ff0a88a5c70ac1f2db8d717 100644 (file)
@@ -25,7 +25,6 @@
 #include "u_ether.h"
 #include "rndis.h"
 
-
 /*
  * This function is an RNDIS Ethernet port -- a Microsoft protocol that's
  * been promoted instead of the standard CDC Ethernet.  The published RNDIS
  *   - MS-Windows drivers sometimes emit undocumented requests.
  */
 
+static unsigned int rndis_dl_max_pkt_per_xfer = 3;
+module_param(rndis_dl_max_pkt_per_xfer, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rndis_dl_max_pkt_per_xfer,
+       "Maximum packets per transfer for DL aggregation");
+
+static unsigned int rndis_ul_max_pkt_per_xfer = 3;
+module_param(rndis_ul_max_pkt_per_xfer, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rndis_ul_max_pkt_per_xfer,
+       "Maximum packets per transfer for UL aggregation");
+
 struct f_rndis {
        struct gether                   port;
        u8                              ctrl_id, data_id;
@@ -448,6 +457,7 @@ static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
 {
        struct f_rndis                  *rndis = req->context;
        int                             status;
+       rndis_init_msg_type             *buf;
 
        /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
 //     spin_lock(&dev->lock);
@@ -455,6 +465,21 @@ static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
        if (status < 0)
                pr_err("RNDIS command error %d, %d/%d\n",
                        status, req->actual, req->length);
+
+       buf = (rndis_init_msg_type *)req->buf;
+
+       if (buf->MessageType == RNDIS_MSG_INIT) {
+               if (buf->MaxTransferSize > 2048)
+                       rndis->port.multi_pkt_xfer = 1;
+               else
+                       rndis->port.multi_pkt_xfer = 0;
+               pr_info("%s: MaxTransferSize: %d : Multi_pkt_txr: %s\n",
+                               __func__, buf->MaxTransferSize,
+                               rndis->port.multi_pkt_xfer ? "enabled" :
+                                                           "disabled");
+               if (rndis_dl_max_pkt_per_xfer <= 1)
+                       rndis->port.multi_pkt_xfer = 0;
+       }
 //     spin_unlock(&dev->lock);
 }
 
@@ -748,6 +773,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
 
        rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
        rndis_set_host_mac(rndis->config, rndis->ethaddr);
+       rndis_set_max_pkt_xfer(rndis->config, rndis_ul_max_pkt_per_xfer);
 
        if (rndis->manufacturer && rndis->vendorID &&
                        rndis_set_param_vendor(rndis->config, rndis->vendorID,
@@ -854,6 +880,8 @@ rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
        rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
        rndis->port.wrap = rndis_add_header;
        rndis->port.unwrap = rndis_rm_hdr;
+       rndis->port.ul_max_pkts_per_xfer = rndis_ul_max_pkt_per_xfer;
+       rndis->port.dl_max_pkts_per_xfer = rndis_dl_max_pkt_per_xfer;
 
        rndis->port.func.name = "rndis";
        rndis->port.func.strings = rndis_strings;
index 693f0c24d51608955055a558fcc9d2e440495da3..cb2767df3fbab06cdd1120ef1a46d6370723c37a 100644 (file)
@@ -59,6 +59,16 @@ MODULE_PARM_DESC (rndis_debug, "enable debugging");
 
 #define RNDIS_MAX_CONFIGS      1
 
+int rndis_ul_max_pkt_per_xfer_rcvd;
+module_param(rndis_ul_max_pkt_per_xfer_rcvd, int, S_IRUGO);
+MODULE_PARM_DESC(rndis_ul_max_pkt_per_xfer_rcvd,
+               "Max num of REMOTE_NDIS_PACKET_MSGs received in a single transfer");
+
+int rndis_ul_max_xfer_size_rcvd;
+module_param(rndis_ul_max_xfer_size_rcvd, int, S_IRUGO);
+MODULE_PARM_DESC(rndis_ul_max_xfer_size_rcvd,
+               "Max size of bus transfer received");
+
 
 static rndis_params rndis_per_dev_params[RNDIS_MAX_CONFIGS];
 
@@ -585,12 +595,12 @@ static int rndis_init_response(int configNr, rndis_init_msg_type *buf)
        resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION);
        resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS);
        resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3);
-       resp->MaxPacketsPerTransfer = cpu_to_le32(1);
-       resp->MaxTransferSize = cpu_to_le32(
-                 params->dev->mtu
+       resp->MaxPacketsPerTransfer = cpu_to_le32(params->max_pkt_per_xfer);
+       resp->MaxTransferSize = cpu_to_le32(params->max_pkt_per_xfer *
+               (params->dev->mtu
                + sizeof(struct ethhdr)
                + sizeof(struct rndis_packet_msg_type)
-               + 22);
+               + 22));
        resp->PacketAlignmentFactor = cpu_to_le32(0);
        resp->AFListOffset = cpu_to_le32(0);
        resp->AFListSize = cpu_to_le32(0);
@@ -686,6 +696,12 @@ static int rndis_reset_response(int configNr, rndis_reset_msg_type *buf)
        rndis_reset_cmplt_type *resp;
        rndis_resp_t *r;
        struct rndis_params *params = rndis_per_dev_params + configNr;
+       u32 length;
+       u8 *xbuf;
+
+       /* drain the response queue */
+       while ((xbuf = rndis_get_next_response(configNr, &length)))
+               rndis_free_response(configNr, xbuf);
 
        r = rndis_add_response(configNr, sizeof(rndis_reset_cmplt_type));
        if (!r)
@@ -910,6 +926,8 @@ int rndis_set_param_dev(u8 configNr, struct net_device *dev, u16 *cdc_filter)
        rndis_per_dev_params[configNr].dev = dev;
        rndis_per_dev_params[configNr].filter = cdc_filter;
 
+       rndis_ul_max_xfer_size_rcvd = 0;
+       rndis_ul_max_pkt_per_xfer_rcvd = 0;
        return 0;
 }
 
@@ -936,6 +954,13 @@ int rndis_set_param_medium(u8 configNr, u32 medium, u32 speed)
        return 0;
 }
 
+void rndis_set_max_pkt_xfer(u8 configNr, u8 max_pkt_per_xfer)
+{
+       pr_debug("%s:\n", __func__);
+
+       rndis_per_dev_params[configNr].max_pkt_per_xfer = max_pkt_per_xfer;
+}
+
 void rndis_add_hdr(struct sk_buff *skb)
 {
        struct rndis_packet_msg_type *header;
@@ -1008,23 +1033,73 @@ int rndis_rm_hdr(struct gether *port,
                        struct sk_buff *skb,
                        struct sk_buff_head *list)
 {
-       /* tmp points to a struct rndis_packet_msg_type */
-       __le32 *tmp = (void *)skb->data;
+       int num_pkts = 1;
 
-       /* MessageType, MessageLength */
-       if (cpu_to_le32(RNDIS_MSG_PACKET)
-                       != get_unaligned(tmp++)) {
-               dev_kfree_skb_any(skb);
-               return -EINVAL;
-       }
-       tmp++;
+       if (skb->len > rndis_ul_max_xfer_size_rcvd)
+               rndis_ul_max_xfer_size_rcvd = skb->len;
+
+       while (skb->len) {
+               struct rndis_packet_msg_type *hdr;
+               struct sk_buff          *skb2;
+               u32             msg_len, data_offset, data_len;
+
+               /* some rndis hosts send extra byte to avoid zlp, ignore it */
+               if (skb->len == 1) {
+                       dev_kfree_skb_any(skb);
+                       return 0;
+               }
+
+               if (skb->len < sizeof *hdr) {
+                       pr_err("invalid rndis pkt: skblen:%u hdr_len:%u",
+                                       skb->len, sizeof *hdr);
+                       dev_kfree_skb_any(skb);
+                       return -EINVAL;
+               }
+
+               hdr = (void *)skb->data;
+               msg_len = le32_to_cpu(hdr->MessageLength);
+               data_offset = le32_to_cpu(hdr->DataOffset);
+               data_len = le32_to_cpu(hdr->DataLength);
+
+               if (skb->len < msg_len ||
+                               ((data_offset + data_len + 8) > msg_len)) {
+                       pr_err("invalid rndis message: %d/%d/%d/%d, len:%d\n",
+                                       le32_to_cpu(hdr->MessageType),
+                                       msg_len, data_offset, data_len, skb->len);
+                       dev_kfree_skb_any(skb);
+                       return -EOVERFLOW;
+               }
+               if (le32_to_cpu(hdr->MessageType) != RNDIS_MSG_PACKET) {
+                       pr_err("invalid rndis message: %d/%d/%d/%d, len:%d\n",
+                                       le32_to_cpu(hdr->MessageType),
+                                       msg_len, data_offset, data_len, skb->len);
+                       dev_kfree_skb_any(skb);
+                       return -EINVAL;
+               }
+
+               skb_pull(skb, data_offset + 8);
 
-       /* DataOffset, DataLength */
-       if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
-               dev_kfree_skb_any(skb);
-               return -EOVERFLOW;
+               if (msg_len == skb->len) {
+                       skb_trim(skb, data_len);
+                       break;
+               }
+
+               skb2 = skb_clone(skb, GFP_ATOMIC);
+               if (!skb2) {
+                       pr_err("%s:skb clone failed\n", __func__);
+                       dev_kfree_skb_any(skb);
+                       return -ENOMEM;
+               }
+
+               skb_pull(skb, msg_len - sizeof *hdr);
+               skb_trim(skb2, data_len);
+               skb_queue_tail(list, skb2);
+
+               num_pkts++;
        }
-       skb_trim(skb, get_unaligned_le32(tmp++));
+
+       if (num_pkts > rndis_ul_max_pkt_per_xfer_rcvd)
+               rndis_ul_max_pkt_per_xfer_rcvd = num_pkts;
 
        skb_queue_tail(list, skb);
        return 0;
index 0647f2f34e898770dac9c6021f0207e2ce6e3c28..12045b31a311e299a6673577638fc3842211c0bc 100644 (file)
@@ -189,6 +189,7 @@ typedef struct rndis_params
        struct net_device       *dev;
 
        u32                     vendorID;
+       u8                      max_pkt_per_xfer;
        const char              *vendorDescr;
        void                    (*resp_avail)(void *v);
        void                    *v;
@@ -204,6 +205,7 @@ int  rndis_set_param_dev (u8 configNr, struct net_device *dev,
 int  rndis_set_param_vendor (u8 configNr, u32 vendorID,
                            const char *vendorDescr);
 int  rndis_set_param_medium (u8 configNr, u32 medium, u32 speed);
+void rndis_set_max_pkt_xfer(u8 configNr, u8 max_pkt_per_xfer);
 void rndis_add_hdr (struct sk_buff *skb);
 int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
                        struct sk_buff_head *list);
index 4b76124ce96b8dfaeab5ab0f0a89bf4f5abf15a6..14f587efc0f6eb66c97fbfd924c86a9d17adac18 100644 (file)
@@ -48,6 +48,8 @@
 
 #define UETH__VERSION  "29-May-2008"
 
+static struct workqueue_struct *uether_wq;
+
 struct eth_dev {
        /* lock is held while accessing port_usb
         */
@@ -59,17 +61,25 @@ struct eth_dev {
 
        spinlock_t              req_lock;       /* guard {rx,tx}_reqs */
        struct list_head        tx_reqs, rx_reqs;
-       atomic_t                tx_qlen;
+       unsigned                tx_qlen;
+/* Minimum number of TX USB request queued to UDC */
+#define TX_REQ_THRESHOLD       5
+       int                     no_tx_req_used;
+       int                     tx_skb_hold_count;
+       u32                     tx_req_bufsize;
 
        struct sk_buff_head     rx_frames;
 
        unsigned                header_len;
+       unsigned                ul_max_pkts_per_xfer;
+       unsigned                dl_max_pkts_per_xfer;
        struct sk_buff          *(*wrap)(struct gether *, struct sk_buff *skb);
        int                     (*unwrap)(struct gether *,
                                                struct sk_buff *skb,
                                                struct sk_buff_head *list);
 
        struct work_struct      work;
+       struct work_struct      rx_work;
 
        unsigned long           todo;
 #define        WORK_RX_MEMORY          0
@@ -84,7 +94,7 @@ struct eth_dev {
 
 #define DEFAULT_QLEN   2       /* double buffering by default */
 
-static unsigned qmult = 5;
+static unsigned qmult = 10;
 module_param(qmult, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
 
@@ -226,9 +236,13 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
        size += out->maxpacket - 1;
        size -= size % out->maxpacket;
 
+       if (dev->ul_max_pkts_per_xfer)
+               size *= dev->ul_max_pkts_per_xfer;
+
        if (dev->port_usb->is_fixed)
                size = max_t(size_t, size, dev->port_usb->fixed_out_len);
 
+       DBG(dev, "%s: size: %d\n", __func__, size);
        skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
        if (skb == NULL) {
                DBG(dev, "no rx skb\n");
@@ -254,18 +268,16 @@ enomem:
                DBG(dev, "rx submit --> %d\n", retval);
                if (skb)
                        dev_kfree_skb_any(skb);
-               spin_lock_irqsave(&dev->req_lock, flags);
-               list_add(&req->list, &dev->rx_reqs);
-               spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return retval;
 }
 
 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
 {
-       struct sk_buff  *skb = req->context, *skb2;
+       struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;
        int             status = req->status;
+       bool            queue = 0;
 
        switch (status) {
 
@@ -281,6 +293,10 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
                                status = dev->unwrap(dev->port_usb,
                                                        skb,
                                                        &dev->rx_frames);
+                               if (status == -EINVAL)
+                                       dev->net->stats.rx_errors++;
+                               else if (status == -EOVERFLOW)
+                                       dev->net->stats.rx_over_errors++;
                        } else {
                                dev_kfree_skb_any(skb);
                                status = -ENOTCONN;
@@ -289,30 +305,9 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
                } else {
                        skb_queue_tail(&dev->rx_frames, skb);
                }
-               skb = NULL;
-
-               skb2 = skb_dequeue(&dev->rx_frames);
-               while (skb2) {
-                       if (status < 0
-                                       || ETH_HLEN > skb2->len
-                                       || skb2->len > VLAN_ETH_FRAME_LEN) {
-                               dev->net->stats.rx_errors++;
-                               dev->net->stats.rx_length_errors++;
-                               DBG(dev, "rx length %d\n", skb2->len);
-                               dev_kfree_skb_any(skb2);
-                               goto next_frame;
-                       }
-                       skb2->protocol = eth_type_trans(skb2, dev->net);
-                       dev->net->stats.rx_packets++;
-                       dev->net->stats.rx_bytes += skb2->len;
 
-                       /* no buffer copies needed, unless hardware can't
-                        * use skb buffers.
-                        */
-                       status = netif_rx(skb2);
-next_frame:
-                       skb2 = skb_dequeue(&dev->rx_frames);
-               }
+               if (!status)
+                       queue = 1;
                break;
 
        /* software-driven interface shutdown */
@@ -335,22 +330,20 @@ quiesce:
                /* FALLTHROUGH */
 
        default:
+               queue = 1;
+               dev_kfree_skb_any(skb);
                dev->net->stats.rx_errors++;
                DBG(dev, "rx status %d\n", status);
                break;
        }
 
-       if (skb)
-               dev_kfree_skb_any(skb);
-       if (!netif_running(dev->net)) {
 clean:
-               spin_lock(&dev->req_lock);
-               list_add(&req->list, &dev->rx_reqs);
-               spin_unlock(&dev->req_lock);
-               req = NULL;
-       }
-       if (req)
-               rx_submit(dev, req, GFP_ATOMIC);
+       spin_lock(&dev->req_lock);
+       list_add(&req->list, &dev->rx_reqs);
+       spin_unlock(&dev->req_lock);
+
+       if (queue)
+               queue_work(uether_wq, &dev->rx_work);
 }
 
 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@ -415,16 +408,24 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
 {
        struct usb_request      *req;
        unsigned long           flags;
+       int                     req_cnt = 0;
 
        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
        while (!list_empty(&dev->rx_reqs)) {
+               /* break the nexus of continuous completion and re-submission */
+               if (++req_cnt > qlen(dev->gadget))
+                       break;
+
                req = container_of(dev->rx_reqs.next,
                                struct usb_request, list);
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);
 
                if (rx_submit(dev, req, gfp_flags) < 0) {
+                       spin_lock_irqsave(&dev->req_lock, flags);
+                       list_add(&req->list, &dev->rx_reqs);
+                       spin_unlock_irqrestore(&dev->req_lock, flags);
                        defer_kevent(dev, WORK_RX_MEMORY);
                        return;
                }
@@ -434,6 +435,36 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
        spin_unlock_irqrestore(&dev->req_lock, flags);
 }
 
+static void process_rx_w(struct work_struct *work)
+{
+       struct eth_dev  *dev = container_of(work, struct eth_dev, rx_work);
+       struct sk_buff  *skb;
+       int             status = 0;
+
+       if (!dev->port_usb)
+               return;
+
+       while ((skb = skb_dequeue(&dev->rx_frames))) {
+               if (status < 0
+                               || ETH_HLEN > skb->len
+                               || skb->len > ETH_FRAME_LEN) {
+                       dev->net->stats.rx_errors++;
+                       dev->net->stats.rx_length_errors++;
+                       DBG(dev, "rx length %d\n", skb->len);
+                       dev_kfree_skb_any(skb);
+                       continue;
+               }
+               skb->protocol = eth_type_trans(skb, dev->net);
+               dev->net->stats.rx_packets++;
+               dev->net->stats.rx_bytes += skb->len;
+
+               status = netif_rx_ni(skb);
+       }
+
+       if (netif_running(dev->net))
+               rx_fill(dev, GFP_KERNEL);
+}
+
 static void eth_work(struct work_struct *work)
 {
        struct eth_dev  *dev = container_of(work, struct eth_dev, work);
@@ -451,6 +482,11 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
 {
        struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;
+       struct net_device *net = dev->net;
+       struct usb_request *new_req;
+       struct usb_ep *in;
+       int length;
+       int retval;
 
        switch (req->status) {
        default:
@@ -461,16 +497,74 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
        case -ESHUTDOWN:                /* disconnect etc */
                break;
        case 0:
-               dev->net->stats.tx_bytes += skb->len;
+               if (!req->zero)
+                       dev->net->stats.tx_bytes += req->length-1;
+               else
+                       dev->net->stats.tx_bytes += req->length;
        }
        dev->net->stats.tx_packets++;
 
        spin_lock(&dev->req_lock);
-       list_add(&req->list, &dev->tx_reqs);
-       spin_unlock(&dev->req_lock);
-       dev_kfree_skb_any(skb);
+       list_add_tail(&req->list, &dev->tx_reqs);
+
+       if (dev->port_usb->multi_pkt_xfer) {
+               dev->no_tx_req_used--;
+               req->length = 0;
+               in = dev->port_usb->in_ep;
+
+               if (!list_empty(&dev->tx_reqs)) {
+                       new_req = container_of(dev->tx_reqs.next,
+                                       struct usb_request, list);
+                       list_del(&new_req->list);
+                       spin_unlock(&dev->req_lock);
+                       if (new_req->length > 0) {
+                               length = new_req->length;
+
+                               /* NCM requires no zlp if transfer is
+                                * dwNtbInMaxSize */
+                               if (dev->port_usb->is_fixed &&
+                                       length == dev->port_usb->fixed_in_len &&
+                                       (length % in->maxpacket) == 0)
+                                       new_req->zero = 0;
+                               else
+                                       new_req->zero = 1;
+
+                               /* use zlp framing on tx for strict CDC-Ether
+                                * conformance, though any robust network rx
+                                * path ignores extra padding. and some hardware
+                                * doesn't like to write zlps.
+                                */
+                               if (new_req->zero && !dev->zlp &&
+                                               (length % in->maxpacket) == 0) {
+                                       new_req->zero = 0;
+                                       length++;
+                               }
+
+                               new_req->length = length;
+                               retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
+                               switch (retval) {
+                               default:
+                                       DBG(dev, "tx queue err %d\n", retval);
+                                       break;
+                               case 0:
+                                       spin_lock(&dev->req_lock);
+                                       dev->no_tx_req_used++;
+                                       spin_unlock(&dev->req_lock);
+                                       net->trans_start = jiffies;
+                               }
+                       } else {
+                               spin_lock(&dev->req_lock);
+                               list_add(&new_req->list, &dev->tx_reqs);
+                               spin_unlock(&dev->req_lock);
+                       }
+               } else {
+                       spin_unlock(&dev->req_lock);
+               }
+       } else {
+               spin_unlock(&dev->req_lock);
+               dev_kfree_skb_any(skb);
+       }
 
-       atomic_dec(&dev->tx_qlen);
        if (netif_carrier_ok(dev->net))
                netif_wake_queue(dev->net);
 }
@@ -480,6 +574,26 @@ static inline int is_promisc(u16 cdc_filter)
        return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
 }
 
+static void alloc_tx_buffer(struct eth_dev *dev)
+{
+       struct list_head        *act;
+       struct usb_request      *req;
+
+       dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
+                               (dev->net->mtu
+                               + sizeof(struct ethhdr)
+                               /* size of rndis_packet_msg_type */
+                               + 44
+                               + 22));
+
+       list_for_each(act, &dev->tx_reqs) {
+               req = container_of(act, struct usb_request, list);
+               if (!req->buf)
+                       req->buf = kmalloc(dev->tx_req_bufsize,
+                                               GFP_ATOMIC);
+       }
+}
+
 static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                                        struct net_device *net)
 {
@@ -506,6 +620,10 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
+       /* Allocate memory for tx_reqs to support multi packet transfer */
+       if (dev->port_usb->multi_pkt_xfer && !dev->tx_req_bufsize)
+               alloc_tx_buffer(dev);
+
        /* apply outgoing CDC or RNDIS filters */
        if (!is_promisc(cdc_filter)) {
                u8              *dest = skb->data;
@@ -560,11 +678,39 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                spin_unlock_irqrestore(&dev->lock, flags);
                if (!skb)
                        goto drop;
+       }
+
+       spin_lock_irqsave(&dev->req_lock, flags);
+       dev->tx_skb_hold_count++;
+       spin_unlock_irqrestore(&dev->req_lock, flags);
+
+       if (dev->port_usb->multi_pkt_xfer) {
+               memcpy(req->buf + req->length, skb->data, skb->len);
+               req->length = req->length + skb->len;
+               length = req->length;
+               dev_kfree_skb_any(skb);
+
+               spin_lock_irqsave(&dev->req_lock, flags);
+               if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) {
+                       if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
+                               list_add(&req->list, &dev->tx_reqs);
+                               spin_unlock_irqrestore(&dev->req_lock, flags);
+                               goto success;
+                       }
+               }
+
+               dev->no_tx_req_used++;
+               spin_unlock_irqrestore(&dev->req_lock, flags);
 
+               spin_lock_irqsave(&dev->lock, flags);
+               dev->tx_skb_hold_count = 0;
+               spin_unlock_irqrestore(&dev->lock, flags);
+       } else {
                length = skb->len;
+               req->buf = skb->data;
+               req->context = skb;
        }
-       req->buf = skb->data;
-       req->context = skb;
+
        req->complete = tx_complete;
 
        /* NCM requires no zlp if transfer is dwNtbInMaxSize */
@@ -579,17 +725,26 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
         * though any robust network rx path ignores extra padding.
         * and some hardware doesn't like to write zlps.
         */
-       if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
+       if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
+               req->zero = 0;
                length++;
+       }
 
        req->length = length;
 
-       /* throttle high/super speed IRQ rate back slightly */
-       if (gadget_is_dualspeed(dev->gadget))
-               req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
-                                    dev->gadget->speed == USB_SPEED_SUPER)
-                       ? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
-                       : 0;
+       /* throttle highspeed IRQ rate back slightly */
+       if (gadget_is_dualspeed(dev->gadget) &&
+                        (dev->gadget->speed == USB_SPEED_HIGH)) {
+               dev->tx_qlen++;
+               if (dev->tx_qlen == (qmult/2)) {
+                       req->no_interrupt = 0;
+                       dev->tx_qlen = 0;
+               } else {
+                       req->no_interrupt = 1;
+               }
+       } else {
+               req->no_interrupt = 0;
+       }
 
        retval = usb_ep_queue(in, req, GFP_ATOMIC);
        switch (retval) {
@@ -598,11 +753,11 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                break;
        case 0:
                net->trans_start = jiffies;
-               atomic_inc(&dev->tx_qlen);
        }
 
        if (retval) {
-               dev_kfree_skb_any(skb);
+               if (!dev->port_usb->multi_pkt_xfer)
+                       dev_kfree_skb_any(skb);
 drop:
                dev->net->stats.tx_dropped++;
                spin_lock_irqsave(&dev->req_lock, flags);
@@ -611,6 +766,7 @@ drop:
                list_add(&req->list, &dev->tx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
+success:
        return NETDEV_TX_OK;
 }
 
@@ -624,7 +780,7 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
        rx_fill(dev, gfp_flags);
 
        /* and open the tx floodgates */
-       atomic_set(&dev->tx_qlen, 0);
+       dev->tx_qlen = 0;
        netif_wake_queue(dev->net);
 }
 
@@ -697,6 +853,8 @@ static int eth_stop(struct net_device *net)
 
 /*-------------------------------------------------------------------------*/
 
+static u8 host_ethaddr[ETH_ALEN];
+
 /* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
 static char *dev_addr;
 module_param(dev_addr, charp, S_IRUGO);
@@ -728,6 +886,17 @@ static int get_ether_addr(const char *str, u8 *dev_addr)
        return 1;
 }
 
+static int get_host_ether_addr(u8 *str, u8 *dev_addr)
+{
+       memcpy(dev_addr, str, ETH_ALEN);
+       if (is_valid_ether_addr(dev_addr))
+               return 0;
+
+       random_ether_addr(dev_addr);
+       memcpy(str, dev_addr, ETH_ALEN);
+       return 1;
+}
+
 static const struct net_device_ops eth_netdev_ops = {
        .ndo_open               = eth_open,
        .ndo_stop               = eth_stop,
@@ -770,6 +939,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
+       INIT_WORK(&dev->rx_work, process_rx_w);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);
 
@@ -782,9 +952,11 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
        if (get_ether_addr(dev_addr, net->dev_addr))
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "self");
-       if (get_ether_addr(host_addr, dev->host_mac))
-               dev_warn(&g->dev,
-                       "using random %s ethernet address\n", "host");
+
+       if (get_host_ether_addr(host_ethaddr, dev->host_mac))
+               dev_warn(&g->dev, "using random %s ethernet address\n", "host");
+       else
+               dev_warn(&g->dev, "using previous %s ethernet address\n", "host");
 
        if (ethaddr)
                memcpy(ethaddr, dev->host_mac, ETH_ALEN);
@@ -882,8 +1054,13 @@ struct net_device *gether_connect(struct gether *link)
                dev->header_len = link->header_len;
                dev->unwrap = link->unwrap;
                dev->wrap = link->wrap;
+               dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
+               dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer;
 
                spin_lock(&dev->lock);
+               dev->tx_skb_hold_count = 0;
+               dev->no_tx_req_used = 0;
+               dev->tx_req_bufsize = 0;
                dev->port_usb = link;
                if (netif_running(dev->net)) {
                        if (link->open)
@@ -927,6 +1104,7 @@ void gether_disconnect(struct gether *link)
 {
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;
+       struct sk_buff          *skb;
 
        WARN_ON(!dev);
        if (!dev)
@@ -949,6 +1127,8 @@ void gether_disconnect(struct gether *link)
                list_del(&req->list);
 
                spin_unlock(&dev->req_lock);
+               if (link->multi_pkt_xfer)
+                       kfree(req->buf);
                usb_ep_free_request(link->in_ep, req);
                spin_lock(&dev->req_lock);
        }
@@ -968,6 +1148,12 @@ void gether_disconnect(struct gether *link)
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
+
+       spin_lock(&dev->rx_frames.lock);
+       while ((skb = __skb_dequeue(&dev->rx_frames)))
+               dev_kfree_skb_any(skb);
+       spin_unlock(&dev->rx_frames.lock);
+
        link->out_ep->driver_data = NULL;
        link->out_ep->desc = NULL;
 
@@ -980,3 +1166,23 @@ void gether_disconnect(struct gether *link)
        dev->port_usb = NULL;
        spin_unlock(&dev->lock);
 }
+
+static int __init gether_init(void)
+{
+       uether_wq  = create_singlethread_workqueue("uether");
+       if (!uether_wq) {
+               pr_err("%s: Unable to create workqueue: uether\n", __func__);
+               return -ENOMEM;
+       }
+       return 0;
+}
+module_init(gether_init);
+
+static void __exit gether_exit(void)
+{
+       destroy_workqueue(uether_wq);
+
+}
+module_exit(gether_exit);
+MODULE_DESCRIPTION("ethernet over USB driver");
+MODULE_LICENSE("GPL v2");
index 02522338a7081abad0f92c3506a84724f61df23b..67eda50ae995354cefe5be57bbea2054824a065c 100644 (file)
@@ -54,6 +54,9 @@ struct gether {
        bool                            is_fixed;
        u32                             fixed_out_len;
        u32                             fixed_in_len;
+       unsigned                ul_max_pkts_per_xfer;
+       unsigned                dl_max_pkts_per_xfer;
+       bool                            multi_pkt_xfer;
        struct sk_buff                  *(*wrap)(struct gether *port,
                                                struct sk_buff *skb);
        int                             (*unwrap)(struct gether *port,
index afe9b9e50cc44ec9a62cfa54264ff35285277eae..58a861395ea05d7c08a151400ed29fc7b56b678c 100644 (file)
@@ -447,6 +447,11 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
 {
        struct usb_udc          *udc = container_of(dev, struct usb_udc, dev);
 
+       if (!udc->driver) {
+               dev_err(dev, "soft-connect without a gadget driver\n");
+               return -EOPNOTSUPP;
+       }
+
        if (sysfs_streq(buf, "connect")) {
                usb_gadget_udc_start(udc->gadget, udc->driver);
                usb_gadget_connect(udc->gadget);
index 3ee3c7aa6e5b5a4ea2ba394171629e11cc2314e0..7724bab1828b51d214a6edc1cf5ac09ca65c06b2 100644 (file)
@@ -58,12 +58,12 @@ struct h20ahb_hcd {
 
 static inline void ehci_write(void __iomem *base, u32 reg, u32 val)
 {
-       __raw_writel(val, base + reg);
+       writel_relaxed(val, base + reg);
 }
 
 static inline u32 ehci_read(void __iomem *base, u32 reg)
 {
-       return __raw_readl(base + reg);
+       return readl_relaxed(base + reg);
 }
 
 /* configure so an HC device and id are always provided */
index 248bd465f1adfd666d997f3c2f45ecabcdf47091..32025a9a31a7e170149c62e302b3cb4ab5a9ca69 100755 (executable)
@@ -977,8 +977,6 @@ rescan:
        }
 
        qh->exception = 1;
-       if (ehci->rh_state < EHCI_RH_RUNNING)
-               qh->qh_state = QH_STATE_IDLE;
        switch (qh->qh_state) {
        case QH_STATE_LINKED:
        case QH_STATE_COMPLETING:
index 8fe401c7d152f493c39b3e072e6f88418d906e6e..fe131565d09006117e40f289658e1e926a7de070 100644 (file)
@@ -35,6 +35,21 @@ static const char hcd_name[] = "ehci-pci";
 #define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70
 
 /*-------------------------------------------------------------------------*/
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC            0x0939
+static inline bool is_intel_quark_x1000(struct pci_dev *pdev)
+{
+       return pdev->vendor == PCI_VENDOR_ID_INTEL &&
+               pdev->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC;
+}
+
+/*
+ * 0x84 is the offset of in/out threshold register,
+ * and it is the same offset as the register of 'hostpc'.
+ */
+#define        intel_quark_x1000_insnreg01     hostpc
+
+/* Maximum usable threshold value is 0x7f dwords for both IN and OUT */
+#define INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD   0x007f007f
 
 /* called after powerup, by probe or system-pm "wakeup" */
 static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
@@ -50,6 +65,16 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
        if (!retval)
                ehci_dbg(ehci, "MWI active\n");
 
+       /* Reset the threshold limit */
+       if (is_intel_quark_x1000(pdev)) {
+               /*
+                * For the Intel QUARK X1000, raise the I/O threshold to the
+                * maximum usable value in order to improve performance.
+                */
+               ehci_writel(ehci, INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD,
+                       ehci->regs->intel_quark_x1000_insnreg01);
+       }
+
        return 0;
 }
 
index 37dc8373200a1f896b8a9781c49bb7325b7258ef..1e1563da1812688dd91b8b6537e8c6a92dd93d4c 100644 (file)
@@ -314,8 +314,7 @@ static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
  *  - ED_OPER: when there's any request queued, the ED gets rescheduled
  *    immediately.  HC should be working on them.
  *
- *  - ED_IDLE:  when there's no TD queue. there's no reason for the HC
- *    to care about this ED; safe to disable the endpoint.
+ *  - ED_IDLE: when there's no TD queue or the HC isn't running.
  *
  * When finish_unlinks() runs later, after SOF interrupt, it will often
  * complete one or more URB unlinks before making that state change.
@@ -928,6 +927,10 @@ rescan_all:
                int                     completed, modified;
                __hc32                  *prev;
 
+               /* Is this ED already invisible to the hardware? */
+               if (ed->state == ED_IDLE)
+                       goto ed_idle;
+
                /* only take off EDs that the HC isn't using, accounting for
                 * frame counter wraps and EDs with partially retired TDs
                 */
@@ -957,12 +960,20 @@ skip_ed:
                        }
                }
 
+               /* ED's now officially unlinked, hc doesn't see */
+               ed->state = ED_IDLE;
+               if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
+                       ohci->eds_scheduled--;
+               ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
+               ed->hwNextED = 0;
+               wmb();
+               ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
+ed_idle:
+
                /* reentrancy:  if we drop the schedule lock, someone might
                 * have modified this list.  normally it's just prepending
                 * entries (which we'd ignore), but paranoia won't hurt.
                 */
-               *last = ed->ed_next;
-               ed->ed_next = NULL;
                modified = 0;
 
                /* unlink urbs as requested, but rescan the list after
@@ -1020,19 +1031,20 @@ rescan_this:
                if (completed && !list_empty (&ed->td_list))
                        goto rescan_this;
 
-               /* ED's now officially unlinked, hc doesn't see */
-               ed->state = ED_IDLE;
-               if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
-                       ohci->eds_scheduled--;
-               ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
-               ed->hwNextED = 0;
-               wmb ();
-               ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);
-
-               /* but if there's work queued, reschedule */
-               if (!list_empty (&ed->td_list)) {
-                       if (ohci->rh_state == OHCI_RH_RUNNING)
-                               ed_schedule (ohci, ed);
+               /*
+                * If no TDs are queued, take ED off the ed_rm_list.
+                * Otherwise, if the HC is running, reschedule.
+                * If not, leave it on the list for further dequeues.
+                */
+               if (list_empty(&ed->td_list)) {
+                       *last = ed->ed_next;
+                       ed->ed_next = NULL;
+               } else if (ohci->rh_state == OHCI_RH_RUNNING) {
+                       *last = ed->ed_next;
+                       ed->ed_next = NULL;
+                       ed_schedule(ohci, ed);
+               } else {
+                       last = &ed->ed_next;
                }
 
                if (modified)
index 7cdcfd024744f05e198690d5dae93d5804888f7c..d939376c5deee42de81af3e95bd7bc143f82ea46 100644 (file)
@@ -462,7 +462,8 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
 }
 
 /* Updates Link Status for super Speed port */
-static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
+static void xhci_hub_report_link_state(struct xhci_hcd *xhci,
+               u32 *status, u32 status_reg)
 {
        u32 pls = status_reg & PORT_PLS_MASK;
 
@@ -501,7 +502,8 @@ static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
                 * in which sometimes the port enters compliance mode
                 * caused by a delay on the host-device negotiation.
                 */
-               if (pls == USB_SS_PORT_LS_COMP_MOD)
+               if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+                               (pls == USB_SS_PORT_LS_COMP_MOD))
                        pls |= USB_PORT_STAT_CONNECTION;
        }
 
@@ -686,7 +688,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                }
                /* Update Port Link State for super speed ports*/
                if (hcd->speed == HCD_USB3) {
-                       xhci_hub_report_link_state(&status, temp);
+                       xhci_hub_report_link_state(xhci, &status, temp);
                        /*
                         * Verify if all USB3 Ports Have entered U0 already.
                         * Delete Compliance Mode Timer if so.
index d007f09201263532c0400a2b88022d5c663e5817..677f032482f74ef207a739801b1cf1927579da8e 100644 (file)
@@ -1795,7 +1795,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        }
 
        num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
-       for (i = 0; i < num_ports; i++) {
+       for (i = 0; i < num_ports && xhci->rh_bw; i++) {
                struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
                for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
                        struct list_head *ep = &bwt->interval_bw[j].endpoints;
index a736d82695cb692a8da5a0a7ad41cbd848a7f228..0e57bcb8e3f79a3c464eda255d322f8a876770c2 100644 (file)
@@ -87,6 +87,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        /* AMD PLL quirk */
        if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
                xhci->quirks |= XHCI_AMD_PLL_FIX;
+
+       if (pdev->vendor == PCI_VENDOR_ID_AMD)
+               xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+
        if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                xhci->quirks |= XHCI_LPM_SUPPORT;
                xhci->quirks |= XHCI_INTEL_HOST;
index fe42cae6d1efd1fb10b0645e328c557b111576ec..df5834bd71f3589b1e30d700f922a69692efdd41 100644 (file)
@@ -2532,7 +2532,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                 * last TRB of the previous TD. The command completion handle
                 * will take care the rest.
                 */
-               if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
+               if (!event_seg && (trb_comp_code == COMP_STOP ||
+                                  trb_comp_code == COMP_STOP_INVAL)) {
                        ret = 0;
                        goto cleanup;
                }
index 9a7088bc634dd9b4fc1e70e7830070f29bbd7a02..10223f2b18d20d0884b8e4328b86e56ced608666 100644 (file)
@@ -4407,13 +4407,21 @@ static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
        int ret;
 
        spin_lock_irqsave(&xhci->lock, flags);
-       if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
+
+       virt_dev = xhci->devs[udev->slot_id];
+
+       /*
+        * virt_dev might not exists yet if xHC resumed from hibernate (S4) and
+        * xHC was re-initialized. Exit latency will be set later after
+        * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
+        */
+
+       if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                return 0;
        }
 
        /* Attempt to issue an Evaluate Context command to change the MEL. */
-       virt_dev = xhci->devs[udev->slot_id];
        command = xhci->lpm_command;
        xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
        spin_unlock_irqrestore(&xhci->lock, flags);
index de98906f786d08ebac625b3cc8213de5d79cf68e..0aef801edbc15235cfedf131aae8e3a5b81f20c9 100644 (file)
@@ -3248,6 +3248,7 @@ static const struct usb_device_id sisusb_table[] = {
        { USB_DEVICE(0x0711, 0x0918) },
        { USB_DEVICE(0x0711, 0x0920) },
        { USB_DEVICE(0x0711, 0x0950) },
+       { USB_DEVICE(0x0711, 0x5200) },
        { USB_DEVICE(0x182d, 0x021c) },
        { USB_DEVICE(0x182d, 0x0269) },
        { }
index b14379659e351342dfa0a57a32c0c7a19d6af4e6..e9183eda39e084f6e4873f3968ba6a45165957be 100644 (file)
@@ -122,6 +122,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
        { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+       { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
@@ -154,7 +155,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
        { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
        { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+       { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
        { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+       { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
        { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
        { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
        { USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */
index 9e75e3eaea4fb1e94d04cc4ba71eabbdea80e944..768c2b4722d1e972b995d5d5adc7a3cf8ce0c98e 100644 (file)
@@ -148,12 +148,14 @@ static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
  * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
  */
 static struct usb_device_id id_table_combined [] = {
+       { USB_DEVICE(FTDI_VID, FTDI_BRICK_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_BM_ATOM_NANO_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
@@ -676,6 +678,10 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
        { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
        { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
+       { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
+       { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
+       { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
+       { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
        { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
@@ -741,6 +747,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
                .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
        { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
+       { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
@@ -948,6 +955,8 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
+       /* ekey Devices */
+       { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
        /* Infineon Devices */
        { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
        { },                                    /* Optional parameter entry */
index c4777bc6aee0189d69eb56c970c8e8a9c19507ee..302ab9a71f060055825f48aac211551391407920 100644 (file)
 
 /*** third-party PIDs (using FTDI_VID) ***/
 
+/*
+ * Certain versions of the official Windows FTDI driver reprogrammed
+ * counterfeit FTDI devices to PID 0. Support these devices anyway.
+ */
+#define FTDI_BRICK_PID         0x0000
+
 #define FTDI_LUMEL_PD12_PID    0x6002
 
 /*
@@ -42,6 +48,8 @@
 /* www.candapter.com Ewert Energy Systems CANdapter device */
 #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
 
+#define FTDI_BM_ATOM_NANO_PID  0xa559  /* Basic Micro ATOM Nano USB2Serial */
+
 /*
  * Texas Instruments XDS100v2 JTAG / BeagleBone A3
  * http://processors.wiki.ti.com/index.php/XDS100
 /*
  * Xsens Technologies BV products (http://www.xsens.com).
  */
-#define XSENS_CONVERTER_0_PID  0xD388
-#define XSENS_CONVERTER_1_PID  0xD389
+#define XSENS_VID              0x2639
+#define XSENS_AWINDA_STATION_PID 0x0101
+#define XSENS_AWINDA_DONGLE_PID 0x0102
+#define XSENS_MTW_PID          0x0200  /* Xsens MTw */
+#define XSENS_CONVERTER_PID    0xD00D  /* Xsens USB-serial converter */
+
+/* Xsens devices using FTDI VID */
+#define XSENS_CONVERTER_0_PID  0xD388  /* Xsens USB converter */
+#define XSENS_CONVERTER_1_PID  0xD389  /* Xsens Wireless Receiver */
 #define XSENS_CONVERTER_2_PID  0xD38A
-#define XSENS_CONVERTER_3_PID  0xD38B
-#define XSENS_CONVERTER_4_PID  0xD38C
-#define XSENS_CONVERTER_5_PID  0xD38D
+#define XSENS_CONVERTER_3_PID  0xD38B  /* Xsens USB-serial converter */
+#define XSENS_CONVERTER_4_PID  0xD38C  /* Xsens Wireless Receiver */
+#define XSENS_CONVERTER_5_PID  0xD38D  /* Xsens Awinda Station */
 #define XSENS_CONVERTER_6_PID  0xD38E
 #define XSENS_CONVERTER_7_PID  0xD38F
 
 #define TELLDUS_VID                    0x1781  /* Vendor ID */
 #define TELLDUS_TELLSTICK_PID          0x0C30  /* RF control dongle 433 MHz using FT232RL */
 
+/*
+ * NOVITUS printers
+ */
+#define NOVITUS_VID                    0x1a28
+#define NOVITUS_BONO_E_PID             0x6010
+
 /*
  * RT Systems programming cables for various ham radios
  */
 #define BRAINBOXES_US_160_6_PID                0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */
 #define BRAINBOXES_US_160_7_PID                0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */
 #define BRAINBOXES_US_160_8_PID                0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */
+
+/*
+ * ekey biometric systems GmbH (http://ekey.net/)
+ */
+#define FTDI_EKEY_CONV_USB_PID         0xCB08  /* Converter USB */
index 5f4b0cd0f6e9734193dac84ba23373f13e301d65..b0eb1dfc601ad10941c15ea1b68dec8f69b1e3e7 100644 (file)
@@ -219,7 +219,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
 
        /* The conncected devices do not have a bulk write endpoint,
         * to transmit data to de barcode device the control endpoint is used */
-       dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+       dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
        if (!dr) {
                dev_err(&port->dev, "out of memory\n");
                count = -ENOMEM;
index 3088c85d610fdeda38b32f6e38bb7e83bfc88e15..b631a44fa7e7573019b6eecfac11e15a763db83f 100644 (file)
@@ -269,14 +269,19 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_PRODUCT_DE910_DUAL               0x1010
 #define TELIT_PRODUCT_UE910_V2                 0x1012
 #define TELIT_PRODUCT_LE920                    0x1200
+#define TELIT_PRODUCT_LE910                    0x1201
 
 /* ZTE PRODUCTS */
 #define ZTE_VENDOR_ID                          0x19d2
 #define ZTE_PRODUCT_MF622                      0x0001
 #define ZTE_PRODUCT_MF628                      0x0015
 #define ZTE_PRODUCT_MF626                      0x0031
-#define ZTE_PRODUCT_MC2718                     0xffe8
 #define ZTE_PRODUCT_AC2726                     0xfff1
+#define ZTE_PRODUCT_CDMA_TECH                  0xfffe
+#define ZTE_PRODUCT_AC8710T                    0xffff
+#define ZTE_PRODUCT_MC2718                     0xffe8
+#define ZTE_PRODUCT_AD3812                     0xffeb
+#define ZTE_PRODUCT_MC2716                     0xffed
 
 #define BENQ_VENDOR_ID                         0x04a5
 #define BENQ_PRODUCT_H10                       0x4068
@@ -357,6 +362,7 @@ static void option_instat_callback(struct urb *urb);
 
 /* Haier products */
 #define HAIER_VENDOR_ID                                0x201e
+#define HAIER_PRODUCT_CE81B                    0x10f8
 #define HAIER_PRODUCT_CE100                    0x2009
 
 /* Cinterion (formerly Siemens) products */
@@ -494,6 +500,10 @@ static void option_instat_callback(struct urb *urb);
 #define INOVIA_VENDOR_ID                       0x20a6
 #define INOVIA_SEW858                          0x1105
 
+/* VIA Telecom */
+#define VIATELECOM_VENDOR_ID                   0x15eb
+#define VIATELECOM_PRODUCT_CDS7                        0x0001
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
                OPTION_BLACKLIST_NONE = 0,
@@ -527,10 +537,18 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = {
        .reserved = BIT(4),
 };
 
+static const struct option_blacklist_info zte_ad3812_z_blacklist = {
+       .sendsetup = BIT(0) | BIT(1) | BIT(2),
+};
+
 static const struct option_blacklist_info zte_mc2718_z_blacklist = {
        .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
 };
 
+static const struct option_blacklist_info zte_mc2716_z_blacklist = {
+       .sendsetup = BIT(1) | BIT(2) | BIT(3),
+};
+
 static const struct option_blacklist_info huawei_cdc12_blacklist = {
        .reserved = BIT(1) | BIT(2),
 };
@@ -572,6 +590,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
        .reserved = BIT(3) | BIT(4),
 };
 
+static const struct option_blacklist_info telit_le910_blacklist = {
+       .sendsetup = BIT(0),
+       .reserved = BIT(1) | BIT(2),
+};
+
 static const struct option_blacklist_info telit_le920_blacklist = {
        .sendsetup = BIT(0),
        .reserved = BIT(1) | BIT(5),
@@ -1071,6 +1094,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) },
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
@@ -1121,6 +1145,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+               .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
                .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
@@ -1545,13 +1571,18 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
 
-       /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
         .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
+        .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
+        .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
 
        { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
        { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
@@ -1591,6 +1622,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
        { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HAIER_VENDOR_ID, HAIER_PRODUCT_CE81B, 0xff, 0xff, 0xff) },
        /* Pirelli  */
        { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) },
        { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2, 0xff) },
@@ -1725,6 +1757,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
        { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
+       { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1919,6 +1952,8 @@ static void option_instat_callback(struct urb *urb)
                        dev_dbg(dev, "%s: type %x req %x\n", __func__,
                                req_pkt->bRequestType, req_pkt->bRequest);
                }
+       } else if (status == -ENOENT || status == -ESHUTDOWN) {
+               dev_dbg(dev, "%s: urb stopped: %d\n", __func__, status);
        } else
                dev_err(dev, "%s: error %d\n", __func__, status);
 
index a0b58e252073c64b7ebc6fbb4f7fad59676d4ce0..de3e15d8eb1054e8506a044a8ac43b10487f96bc 100644 (file)
@@ -47,6 +47,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
        { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
        { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
+       { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
        { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
index 42bc082896ac8e9d8f913168a0f209a7bebb673e..71fd9da1d6e7ac6e36ecdf38e8f8192c60fbbc39 100644 (file)
@@ -22,6 +22,7 @@
 #define PL2303_PRODUCT_ID_GPRS         0x0609
 #define PL2303_PRODUCT_ID_HCR331       0x331a
 #define PL2303_PRODUCT_ID_MOTOROLA     0x0307
+#define PL2303_PRODUCT_ID_ZTEK         0xe1f1
 
 #define ATEN_VENDOR_ID         0x0557
 #define ATEN_VENDOR_ID2                0x0547
index 4e45908541238f992a07a5b646fd168613576aca..5aaa2b675116effe0149b7294bfae1be18dc2977 100644 (file)
@@ -282,14 +282,19 @@ static const struct usb_device_id id_table[] = {
        /* Sierra Wireless HSPA Non-Composite Device */
        { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
        { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */
-       { USB_DEVICE(0x1199, 0x68A3),   /* Sierra Wireless Direct IP modems */
+       /* Sierra Wireless Direct IP modems */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68A3, 0xFF, 0xFF, 0xFF),
+         .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+       },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
        /* AT&T Direct IP LTE modems */
        { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
-       { USB_DEVICE(0x0f3d, 0x68A3),   /* Airprime/Sierra Wireless Direct IP modems */
+       /* Airprime/Sierra Wireless Direct IP modems */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68A3, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
 
index c1032d42b9d57738c4f371923f10a6b99a4d6671..80d689f0fda9ee834d4ea944a8e4e56f689f1ef9 100644 (file)
@@ -778,29 +778,39 @@ static int usb_serial_probe(struct usb_interface *interface,
                if (usb_endpoint_is_bulk_in(endpoint)) {
                        /* we found a bulk in endpoint */
                        dev_dbg(ddev, "found bulk in on endpoint %d\n", i);
-                       bulk_in_endpoint[num_bulk_in] = endpoint;
-                       ++num_bulk_in;
+                       if (num_bulk_in < MAX_NUM_PORTS) {
+                               bulk_in_endpoint[num_bulk_in] = endpoint;
+                               ++num_bulk_in;
+                       }
                }
 
                if (usb_endpoint_is_bulk_out(endpoint)) {
                        /* we found a bulk out endpoint */
                        dev_dbg(ddev, "found bulk out on endpoint %d\n", i);
-                       bulk_out_endpoint[num_bulk_out] = endpoint;
-                       ++num_bulk_out;
+                       if (num_bulk_out < MAX_NUM_PORTS) {
+                               bulk_out_endpoint[num_bulk_out] = endpoint;
+                               ++num_bulk_out;
+                       }
                }
 
                if (usb_endpoint_is_int_in(endpoint)) {
                        /* we found a interrupt in endpoint */
                        dev_dbg(ddev, "found interrupt in on endpoint %d\n", i);
-                       interrupt_in_endpoint[num_interrupt_in] = endpoint;
-                       ++num_interrupt_in;
+                       if (num_interrupt_in < MAX_NUM_PORTS) {
+                               interrupt_in_endpoint[num_interrupt_in] =
+                                               endpoint;
+                               ++num_interrupt_in;
+                       }
                }
 
                if (usb_endpoint_is_int_out(endpoint)) {
                        /* we found an interrupt out endpoint */
                        dev_dbg(ddev, "found interrupt out on endpoint %d\n", i);
-                       interrupt_out_endpoint[num_interrupt_out] = endpoint;
-                       ++num_interrupt_out;
+                       if (num_interrupt_out < MAX_NUM_PORTS) {
+                               interrupt_out_endpoint[num_interrupt_out] =
+                                               endpoint;
+                               ++num_interrupt_out;
+                       }
                }
        }
 
@@ -823,8 +833,10 @@ static int usb_serial_probe(struct usb_interface *interface,
                                if (usb_endpoint_is_int_in(endpoint)) {
                                        /* we found a interrupt in endpoint */
                                        dev_dbg(ddev, "found interrupt in for Prolific device on separate interface\n");
-                                       interrupt_in_endpoint[num_interrupt_in] = endpoint;
-                                       ++num_interrupt_in;
+                                       if (num_interrupt_in < MAX_NUM_PORTS) {
+                                               interrupt_in_endpoint[num_interrupt_in] = endpoint;
+                                               ++num_interrupt_in;
+                                       }
                                }
                        }
                }
@@ -864,6 +876,11 @@ static int usb_serial_probe(struct usb_interface *interface,
                        num_ports = type->num_ports;
        }
 
+       if (num_ports > MAX_NUM_PORTS) {
+               dev_warn(ddev, "too many ports requested: %d\n", num_ports);
+               num_ports = MAX_NUM_PORTS;
+       }
+
        serial->num_ports = num_ports;
        serial->num_bulk_in = num_bulk_in;
        serial->num_bulk_out = num_bulk_out;
index 347caad47a121d3f7a26ec7f3246a6960114ffc2..5e3dd9f87ff5bdd59044da0068ae8be84c6eac0e 100644 (file)
@@ -521,6 +521,10 @@ static void command_port_read_callback(struct urb *urb)
                dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__);
                return;
        }
+       if (!urb->actual_length) {
+               dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__);
+               return;
+       }
        if (status) {
                dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status);
                if (status != -ENOENT)
@@ -541,7 +545,8 @@ static void command_port_read_callback(struct urb *urb)
                /* These are unsolicited reports from the firmware, hence no
                   waiting command to wakeup */
                dev_dbg(&urb->dev->dev, "%s - event received\n", __func__);
-       } else if (data[0] == WHITEHEAT_GET_DTR_RTS) {
+       } else if ((data[0] == WHITEHEAT_GET_DTR_RTS) &&
+               (urb->actual_length - 1 <= sizeof(command_info->result_buffer))) {
                memcpy(command_info->result_buffer, &data[1],
                                                urb->actual_length - 1);
                command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
index eae2c873b39ff7dbb2ecd8d7624a1e2aec14e8ca..d6a3fbd029be2d3ee069bccb371b2002f2fc4953 100644 (file)
@@ -273,28 +273,16 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
 }
 
 static const struct usb_device_id id_table[] = {
-       /* AC8710, AC8710T */
-       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) },
-        /* AC8700 */
-       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) },
-       /* MG880 */
-       { USB_DEVICE(0x19d2, 0xfffd) },
-       { USB_DEVICE(0x19d2, 0xfffc) },
-       { USB_DEVICE(0x19d2, 0xfffb) },
-       /* AC8710_V3 */
+       { USB_DEVICE(0x19d2, 0xffec) },
+       { USB_DEVICE(0x19d2, 0xffee) },
        { USB_DEVICE(0x19d2, 0xfff6) },
        { USB_DEVICE(0x19d2, 0xfff7) },
        { USB_DEVICE(0x19d2, 0xfff8) },
        { USB_DEVICE(0x19d2, 0xfff9) },
-       { USB_DEVICE(0x19d2, 0xffee) },
-       /* AC2716, MC2716 */
-       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) },
-       /* AD3812 */
-       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) },
-       { USB_DEVICE(0x19d2, 0xffec) },
-       { USB_DEVICE(0x05C6, 0x3197) },
-       { USB_DEVICE(0x05C6, 0x6000) },
-       { USB_DEVICE(0x05C6, 0x9008) },
+       { USB_DEVICE(0x19d2, 0xfffb) },
+       { USB_DEVICE(0x19d2, 0xfffc) },
+       /* MG880 */
+       { USB_DEVICE(0x19d2, 0xfffd) },
        { },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
index 22c7d4360fa222722369b6861da34bafdd4b5b96..b1d815eb6d0bb34d8edeff31581eafd1ad983a0c 100644 (file)
@@ -1118,6 +1118,31 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
                 */
                if (result == USB_STOR_XFER_LONG)
                        fake_sense = 1;
+
+               /*
+                * Sometimes a device will mistakenly skip the data phase
+                * and go directly to the status phase without sending a
+                * zero-length packet.  If we get a 13-byte response here,
+                * check whether it really is a CSW.
+                */
+               if (result == USB_STOR_XFER_SHORT &&
+                               srb->sc_data_direction == DMA_FROM_DEVICE &&
+                               transfer_length - scsi_get_resid(srb) ==
+                                       US_BULK_CS_WRAP_LEN) {
+                       struct scatterlist *sg = NULL;
+                       unsigned int offset = 0;
+
+                       if (usb_stor_access_xfer_buf((unsigned char *) bcs,
+                                       US_BULK_CS_WRAP_LEN, srb, &sg,
+                                       &offset, FROM_XFER_BUF) ==
+                                               US_BULK_CS_WRAP_LEN &&
+                                       bcs->Signature ==
+                                               cpu_to_le32(US_BULK_CS_SIGN)) {
+                               usb_stor_dbg(us, "Device skipped data phase\n");
+                               scsi_set_resid(srb, transfer_length);
+                               goto skipped_data_phase;
+                       }
+               }
        }
 
        /* See flow chart on pg 15 of the Bulk Only Transport spec for
@@ -1153,6 +1178,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
        if (result != USB_STOR_XFER_GOOD)
                return USB_STOR_TRANSPORT_ERROR;
 
+ skipped_data_phase:
        /* check bulk status */
        residue = le32_to_cpu(bcs->Residue);
        usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
index 042c83b010463078c19e03b92940521fa69fd921..7f625306ea803d8452db6c70dcaeb505c8f9687a 100644 (file)
@@ -101,6 +101,12 @@ UNUSUAL_DEV(  0x03f0, 0x4002, 0x0001, 0x0001,
                "PhotoSmart R707",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY),
 
+UNUSUAL_DEV(  0x03f3, 0x0001, 0x0000, 0x9999,
+               "Adaptec",
+               "USBConnect 2000",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
 /* Reported by Sebastian Kapfer <sebastian_kapfer@gmx.net>
  * and Olaf Hering <olh@suse.de> (different bcd's, same vendor/product)
  * for USB floppies that need the SINGLE_LUN enforcement.
@@ -741,6 +747,12 @@ UNUSUAL_DEV(  0x059b, 0x0001, 0x0100, 0x0100,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_SINGLE_LUN ),
 
+UNUSUAL_DEV(  0x059b, 0x0040, 0x0100, 0x0100,
+               "Iomega",
+               "Jaz USB Adapter",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_SINGLE_LUN ),
+
 /* Reported by <Hendryk.Pfeiffer@gmx.de> */
 UNUSUAL_DEV(  0x059f, 0x0643, 0x0000, 0x0000,
                "LaCie",
@@ -1113,6 +1125,18 @@ UNUSUAL_DEV(  0x0851, 0x1543, 0x0200, 0x0200,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NOT_LOCKABLE),
 
+UNUSUAL_DEV(  0x085a, 0x0026, 0x0100, 0x0133,
+               "Xircom",
+               "PortGear USB-SCSI (Mac USB Dock)",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
+UNUSUAL_DEV(  0x085a, 0x0028, 0x0100, 0x0133,
+               "Xircom",
+               "PortGear USB to SCSI Converter",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
 /* Submitted by Jan De Luyck <lkml@kcore.org> */
 UNUSUAL_DEV(  0x08bd, 0x1100, 0x0000, 0x0000,
                "CITIZEN",
@@ -1945,6 +1969,14 @@ UNUSUAL_DEV(  0x152d, 0x2329, 0x0100, 0x0100,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
 
+/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
+ * and Mac USB Dock USB-SCSI */
+UNUSUAL_DEV(  0x1645, 0x0007, 0x0100, 0x0133,
+               "Entrega Technologies",
+               "USB to SCSI Converter",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
 /* Reported by Robert Schedel <r.schedel@yahoo.de>
  * Note: this is a 'super top' device like the above 14cd/6600 device */
 UNUSUAL_DEV(  0x1652, 0x6600, 0x0201, 0x0201,
@@ -1967,6 +1999,12 @@ UNUSUAL_DEV(  0x177f, 0x0400, 0x0000, 0x0000,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
 
+UNUSUAL_DEV(  0x1822, 0x0001, 0x0000, 0x9999,
+               "Ariston Technologies",
+               "iConnect USB to SCSI adapter",
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+
 /* Reported by Hans de Goede <hdegoede@redhat.com>
  * These Appotech controllers are found in Picture Frames, they provide a
  * (buggy) emulation of a cdrom drive which contains the windows software
index 231881c2b355ef080aa1240079e0d9e1eb416ba5..42c30c05826a27689b5d61ae993294595988c8f3 100644 (file)
@@ -613,6 +613,10 @@ void adf_device_destroy(struct adf_device *dev)
        }
        mutex_destroy(&dev->post_lock);
        mutex_destroy(&dev->client_lock);
+
+       if (dev->timeline)
+               sync_timeline_destroy(&dev->timeline->obj);
+
        adf_obj_destroy(&dev->base, &adf_devices);
 }
 EXPORT_SYMBOL(adf_device_destroy);
index 61b182bf32a22962492cc79a3b1e60f3f49ef388..dbfe4eecf12e56d328478dff9a84064c591bfe85 100644 (file)
@@ -205,7 +205,6 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
 static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
                              int bottom_only)
 {
-       int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
        unsigned int cw = vc->vc_font.width;
        unsigned int ch = vc->vc_font.height;
        unsigned int rw = info->var.xres - (vc->vc_cols*cw);
@@ -214,7 +213,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
        unsigned int bs = info->var.yres - bh;
        struct fb_fillrect region;
 
-       region.color = attr_bgcol_ec(bgshift, vc, info);
+       region.color = 0;
        region.rop = ROP_COPY;
 
        if (rw && !bottom_only) {
index 41b32ae23dacb9f3dac50a52066ef5f276729d2c..5a3cbf6dff4d944ff5a0ac3fb1fd62c2fba3106a 100644 (file)
@@ -197,9 +197,8 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
        unsigned int bh = info->var.xres - (vc->vc_rows*ch);
        unsigned int bs = vc->vc_rows*ch;
        struct fb_fillrect region;
-       int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
 
-       region.color = attr_bgcol_ec(bgshift,vc,info);
+       region.color = 0;
        region.rop = ROP_COPY;
 
        if (rw && !bottom_only) {
index a93670ef7f89ea03fa1bd59ec64636d4b755f4a8..e7ee44db4e98b1318cf8e9f679843354e6b84862 100644 (file)
@@ -180,9 +180,8 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
        unsigned int bh = info->var.xres - (vc->vc_rows*ch);
        unsigned int rs = info->var.yres - rw;
        struct fb_fillrect region;
-       int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
 
-       region.color = attr_bgcol_ec(bgshift,vc,info);
+       region.color = 0;
        region.rop = ROP_COPY;
 
        if (rw && !bottom_only) {
index ff0872c0498bd6e32bdb22b6a91f55f40796e7ef..19e3714abfe8fb765eaaea2ebeb9a21c5c2725de 100644 (file)
@@ -227,9 +227,8 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
        unsigned int rw = info->var.xres - (vc->vc_cols*cw);
        unsigned int bh = info->var.yres - (vc->vc_rows*ch);
        struct fb_fillrect region;
-       int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
 
-       region.color = attr_bgcol_ec(bgshift,vc,info);
+       region.color = 0;
        region.rop = ROP_COPY;
 
        if (rw && !bottom_only) {
index f3adbabbaeb80c9eaac94068a9afb65362c61d96..00247d0c13e9bf721bf0600674c909f73d28e63e 100755 (executable)
@@ -2,10 +2,10 @@ obj-$(CONFIG_FB_ROCKCHIP) += rk_fb.o rkfb_sysfs.o bmp_helper.o screen/
 obj-$(CONFIG_DRM_ROCKCHIP)  += rk_drm_fb.o screen/
 obj-$(CONFIG_RK_TRSM) += transmitter/
 obj-$(CONFIG_DRM_ROCKCHIP)  += lcdc/
-obj-$(CONFIG_FB_ROCKCHIP) += lcdc/
+obj-$(CONFIG_FB_ROCKCHIP) += display-sys.o lcdc/
 obj-$(CONFIG_ROCKCHIP_RGA) += rga/
 obj-$(CONFIG_ROCKCHIP_RGA2) += rga2/
-obj-$(CONFIG_RK_HDMI) += display-sys.o hdmi/
+obj-$(CONFIG_RK_HDMI) += hdmi/
 obj-$(CONFIG_IEP) += iep/
 obj-$(CONFIG_RK_TVENCODER) += tve/
 
index 9cca081f5d966f8f65731aefe24b4367b6f39f9f..11a2eb57eb321ac00ab42bbf1cb2179664405422 100644 (file)
@@ -36,7 +36,7 @@
 #define grf_writel(v, offset)                          \
 do {                                                   \
        writel_relaxed(v, RK_GRF_VIRT + offset);        \
-       dsb();                                          \
+       dsb(sy);                                                \
 } while (0)
 #define HDMI_PD_ON             (1 << 0)
 #define HDMI_PCLK_ON           (1 << 1)
index 5368cc56c0efd9cf12f393023f1d8fee7fb15276..65109f52a7b3b32cde03d39930ed71515eb39462 100755 (executable)
@@ -432,7 +432,7 @@ static void iep_reg_copy_to_hw(struct iep_reg *reg)
     //dmac_flush_range(&pbase[0], &pbase[IEP_REG_LEN]);
     //outer_flush_range(virt_to_phys(&pbase[0]),virt_to_phys(&pbase[IEP_REG_LEN]));
 
-    dsb();
+    dsb(sy);
 }
 
 /** switch fields order before the next lcdc frame start
index 5c69c75ffd161807ab8f14ef2ffc8943dad906df..4731bdb7207e31d0f1edc31848aa1d7ce488c342 100755 (executable)
@@ -43,3 +43,8 @@ config LCDC_RK312X
         depends on DRM_ROCKCHIP || FB_ROCKCHIP
         help
           Driver for rk312x lcdc.
+config LCDC_RK3368
+               bool "rk3368 lcdc support"
+                       depends on DRM_ROCKCHIP || FB_ROCKCHIP
+                       help
+                       Driver for rk3368 lcdc.There are one lcd controllers on rk3368
index 5f075557dbe77181afb86c6c5b19a2f8e52d5e51..8c878d025af91cf2a568e848a157b6ee21310d21 100644 (file)
@@ -5,3 +5,4 @@ obj-$(CONFIG_LCDC_RK3188) += rk3188_lcdc.o
 obj-$(CONFIG_LCDC_RK3288) += rk3288_lcdc.o
 obj-$(CONFIG_LCDC_RK3036) += rk3036_lcdc.o
 obj-$(CONFIG_LCDC_RK312X) += rk312x_lcdc.o
+obj-$(CONFIG_LCDC_RK3368) += rk3368_lcdc.o
diff --git a/drivers/video/rockchip/lcdc/rk3368_lcdc.c b/drivers/video/rockchip/lcdc/rk3368_lcdc.c
new file mode 100644 (file)
index 0000000..2340d65
--- /dev/null
@@ -0,0 +1,4431 @@
+/*
+ * drivers/video/rockchip/lcdc/rk3368_lcdc.c
+ *
+ * Copyright (C) 2014 ROCKCHIP, Inc.
+ *Author:hjc<hjc@rock-chips.com>
+ *This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/rockchip-iovmm.h>
+#include <asm/div64.h>
+#include <linux/uaccess.h>
+#include <linux/rockchip/cpu.h>
+#include <linux/rockchip/iomap.h>
+#include <linux/rockchip/grf.h>
+#include <linux/rockchip/common.h>
+#include <dt-bindings/clock/rk_system_status.h>
+
+#include "rk3368_lcdc.h"
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif
+#define CONFIG_RK_FPGA 1
+
+static int dbg_thresd;
+module_param(dbg_thresd, int, S_IRUGO | S_IWUSR);
+
+#define DBG(level, x...) do {                  \
+       if (unlikely(dbg_thresd >= level))      \
+               pr_info(x);\
+       } while (0)
+
+static struct rk_lcdc_win lcdc_win[] = {
+       [0] = {
+              .name = "win0",
+              .id = 0,
+              .support_3d = false,
+              },
+       [1] = {
+              .name = "win1",
+              .id = 1,
+              .support_3d = false,
+              },
+       [2] = {
+              .name = "win2",
+              .id = 2,
+              .support_3d = false,
+              },
+       [3] = {
+              .name = "win3",
+              .id = 3,
+              .support_3d = false,
+              },
+       [4] = {
+              .name = "hwc",
+              .id = 4,
+              .support_3d = false,
+              }
+};
+
+static int rk3368_lcdc_set_bcsh(struct rk_lcdc_driver *dev_drv, bool enable);
+
+/*#define WAIT_FOR_SYNC 1*/
+u32 rk3368_get_hard_ware_vskiplines(u32 srch, u32 dsth)
+{
+       u32 vscalednmult;
+
+       if (srch >= (u32) (4 * dsth * MIN_SCALE_FACTOR_AFTER_VSKIP))
+               vscalednmult = 4;
+       else if (srch >= (u32) (2 * dsth * MIN_SCALE_FACTOR_AFTER_VSKIP))
+               vscalednmult = 2;
+       else
+               vscalednmult = 1;
+
+       return vscalednmult;
+}
+
+static int rk3368_lcdc_set_lut(struct rk_lcdc_driver *dev_drv)
+{
+       int i, j;
+       int __iomem *c;
+       u32 v, r, g, b;
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       lcdc_msk_reg(lcdc_dev, DSP_CTRL1, m_DSP_LUT_EN, v_DSP_LUT_EN(0));
+       lcdc_cfg_done(lcdc_dev);
+       mdelay(25);
+       for (i = 0; i < 256; i++) {
+               v = dev_drv->cur_screen->dsp_lut[i];
+               c = lcdc_dev->dsp_lut_addr_base + (i << 2);
+               b = (v & 0xff) << 2;
+               g = (v & 0xff00) << 4;
+               r = (v & 0xff0000) << 6;
+               v = r + g + b;
+               for (j = 0; j < 4; j++) {
+                       writel_relaxed(v, c);
+                       v += (1 + (1 << 10) + (1 << 20));
+                       c++;
+               }
+       }
+       lcdc_msk_reg(lcdc_dev, DSP_CTRL1, m_DSP_LUT_EN, v_DSP_LUT_EN(1));
+
+       return 0;
+}
+
+static int rk3368_lcdc_clk_enable(struct lcdc_device *lcdc_dev)
+{
+#ifdef CONFIG_RK_FPGA
+       lcdc_dev->clk_on = 1;
+       return 0;
+#endif
+       if (!lcdc_dev->clk_on) {
+               clk_prepare_enable(lcdc_dev->hclk);
+               clk_prepare_enable(lcdc_dev->dclk);
+               clk_prepare_enable(lcdc_dev->aclk);
+               clk_prepare_enable(lcdc_dev->pd);
+               spin_lock(&lcdc_dev->reg_lock);
+               lcdc_dev->clk_on = 1;
+               spin_unlock(&lcdc_dev->reg_lock);
+       }
+
+       return 0;
+}
+
+static int rk3368_lcdc_clk_disable(struct lcdc_device *lcdc_dev)
+{
+#ifdef CONFIG_RK_FPGA
+       lcdc_dev->clk_on = 0;
+       return 0;
+#endif
+       if (lcdc_dev->clk_on) {
+               spin_lock(&lcdc_dev->reg_lock);
+               lcdc_dev->clk_on = 0;
+               spin_unlock(&lcdc_dev->reg_lock);
+               mdelay(25);
+               clk_disable_unprepare(lcdc_dev->dclk);
+               clk_disable_unprepare(lcdc_dev->hclk);
+               clk_disable_unprepare(lcdc_dev->aclk);
+               clk_disable_unprepare(lcdc_dev->pd);
+       }
+
+       return 0;
+}
+
+static int rk3368_lcdc_disable_irq(struct lcdc_device *lcdc_dev)
+{
+       u32 mask, val;
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (likely(lcdc_dev->clk_on)) {
+               mask = m_FS_INTR_EN | m_FS_NEW_INTR_EN |
+                   m_ADDR_SAME_INTR_EN | m_LINE_FLAG0_INTR_EN |
+                   m_LINE_FLAG1_INTR_EN | m_BUS_ERROR_INTR_EN |
+                   m_WIN0_EMPTY_INTR_EN | m_WIN1_EMPTY_INTR_EN |
+                   m_WIN2_EMPTY_INTR_EN | m_WIN3_EMPTY_INTR_EN |
+                   m_HWC_EMPTY_INTR_EN | m_POST_BUF_EMPTY_INTR_EN |
+                   m_PWM_GEN_INTR_EN | m_DSP_HOLD_VALID_INTR_EN;
+               val = v_FS_INTR_EN(0) | v_FS_NEW_INTR_EN(0) |
+                   v_ADDR_SAME_INTR_EN(0) |
+                   v_LINE_FLAG0_INTR_EN(0) | v_LINE_FLAG1_INTR_EN(0) |
+                   v_BUS_ERROR_INTR_EN(0) | v_WIN0_EMPTY_INTR_EN(0) |
+                   v_WIN1_EMPTY_INTR_EN(0) | v_WIN2_EMPTY_INTR_EN(0) |
+                   v_WIN3_EMPTY_INTR_EN(0) | v_HWC_EMPTY_INTR_EN(0) |
+                   v_POST_BUF_EMPTY_INTR_EN(0) |
+                   v_PWM_GEN_INTR_EN(0) | v_DSP_HOLD_VALID_INTR_EN(0);
+               lcdc_msk_reg(lcdc_dev, INTR_EN, mask, val);
+
+               mask = m_FS_INTR_CLR | m_FS_NEW_INTR_CLR |
+                   m_ADDR_SAME_INTR_CLR | m_LINE_FLAG0_INTR_CLR |
+                   m_LINE_FLAG1_INTR_CLR | m_BUS_ERROR_INTR_CLR |
+                   m_WIN0_EMPTY_INTR_CLR | m_WIN1_EMPTY_INTR_CLR |
+                   m_WIN2_EMPTY_INTR_CLR | m_WIN3_EMPTY_INTR_CLR |
+                   m_HWC_EMPTY_INTR_CLR | m_POST_BUF_EMPTY_INTR_CLR |
+                   m_PWM_GEN_INTR_CLR | m_DSP_HOLD_VALID_INTR_CLR;
+               val = v_FS_INTR_CLR(1) | v_FS_NEW_INTR_CLR(1) |
+                   v_ADDR_SAME_INTR_CLR(1) |
+                   v_LINE_FLAG0_INTR_CLR(1) | v_LINE_FLAG1_INTR_CLR(1) |
+                   v_BUS_ERROR_INTR_CLR(1) | v_WIN0_EMPTY_INTR_CLR(1) |
+                   v_WIN1_EMPTY_INTR_CLR(1) | v_WIN2_EMPTY_INTR_CLR(1) |
+                   v_WIN3_EMPTY_INTR_CLR(1) | v_HWC_EMPTY_INTR_CLR(1) |
+                   v_POST_BUF_EMPTY_INTR_CLR(1) |
+                   v_PWM_GEN_INTR_CLR(1) | v_DSP_HOLD_VALID_INTR_CLR(1);
+               lcdc_msk_reg(lcdc_dev, INTR_CLEAR, mask, val);
+               lcdc_cfg_done(lcdc_dev);
+               spin_unlock(&lcdc_dev->reg_lock);
+       } else {
+               spin_unlock(&lcdc_dev->reg_lock);
+       }
+       mdelay(1);
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_reg_dump - dump the shadow copy and the live register file
+ * @dev_drv: lcdc driver instance
+ *
+ * Prints registers 0x000..0x200 in 16-byte rows, first from the software
+ * backup (regsbak), then from the memory-mapped hardware (regs).
+ * Always returns 0.
+ */
+static int rk3368_lcdc_reg_dump(struct rk_lcdc_driver *dev_drv)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       int *cbase = (int *)lcdc_dev->regs;
+       int *regsbak = (int *)lcdc_dev->regsbak;
+       int i, j;
+       /*
+        * One output line is "0x%04x: " (8 chars) plus 4 x "%08x  "
+        * (10 chars each) plus the NUL terminator = 49 bytes; the former
+        * dbg_message[30]/buf[10] buffers overflowed on every row.
+        */
+       char dbg_message[64];
+       char buf[16];
+
+       pr_info("lcd back up reg:\n");
+       for (i = 0; i <= (0x200 >> 4); i++) {
+               /* snprintf NUL-terminates, so no memset is needed */
+               snprintf(dbg_message, sizeof(dbg_message), "0x%04x: ", i * 16);
+               for (j = 0; j < 4; j++) {
+                       snprintf(buf, sizeof(buf), "%08x  ",
+                                *(regsbak + i * 4 + j));
+                       strcat(dbg_message, buf);
+               }
+               pr_info("%s\n", dbg_message);
+       }
+
+       pr_info("lcdc reg:\n");
+       for (i = 0; i <= (0x200 >> 4); i++) {
+               snprintf(dbg_message, sizeof(dbg_message), "0x%04x: ", i * 16);
+               for (j = 0; j < 4; j++) {
+                       snprintf(buf, sizeof(buf), "%08x  ",
+                               readl_relaxed(cbase + i * 4 + j));
+                       strcat(dbg_message, buf);
+               }
+               pr_info("%s\n", dbg_message);
+       }
+
+       return 0;
+}
+
+/*
+ * WIN_EN(id) - generate win<id>_enable(lcdc_dev, en)
+ *
+ * Sets or clears the window enable bit under reg_lock, commits the
+ * change with lcdc_cfg_done(), then busy-waits by reading the bit back
+ * until the hardware reflects the requested state.
+ * NOTE(review): the read-back loop has no timeout or bound; if the
+ * hardware never latches the bit this spins forever with the spinlock
+ * held - confirm this cannot happen on real silicon.
+ */
+#define WIN_EN(id)             \
+static int win##id##_enable(struct lcdc_device *lcdc_dev, int en)      \
+{ \
+       u32 msk, val;                                                   \
+       spin_lock(&lcdc_dev->reg_lock);                                 \
+       msk =  m_WIN##id##_EN;                                          \
+       val  =  v_WIN##id##_EN(en);                                     \
+       lcdc_msk_reg(lcdc_dev, WIN##id##_CTRL0, msk, val);              \
+       lcdc_cfg_done(lcdc_dev);                                        \
+       val = lcdc_read_bit(lcdc_dev, WIN##id##_CTRL0, msk);            \
+       while (val !=  (!!en))  {                                       \
+               val = lcdc_read_bit(lcdc_dev, WIN##id##_CTRL0, msk);    \
+       }                                                               \
+       spin_unlock(&lcdc_dev->reg_lock);                               \
+       return 0;                                                       \
+}
+
+/* instantiate the enable helpers for windows 0..3 */
+WIN_EN(0);
+WIN_EN(1);
+WIN_EN(2);
+WIN_EN(3);
+/* enable/disable a window immediately, bypassing the normal update path */
+static int rk3368_lcdc_win_direct_en(struct rk_lcdc_driver *drv,
+                                    int win_id, int en)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(drv, struct lcdc_device, driver);
+
+       switch (win_id) {
+       case 0:
+               win0_enable(lcdc_dev, en);
+               break;
+       case 1:
+               win1_enable(lcdc_dev, en);
+               break;
+       case 2:
+               win2_enable(lcdc_dev, en);
+               break;
+       case 3:
+               win3_enable(lcdc_dev, en);
+               break;
+       default:
+               dev_err(lcdc_dev->dev, "invalid win number:%d\n", win_id);
+               break;
+       }
+       return 0;
+}
+
+/*
+ * SET_WIN_ADDR(id) - generate set_win<id>_addr(lcdc_dev, addr)
+ *
+ * Writes the YRGB frame-buffer address, enables the window and commits,
+ * all under reg_lock.
+ * Fix: the enable value used the hard-coded v_WIN0_EN(1) for every id
+ * while the mask was m_WIN##id##_EN; use the matching v_WIN##id##_EN(1)
+ * so mask and value always refer to the same window's bit.
+ */
+#define SET_WIN_ADDR(id) \
+static int set_win##id##_addr(struct lcdc_device *lcdc_dev, u32 addr) \
+{                                                      \
+       u32 msk, val;                                   \
+       spin_lock(&lcdc_dev->reg_lock);                 \
+       lcdc_writel(lcdc_dev, WIN##id##_YRGB_MST, addr);        \
+       msk =  m_WIN##id##_EN;                          \
+       val  =  v_WIN##id##_EN(1);                      \
+       lcdc_msk_reg(lcdc_dev, WIN##id##_CTRL0, msk, val);      \
+       lcdc_cfg_done(lcdc_dev);                        \
+       spin_unlock(&lcdc_dev->reg_lock);               \
+       return 0;                                       \
+}
+
+SET_WIN_ADDR(0);
+SET_WIN_ADDR(1);
+/* point win0 (or, for any other id, win1) at a new frame-buffer address */
+int rk3368_lcdc_direct_set_win_addr(struct rk_lcdc_driver *dev_drv,
+                                   int win_id, u32 addr)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+
+       /* as before, every id other than 0 is routed to win1 */
+       if (win_id != 0)
+               set_win1_addr(lcdc_dev, addr);
+       else
+               set_win0_addr(lcdc_dev, addr);
+
+       return 0;
+}
+
+/*
+ * lcdc_read_reg_defalut_cfg - decode the win0 state programmed at boot
+ *
+ * Walks the register file below FRC_LOWER11_1 and converts the win0
+ * registers (typically set up by the bootloader for a splash screen)
+ * back into the driver's win[0] software state, so the kernel can take
+ * over the display without reprogramming it.  Runs under reg_lock.
+ */
+static void lcdc_read_reg_defalut_cfg(struct lcdc_device *lcdc_dev)
+{
+       int reg = 0;
+       u32 val = 0;
+       struct rk_screen *screen = lcdc_dev->driver.cur_screen;
+       /* sync length + back porch = offset of the active display area */
+       u32 h_pw_bp = screen->mode.hsync_len + screen->mode.left_margin;
+       u32 v_pw_bp = screen->mode.vsync_len + screen->mode.upper_margin;
+       u32 st_x, st_y;
+       struct rk_lcdc_win *win0 = lcdc_dev->driver.win[0];
+
+       spin_lock(&lcdc_dev->reg_lock);
+       for (reg = 0; reg < FRC_LOWER11_1; reg += 4) {
+               val = lcdc_readl(lcdc_dev, reg);
+               switch (reg) {
+               case WIN0_ACT_INFO:
+                       /* hardware stores size - 1 */
+                       win0->area[0].xact = (val & m_WIN0_ACT_WIDTH) + 1;
+                       win0->area[0].yact =
+                           ((val & m_WIN0_ACT_HEIGHT) >> 16) + 1;
+                       break;
+               case WIN0_DSP_INFO:
+                       win0->area[0].xsize = (val & m_WIN0_DSP_WIDTH) + 1;
+                       win0->area[0].ysize =
+                           ((val & m_WIN0_DSP_HEIGHT) >> 16) + 1;
+                       break;
+               case WIN0_DSP_ST:
+                       /* hardware start is relative to sync, xpos/ypos
+                        * are relative to the active area */
+                       st_x = val & m_WIN0_DSP_XST;
+                       st_y = (val & m_WIN0_DSP_YST) >> 16;
+                       win0->area[0].xpos = st_x - h_pw_bp;
+                       win0->area[0].ypos = st_y - v_pw_bp;
+                       break;
+               case WIN0_CTRL0:
+                       win0->state = val & m_WIN0_EN;
+                       win0->area[0].fmt_cfg = (val & m_WIN0_DATA_FMT) >> 1;
+                       win0->fmt_10 = (val & m_WIN0_FMT_10) >> 4;
+                       win0->area[0].format = win0->area[0].fmt_cfg;
+                       break;
+               case WIN0_VIR:
+                       win0->area[0].y_vir_stride = val & m_WIN0_VIR_STRIDE;
+                       win0->area[0].uv_vir_stride =
+                           (val & m_WIN0_VIR_STRIDE_UV) >> 16;
+                       /* convert stride back to a virtual width in pixels;
+                        * factors suggest the stride is in 32-bit words
+                        * (1x for ARGB888, 4/3 for RGB888, 2x for RGB565,
+                        * 4x for YUV) - TODO confirm against the TRM */
+                       if (win0->area[0].format == ARGB888)
+                               win0->area[0].xvir = win0->area[0].y_vir_stride;
+                       else if (win0->area[0].format == RGB888)
+                               win0->area[0].xvir =
+                                   win0->area[0].y_vir_stride * 4 / 3;
+                       else if (win0->area[0].format == RGB565)
+                               win0->area[0].xvir =
+                                   2 * win0->area[0].y_vir_stride;
+                       else    /* YUV */
+                               win0->area[0].xvir =
+                                   4 * win0->area[0].y_vir_stride;
+                       break;
+               case WIN0_YRGB_MST:
+                       win0->area[0].smem_start = val;
+                       break;
+               case WIN0_CBR_MST:
+                       win0->area[0].cbr_start = val;
+                       break;
+               default:
+                       break;
+               }
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+}
+
+/*
+ * rk3368_lcdc_pre_init - one-time basic initialization
+ *
+ * Acquires the controller clocks and power domain, enables them, reads
+ * back the win0 state left by the bootloader, and loads the CABC gauss
+ * filter and FRC dither tables.  Guarded by the pre_init flag so later
+ * calls return immediately.  Always returns 0.
+ */
+static int rk3368_lcdc_pre_init(struct rk_lcdc_driver *dev_drv)
+{
+       u32 mask, val;
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       if (lcdc_dev->pre_init)
+               return 0;
+
+       lcdc_dev->hclk = devm_clk_get(lcdc_dev->dev, "hclk_lcdc");
+       lcdc_dev->aclk = devm_clk_get(lcdc_dev->dev, "aclk_lcdc");
+       lcdc_dev->dclk = devm_clk_get(lcdc_dev->dev, "dclk_lcdc");
+       lcdc_dev->pll_sclk = devm_clk_get(lcdc_dev->dev, "sclk_pll");
+       lcdc_dev->pd = devm_clk_get(lcdc_dev->dev, "pd_lcdc");
+
+       /* NOTE(review): pll_sclk is not included in this error check -
+        * confirm whether that clock is optional on this SoC */
+       if (IS_ERR(lcdc_dev->pd) || (IS_ERR(lcdc_dev->aclk)) ||
+           (IS_ERR(lcdc_dev->dclk)) || (IS_ERR(lcdc_dev->hclk))) {
+               dev_err(lcdc_dev->dev, "failed to get lcdc%d clk source\n",
+                       lcdc_dev->id);
+       }
+
+       rk_disp_pwr_enable(dev_drv);
+       rk3368_lcdc_clk_enable(lcdc_dev);
+
+       /*backup reg config at uboot */
+       lcdc_read_reg_defalut_cfg(lcdc_dev);
+       /* CABC gaussian filter line coefficients */
+       lcdc_writel(lcdc_dev, CABC_GAUSS_LINE0_0, 0x15110903);
+       lcdc_writel(lcdc_dev, CABC_GAUSS_LINE0_1, 0x00030911);
+       lcdc_writel(lcdc_dev, CABC_GAUSS_LINE1_0, 0x1a150b04);
+       lcdc_writel(lcdc_dev, CABC_GAUSS_LINE1_1, 0x00040b15);
+       lcdc_writel(lcdc_dev, CABC_GAUSS_LINE2_0, 0x15110903);
+       lcdc_writel(lcdc_dev, CABC_GAUSS_LINE2_1, 0x00030911);
+
+       /* FRC (frame rate control) dither patterns */
+       lcdc_writel(lcdc_dev, FRC_LOWER01_0, 0x12844821);
+       lcdc_writel(lcdc_dev, FRC_LOWER01_1, 0x21488412);
+       lcdc_writel(lcdc_dev, FRC_LOWER10_0, 0xa55a9696);
+       lcdc_writel(lcdc_dev, FRC_LOWER10_1, 0x5aa56969);
+       lcdc_writel(lcdc_dev, FRC_LOWER11_0, 0xdeb77deb);
+       lcdc_writel(lcdc_dev, FRC_LOWER11_1, 0xed7bb7de);
+
+       /* NOTE(review): mask/val are computed here but never written to a
+        * register before cfg_done - this looks like a missing
+        * lcdc_msk_reg() call to disable auto gating; confirm intent */
+       mask = m_AUTO_GATING_EN;
+       val = v_AUTO_GATING_EN(0);
+       lcdc_cfg_done(lcdc_dev);
+       /*disable win0 to workaround iommu pagefault */
+       /*if (dev_drv->iommu_enabled) */
+       /*      win0_enable(lcdc_dev, 0); */
+       lcdc_dev->pre_init = true;
+
+       return 0;
+}
+
+/* shut the controller down: mask irqs, enter standby, let it settle */
+static void rk3368_lcdc_deint(struct lcdc_device *lcdc_dev)
+{
+       rk3368_lcdc_disable_irq(lcdc_dev);
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (likely(lcdc_dev->clk_on)) {
+               lcdc_dev->clk_on = 0;
+               lcdc_set_bit(lcdc_dev, SYS_CTRL, m_STANDBY_EN);
+               lcdc_cfg_done(lcdc_dev);
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       mdelay(1);
+}
+
+/*
+ * rk3368_lcdc_post_cfg - program the post-processing (placement/scale) path
+ *
+ * Computes the post display active window from the screen's post_dsp_stx/
+ * sty offsets and post_xsize/ysize, honoring x/y mirroring, enables the
+ * post down-scalers when the post size is smaller than the panel
+ * resolution, and handles the interlaced field-1 window.  Writes the
+ * POST_* registers.  Always returns 0.
+ */
+static int rk3368_lcdc_post_cfg(struct rk_lcdc_driver *dev_drv)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_screen *screen = dev_drv->cur_screen;
+       u16 x_res = screen->mode.xres;
+       u16 y_res = screen->mode.yres;
+       u32 mask, val;
+       u16 h_total, v_total;
+       u16 post_hsd_en, post_vsd_en;
+       u16 post_dsp_hact_st, post_dsp_hact_end;
+       u16 post_dsp_vact_st, post_dsp_vact_end;
+       u16 post_dsp_vact_st_f1, post_dsp_vact_end_f1;
+       u16 post_h_fac, post_v_fac;
+
+       /* full line/frame length including sync and porches */
+       h_total = screen->mode.hsync_len + screen->mode.left_margin +
+           x_res + screen->mode.right_margin;
+       v_total = screen->mode.vsync_len + screen->mode.upper_margin +
+           y_res + screen->mode.lower_margin;
+
+       /* clamp the horizontal start so the post window stays on screen */
+       if (screen->post_dsp_stx + screen->post_xsize > x_res) {
+               dev_warn(lcdc_dev->dev, "post:stx[%d]+xsize[%d]>x_res[%d]\n",
+                        screen->post_dsp_stx, screen->post_xsize, x_res);
+               screen->post_dsp_stx = x_res - screen->post_xsize;
+       }
+       /* horizontal active window, measured from sync start */
+       if (screen->x_mirror == 0) {
+               post_dsp_hact_st = screen->post_dsp_stx +
+                   screen->mode.hsync_len + screen->mode.left_margin;
+               post_dsp_hact_end = post_dsp_hact_st + screen->post_xsize;
+       } else {
+               post_dsp_hact_end = h_total - screen->mode.right_margin -
+                   screen->post_dsp_stx;
+               post_dsp_hact_st = post_dsp_hact_end - screen->post_xsize;
+       }
+       /* horizontal down-scale only when post width is a strict subset */
+       if ((screen->post_xsize < x_res) && (screen->post_xsize != 0)) {
+               post_hsd_en = 1;
+               post_h_fac =
+                   GET_SCALE_FACTOR_BILI_DN(x_res, screen->post_xsize);
+       } else {
+               post_hsd_en = 0;
+               post_h_fac = 0x1000;
+       }
+
+       /* clamp the vertical start so the post window stays on screen */
+       if (screen->post_dsp_sty + screen->post_ysize > y_res) {
+               dev_warn(lcdc_dev->dev, "post:sty[%d]+ysize[%d]> y_res[%d]\n",
+                        screen->post_dsp_sty, screen->post_ysize, y_res);
+               screen->post_dsp_sty = y_res - screen->post_ysize;
+       }
+
+       /* vertical active window, measured from sync start */
+       if (screen->y_mirror == 0) {
+               post_dsp_vact_st = screen->post_dsp_sty +
+                   screen->mode.vsync_len + screen->mode.upper_margin;
+               post_dsp_vact_end = post_dsp_vact_st + screen->post_ysize;
+       } else {
+               post_dsp_vact_end = v_total - screen->mode.lower_margin -
+                   screen->post_dsp_sty;
+               post_dsp_vact_st = post_dsp_vact_end - screen->post_ysize;
+       }
+       /* vertical down-scale only when post height is a strict subset */
+       if ((screen->post_ysize < y_res) && (screen->post_ysize != 0)) {
+               post_vsd_en = 1;
+               post_v_fac = GET_SCALE_FACTOR_BILI_DN(y_res,
+                                                     screen->post_ysize);
+       } else {
+               post_vsd_en = 0;
+               post_v_fac = 0x1000;
+       }
+
+       /* second-field window for interlaced output */
+       if (screen->interlace == 1) {
+               post_dsp_vact_st_f1 = v_total + post_dsp_vact_st;
+               post_dsp_vact_end_f1 = post_dsp_vact_st_f1 + screen->post_ysize;
+       } else {
+               post_dsp_vact_st_f1 = 0;
+               post_dsp_vact_end_f1 = 0;
+       }
+       DBG(1, "post:xsize=%d,ysize=%d,xpos=%d",
+           screen->post_xsize, screen->post_ysize, screen->xpos);
+       DBG(1, ",ypos=%d,hsd_en=%d,h_fac=%d,vsd_en=%d,v_fac=%d\n",
+           screen->ypos, post_hsd_en, post_h_fac, post_vsd_en, post_v_fac);
+       /* commit everything to the POST_* registers */
+       mask = m_DSP_HACT_END_POST | m_DSP_HACT_ST_POST;
+       val = v_DSP_HACT_END_POST(post_dsp_hact_end) |
+           v_DSP_HACT_ST_POST(post_dsp_hact_st);
+       lcdc_msk_reg(lcdc_dev, POST_DSP_HACT_INFO, mask, val);
+
+       mask = m_DSP_VACT_END_POST | m_DSP_VACT_ST_POST;
+       val = v_DSP_VACT_END_POST(post_dsp_vact_end) |
+           v_DSP_VACT_ST_POST(post_dsp_vact_st);
+       lcdc_msk_reg(lcdc_dev, POST_DSP_VACT_INFO, mask, val);
+
+       mask = m_POST_HS_FACTOR_YRGB | m_POST_VS_FACTOR_YRGB;
+       val = v_POST_HS_FACTOR_YRGB(post_h_fac) |
+           v_POST_VS_FACTOR_YRGB(post_v_fac);
+       lcdc_msk_reg(lcdc_dev, POST_SCL_FACTOR_YRGB, mask, val);
+
+       mask = m_DSP_VACT_END_POST_F1 | m_DSP_VACT_ST_POST_F1;
+       val = v_DSP_VACT_END_POST_F1(post_dsp_vact_end_f1) |
+           v_DSP_VACT_ST_POST_F1(post_dsp_vact_st_f1);
+       lcdc_msk_reg(lcdc_dev, POST_DSP_VACT_INFO_F1, mask, val);
+
+       mask = m_POST_HOR_SD_EN | m_POST_VER_SD_EN;
+       val = v_POST_HOR_SD_EN(post_hsd_en) | v_POST_VER_SD_EN(post_vsd_en);
+       lcdc_msk_reg(lcdc_dev, POST_SCL_CTRL, mask, val);
+       return 0;
+}
+
+/* program the per-window color-key registers from each win's key value */
+static int rk3368_lcdc_clr_key_cfg(struct rk_lcdc_driver *dev_drv)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       /* color-key register offsets, indexed by window number */
+       const u32 key_regs[4] = {
+               WIN0_COLOR_KEY, WIN1_COLOR_KEY,
+               WIN2_COLOR_KEY, WIN3_COLOR_KEY
+       };
+       struct rk_lcdc_win *win;
+       u32 colorkey_r, colorkey_g, colorkey_b;
+       int i, key_val;
+
+       for (i = 0; i < 4; i++) {
+               win = dev_drv->win[i];
+               key_val = win->color_key_val;
+               /* widen each 8-bit channel into its 10-bit register field */
+               colorkey_r = (key_val & 0xff) << 2;
+               colorkey_g = ((key_val >> 8) & 0xff) << 12;
+               colorkey_b = ((key_val >> 16) & 0xff) << 22;
+               /*color key dither 565/888->aaa */
+               key_val = colorkey_r | colorkey_g | colorkey_b;
+               lcdc_writel(lcdc_dev, key_regs[i], key_val);
+       }
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_alpha_cfg - program a window's blending (alpha) registers
+ * @dev_drv: lcdc driver instance
+ * @win_id: window index (0..3 are overlay windows, 4 is HWC)
+ *
+ * Derives source/destination blend factors from the window's alpha mode
+ * (forced to AB_SRC_OVER below) and from whether per-pixel and/or global
+ * alpha is in use, then writes the window's src/dst alpha control
+ * registers.  Returns 0 on success, -EINVAL for an unknown window id.
+ */
+static int rk3368_lcdc_alpha_cfg(struct rk_lcdc_driver *dev_drv, int win_id)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_lcdc_win *win = dev_drv->win[win_id];
+       struct alpha_config alpha_config;
+       u32 mask, val;
+       int ppixel_alpha = 0, global_alpha = 0, i;
+       u32 src_alpha_ctl, dst_alpha_ctl;
+
+       /*
+        * Zero-init so modes that only set some fields (AB_USER_DEFINE,
+        * AB_DST_IN, AB_DST_OUT, ...) do not leave the remaining fields
+        * uninitialized before they are written to the register below.
+        */
+       memset(&alpha_config, 0, sizeof(alpha_config));
+
+       /* per-pixel alpha is usable iff any area has an alpha format */
+       for (i = 0; i < win->area_num; i++) {
+               ppixel_alpha |= ((win->area[i].format == ARGB888) ||
+                                (win->area[i].format == ABGR888)) ? 1 : 0;
+       }
+       global_alpha = (win->g_alpha_val == 0) ? 0 : 1;
+       alpha_config.src_global_alpha_val = win->g_alpha_val;
+       win->alpha_mode = AB_SRC_OVER;
+       switch (win->alpha_mode) {
+       case AB_USER_DEFINE:
+               break;
+       case AB_CLEAR:
+               alpha_config.src_factor_mode = AA_ZERO;
+               alpha_config.dst_factor_mode = AA_ZERO;
+               break;
+       case AB_SRC:
+               alpha_config.src_factor_mode = AA_ONE;
+               alpha_config.dst_factor_mode = AA_ZERO;
+               break;
+       case AB_DST:
+               alpha_config.src_factor_mode = AA_ZERO;
+               alpha_config.dst_factor_mode = AA_ONE;
+               break;
+       case AB_SRC_OVER:
+               alpha_config.src_color_mode = AA_SRC_PRE_MUL;
+               if (global_alpha)
+                       alpha_config.src_factor_mode = AA_SRC_GLOBAL;
+               else
+                       alpha_config.src_factor_mode = AA_ONE;
+               alpha_config.dst_factor_mode = AA_SRC_INVERSE;
+               break;
+       case AB_DST_OVER:
+               alpha_config.src_color_mode = AA_SRC_PRE_MUL;
+               alpha_config.src_factor_mode = AA_SRC_INVERSE;
+               alpha_config.dst_factor_mode = AA_ONE;
+               break;
+       case AB_SRC_IN:
+               alpha_config.src_color_mode = AA_SRC_PRE_MUL;
+               alpha_config.src_factor_mode = AA_SRC;
+               alpha_config.dst_factor_mode = AA_ZERO;
+               break;
+       case AB_DST_IN:
+               alpha_config.src_factor_mode = AA_ZERO;
+               alpha_config.dst_factor_mode = AA_SRC;
+               break;
+       case AB_SRC_OUT:
+               alpha_config.src_color_mode = AA_SRC_PRE_MUL;
+               alpha_config.src_factor_mode = AA_SRC_INVERSE;
+               alpha_config.dst_factor_mode = AA_ZERO;
+               break;
+       case AB_DST_OUT:
+               alpha_config.src_factor_mode = AA_ZERO;
+               alpha_config.dst_factor_mode = AA_SRC_INVERSE;
+               break;
+       case AB_SRC_ATOP:
+               alpha_config.src_color_mode = AA_SRC_PRE_MUL;
+               alpha_config.src_factor_mode = AA_SRC;
+               alpha_config.dst_factor_mode = AA_SRC_INVERSE;
+               break;
+       case AB_DST_ATOP:
+               alpha_config.src_color_mode = AA_SRC_PRE_MUL;
+               alpha_config.src_factor_mode = AA_SRC_INVERSE;
+               alpha_config.dst_factor_mode = AA_SRC;
+               break;
+       case XOR:
+               alpha_config.src_color_mode = AA_SRC_PRE_MUL;
+               alpha_config.src_factor_mode = AA_SRC_INVERSE;
+               alpha_config.dst_factor_mode = AA_SRC_INVERSE;
+               break;
+       case AB_SRC_OVER_GLOBAL:
+               alpha_config.src_global_alpha_mode = AA_PER_PIX_GLOBAL;
+               alpha_config.src_color_mode = AA_SRC_NO_PRE_MUL;
+               alpha_config.src_factor_mode = AA_SRC_GLOBAL;
+               alpha_config.dst_factor_mode = AA_SRC_INVERSE;
+               break;
+       default:
+               pr_err("alpha mode error\n");
+               break;
+       }
+       /* choose how per-pixel and global alpha are combined */
+       if ((ppixel_alpha == 1) && (global_alpha == 1))
+               alpha_config.src_global_alpha_mode = AA_PER_PIX_GLOBAL;
+       else if (ppixel_alpha == 1)
+               alpha_config.src_global_alpha_mode = AA_PER_PIX;
+       else if (global_alpha == 1)
+               alpha_config.src_global_alpha_mode = AA_GLOBAL;
+       else
+               dev_warn(lcdc_dev->dev, "alpha_en should be 0\n");
+       alpha_config.src_alpha_mode = AA_STRAIGHT;
+       alpha_config.src_alpha_cal_m0 = AA_NO_SAT;
+
+       /* per-window src/dst alpha control register offsets */
+       switch (win_id) {
+       case 0:
+               src_alpha_ctl = 0x60;
+               dst_alpha_ctl = 0x64;
+               break;
+       case 1:
+               src_alpha_ctl = 0xa0;
+               dst_alpha_ctl = 0xa4;
+               break;
+       case 2:
+               src_alpha_ctl = 0xdc;
+               dst_alpha_ctl = 0xec;
+               break;
+       case 3:
+               src_alpha_ctl = 0x12c;
+               dst_alpha_ctl = 0x13c;
+               break;
+       case 4:
+               src_alpha_ctl = 0x160;
+               dst_alpha_ctl = 0x164;
+               break;
+       default:
+               /* previously fell through with uninitialized offsets */
+               pr_err("%s:invalid win id:%d\n", __func__, win_id);
+               return -EINVAL;
+       }
+       mask = m_WIN0_DST_FACTOR_M0;
+       val = v_WIN0_DST_FACTOR_M0(alpha_config.dst_factor_mode);
+       lcdc_msk_reg(lcdc_dev, dst_alpha_ctl, mask, val);
+       mask = m_WIN0_SRC_ALPHA_EN | m_WIN0_SRC_COLOR_M0 |
+           m_WIN0_SRC_ALPHA_M0 | m_WIN0_SRC_BLEND_M0 |
+           m_WIN0_SRC_ALPHA_CAL_M0 | m_WIN0_SRC_FACTOR_M0 |
+           m_WIN0_SRC_GLOBAL_ALPHA;
+       val = v_WIN0_SRC_ALPHA_EN(1) |
+           v_WIN0_SRC_COLOR_M0(alpha_config.src_color_mode) |
+           v_WIN0_SRC_ALPHA_M0(alpha_config.src_alpha_mode) |
+           v_WIN0_SRC_BLEND_M0(alpha_config.src_global_alpha_mode) |
+           v_WIN0_SRC_ALPHA_CAL_M0(alpha_config.src_alpha_cal_m0) |
+           v_WIN0_SRC_FACTOR_M0(alpha_config.src_factor_mode) |
+           v_WIN0_SRC_GLOBAL_ALPHA(alpha_config.src_global_alpha_val);
+       lcdc_msk_reg(lcdc_dev, src_alpha_ctl, mask, val);
+
+       return 0;
+}
+
+/* sort a window's areas left-to-right by display start x (bubble sort) */
+static int rk3368_lcdc_area_xst(struct rk_lcdc_win *win, int area_num)
+{
+       struct rk_lcdc_win_area tmp;
+       int i, j;
+
+       for (i = 0; i < area_num; i++) {
+               for (j = i + 1; j < area_num; j++) {
+                       if (win->area[i].dsp_stx <= win->area[j].dsp_stx)
+                               continue;
+                       /* out of order - swap via struct assignment */
+                       tmp = win->area[i];
+                       win->area[i] = win->area[j];
+                       win->area[j] = tmp;
+               }
+       }
+
+       return 0;
+}
+
+/* reverse the order of a window's first area_num areas (2..4 supported) */
+static int rk3368_lcdc_area_swap(struct rk_lcdc_win *win, int area_num)
+{
+       struct rk_lcdc_win_area tmp;
+
+       if (area_num < 2 || area_num > 4) {
+               pr_info("un supported area num!\n");
+               return 0;
+       }
+
+       /* swap the outer pair: 0 <-> last */
+       tmp = win->area[0];
+       win->area[0] = win->area[area_num - 1];
+       win->area[area_num - 1] = tmp;
+
+       /* for four areas the inner pair also swaps: 1 <-> 2 */
+       if (area_num == 4) {
+               tmp = win->area[1];
+               win->area[1] = win->area[2];
+               win->area[2] = tmp;
+       }
+       return 0;
+}
+
+/*
+ * Validate that area_now does not precede or overlap area_pre; on
+ * violation clear area_now->state, log both areas and return -EINVAL.
+ */
+static int rk3368_win_area_check_var(int win_id, int area_num,
+                                    struct rk_lcdc_win_area *area_pre,
+                                    struct rk_lcdc_win_area *area_now)
+{
+       bool x_reversed = area_pre->xpos > area_now->xpos;
+       bool x_overlap = area_pre->xpos + area_pre->xsize > area_now->xpos;
+       bool y_overlap = area_pre->ypos + area_pre->ysize > area_now->ypos;
+
+       if (!x_reversed && !(x_overlap && y_overlap))
+               return 0;
+
+       area_now->state = 0;
+       pr_err("win[%d]:\n"
+              "area_pre[%d]:xpos[%d],xsize[%d],ypos[%d],ysize[%d]\n"
+              "area_now[%d]:xpos[%d],xsize[%d],ypos[%d],ysize[%d]\n",
+              win_id,
+              area_num - 1, area_pre->xpos, area_pre->xsize,
+              area_pre->ypos, area_pre->ysize,
+              area_num, area_now->xpos, area_now->xsize,
+              area_now->ypos, area_now->ysize);
+       return -EINVAL;
+}
+
+/*
+ * rk3368_get_fbdc_idle - poll until the IFBDC block reports idle
+ *
+ * Reads the masked idle bit from IFBDC_DEBUG0 up to 100 times, sleeping
+ * 10ms between attempts while the block is busy.  Returns the masked
+ * idle bit (non-zero = idle, 0 = still busy after ~1s), matching the
+ * old return convention.
+ * Fix: the old loop did "if (val) continue; else mdelay()", which kept
+ * polling all 100 iterations even after the block went idle; now it
+ * stops as soon as the idle bit is observed.
+ */
+static int __maybe_unused rk3368_get_fbdc_idle(struct rk_lcdc_driver *dev_drv)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       u32 val = 0;
+       int i;
+
+       for (i = 0; i < 100; i++) {
+               val = lcdc_readl(lcdc_dev, IFBDC_DEBUG0) & m_DBG_IFBDC_IDLE;
+               if (val)
+                       break;  /* idle bit set - done */
+               mdelay(10);
+       }
+       return val;
+}
+
+/* push a window's FBDC (frame-buffer decompression) state to the IFBDC
+ * registers; the write order matches the previous open-coded sequence */
+static int rk3368_fbdc_reg_update(struct rk_lcdc_driver *dev_drv, int win_id)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_lcdc_win *win = dev_drv->win[win_id];
+       const struct {
+               u32 reg;
+               u32 mask;
+               u32 val;
+       } cfg[] = {
+               { IFBDC_CTRL,
+                 m_IFBDC_CTRL_FBDC_EN | m_IFBDC_CTRL_FBDC_COR_EN |
+                 m_IFBDC_CTRL_FBDC_WIN_SEL | m_IFBDC_CTRL_FBDC_ROTATION_MODE |
+                 m_IFBDC_CTRL_FBDC_FMT | m_IFBDC_CTRL_WIDTH_RATIO,
+                 v_IFBDC_CTRL_FBDC_EN(win->area[0].fbdc_en) |
+                 v_IFBDC_CTRL_FBDC_COR_EN(win->area[0].fbdc_cor_en) |
+                 v_IFBDC_CTRL_FBDC_WIN_SEL(win->id) |
+                 v_IFBDC_CTRL_FBDC_ROTATION_MODE(win->mirror_en << 1) |
+                 v_IFBDC_CTRL_FBDC_FMT(win->area[0].fbdc_fmt_cfg) |
+                 v_IFBDC_CTRL_WIDTH_RATIO(win->area[0].fbdc_dsp_width_ratio) },
+               { IFBDC_TILES_NUM, m_IFBDC_TILES_NUM,
+                 v_IFBDC_TILES_NUM(win->area[0].fbdc_num_tiles) },
+               { IFBDC_BASE_ADDR, m_IFBDC_BASE_ADDR,
+                 v_IFBDC_BASE_ADDR(win->area[0].y_addr) },
+               { IFBDC_MB_SIZE,
+                 m_IFBDC_MB_SIZE_WIDTH | m_IFBDC_MB_SIZE_HEIGHT,
+                 v_IFBDC_MB_SIZE_WIDTH(win->area[0].fbdc_mb_width) |
+                 v_IFBDC_MB_SIZE_HEIGHT(win->area[0].fbdc_mb_height) },
+               { IFBDC_CMP_INDEX_INIT, m_IFBDC_CMP_INDEX_INIT,
+                 v_IFBDC_CMP_INDEX_INIT(win->area[0].fbdc_cmp_index_init) },
+               { IFBDC_MB_VIR_WIDTH, m_IFBDC_MB_VIR_WIDTH,
+                 v_IFBDC_MB_VIR_WIDTH(win->area[0].fbdc_mb_vir_width) },
+       };
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(cfg); i++)
+               lcdc_msk_reg(lcdc_dev, cfg[i].reg, cfg[i].mask, cfg[i].val);
+
+       return 0;
+}
+
+/*
+ * rk3368_init_fbdc_config - derive the FBDC macro-block geometry
+ * @dev_drv: lcdc driver instance
+ * @win_id: window index
+ *
+ * Converts the window's virtual/active/offset geometry into macro-block
+ * units and the compressed-tile count, and stores the results in
+ * win->area[0].fbdc_*.  Returns 0 on success, -EINVAL for a pixel
+ * format FBDC cannot handle (previously this path continued with
+ * uninitialized locals - undefined behavior).
+ */
+static int rk3368_init_fbdc_config(struct rk_lcdc_driver *dev_drv, int win_id)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_lcdc_win *win = dev_drv->win[win_id];
+       /* deterministic defaults: the alignment-failure paths below only
+        * log, so the fbdc_* fields must never be left uninitialized */
+       u8 fbdc_dsp_width_ratio = 0;
+       u16 fbdc_mb_vir_width = 0, fbdc_mb_vir_height = 0;
+       u16 fbdc_mb_width = 0, fbdc_mb_height = 0;
+       u16 fbdc_mb_xst = 0, fbdc_mb_yst = 0, fbdc_num_tiles = 0;
+       u16 fbdc_cmp_index_init = 0;
+       u8 mb_w_size, mb_h_size = 4;
+       struct rk_screen *screen = dev_drv->cur_screen;
+
+       if (screen->mode.flag == FB_VMODE_INTERLACED) {
+               dev_err(lcdc_dev->dev, "unsupport fbdc+interlace!\n");
+               return 0;
+       }
+
+       /* macro-block width in pixels depends on the pixel format */
+       switch (win->area[0].fmt_cfg) {
+       case VOP_FORMAT_ARGB888:
+               fbdc_dsp_width_ratio = 0;
+               mb_w_size = 16;
+               break;
+       case VOP_FORMAT_RGB888:
+               fbdc_dsp_width_ratio = 0;
+               mb_w_size = 16;
+               break;
+       case VOP_FORMAT_RGB565:
+               /* NOTE(review): old code never set the width ratio for
+                * RGB565 (it was read uninitialized); 0 is used here -
+                * confirm the correct ratio against the TRM */
+               mb_w_size = 32;
+               break;
+       default:
+               dev_err(lcdc_dev->dev,
+                       "in fbdc mode,unsupport fmt:%d!\n",
+                       win->area[0].fmt_cfg);
+               return -EINVAL;
+       }
+
+       /*macro block xvir and yvir */
+       if ((win->area[0].xvir % mb_w_size == 0) &&
+           (win->area[0].yvir % mb_h_size == 0)) {
+               fbdc_mb_vir_width = win->area[0].xvir / mb_w_size;
+               fbdc_mb_vir_height = win->area[0].yvir / mb_h_size;
+       } else {
+               pr_err("fbdc fmt[%d]:", win->area[0].fmt_cfg);
+               pr_err("xvir[%d]/yvir[%d] should %d/%d pix align!\n",
+                      win->area[0].xvir, win->area[0].yvir,
+                      mb_w_size, mb_h_size);
+       }
+       /*macro block xact and yact */
+       if ((win->area[0].xact % mb_w_size == 0) &&
+           (win->area[0].yact % mb_h_size == 0)) {
+               fbdc_mb_width = win->area[0].xact / mb_w_size;
+               fbdc_mb_height = win->area[0].yact / mb_h_size;
+       } else {
+               pr_err("fbdc fmt[%d]:", win->area[0].fmt_cfg);
+               pr_err("xact[%d]/yact[%d] should %d/%d pix align!\n",
+                      win->area[0].xact, win->area[0].yact,
+                      mb_w_size, mb_h_size);
+       }
+       /*macro block xoff and yoff */
+       if ((win->area[0].xoff % mb_w_size == 0) &&
+           (win->area[0].yoff % mb_h_size == 0)) {
+               fbdc_mb_xst = win->area[0].xoff / mb_w_size;
+               fbdc_mb_yst = win->area[0].yoff / mb_h_size;
+       } else {
+               pr_err("fbdc fmt[%d]:", win->area[0].fmt_cfg);
+               pr_err("xoff[%d]/yoff[%d] should %d/%d pix align!\n",
+                      win->area[0].xoff, win->area[0].yoff,
+                      mb_w_size, mb_h_size);
+       }
+
+       /*FBDC tiles */
+       fbdc_num_tiles = fbdc_mb_vir_width * fbdc_mb_vir_height;
+
+       /*
+        * Initial compressed-block index: mirrored windows (2/3) start at
+        * the bottom-right macro block, everything else at the top-left.
+        */
+       if ((win->mirror_en) && ((win_id == 2) || (win_id == 3))) {
+               fbdc_cmp_index_init =
+                   ((fbdc_mb_yst + (fbdc_mb_height - 1)) * fbdc_mb_vir_width) +
+                   (fbdc_mb_xst + (fbdc_mb_width - 1));
+       } else {
+               fbdc_cmp_index_init =
+                   (fbdc_mb_yst * fbdc_mb_vir_width) + fbdc_mb_xst;
+       }
+       /*fbdc fmt maybe need to change*/
+       win->area[0].fbdc_fmt_cfg = win->area[0].fbdc_data_format;
+       win->area[0].fbdc_dsp_width_ratio = fbdc_dsp_width_ratio;
+       win->area[0].fbdc_mb_vir_width = fbdc_mb_vir_width;
+       win->area[0].fbdc_mb_vir_height = fbdc_mb_vir_height;
+       win->area[0].fbdc_mb_width = fbdc_mb_width;
+       win->area[0].fbdc_mb_height = fbdc_mb_height;
+       win->area[0].fbdc_mb_xst = fbdc_mb_xst;
+       win->area[0].fbdc_mb_yst = fbdc_mb_yst;
+       win->area[0].fbdc_num_tiles = fbdc_num_tiles;
+       win->area[0].fbdc_cmp_index_init = fbdc_cmp_index_init;
+
+       return 0;
+}
+
+/*
+ * Select the color-space-conversion mode for a window based on the
+ * overlay domain: RGB formats blended in the YUV domain need R2Y
+ * (BT601 below 1280x720, BT709 otherwise); YCbCr420 blended in the RGB
+ * domain needs Y2R (win0/win1 only).  Other combinations are untouched.
+ */
+static void rk3368_lcdc_csc_mode(struct lcdc_device *lcdc_dev,
+                                struct rk_lcdc_win *win)
+{
+       struct rk_lcdc_driver *dev_drv = &lcdc_dev->driver;
+       struct rk_screen *screen = dev_drv->cur_screen;
+       u32 fmt = win->area[0].fmt_cfg;
+
+       if (dev_drv->overlay_mode == VOP_YUV_DOMAIN) {
+               if (fmt == VOP_FORMAT_ARGB888 || fmt == VOP_FORMAT_RGB888 ||
+                   fmt == VOP_FORMAT_RGB565) {
+                       if ((screen->mode.xres < 1280) &&
+                           (screen->mode.yres < 720))
+                               win->csc_mode = VOP_R2Y_CSC_BT601;
+                       else
+                               win->csc_mode = VOP_R2Y_CSC_BT709;
+               }
+       } else if (dev_drv->overlay_mode == VOP_RGB_DOMAIN) {
+               if (fmt == VOP_FORMAT_YCBCR420 &&
+                   (win->id == 0 || win->id == 1))
+                       win->csc_mode = VOP_Y2R_CSC_MPEG;
+       }
+}
+
+/*
+ * rk3368_win_0_1_reg_update - program the register bank of win0 or win1
+ * @dev_drv: lcdc driver instance
+ * @win_id: window index (0 or 1); selects the 0x40-stride register bank
+ *
+ * When the window is enabled: picks the CSC mode, updates FBDC if used,
+ * then writes enable/format/mirror (CTRL0), scaler modes (CTRL1),
+ * virtual strides, active/display geometry and scale factors, and the
+ * source-alpha path.  When disabled, only the enable bit is cleared.
+ * Always returns 0.
+ */
+static int rk3368_win_0_1_reg_update(struct rk_lcdc_driver *dev_drv, int win_id)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_lcdc_win *win = dev_drv->win[win_id];
+       unsigned int mask, val, off;
+
+       /* win1 registers live 0x40 above the corresponding win0 ones */
+       off = win_id * 0x40;
+       /*if(win->win_lb_mode == 5)
+          win->win_lb_mode = 4;
+          for rk3288 to fix hw bug? */
+
+       if (win->state == 1) {
+               rk3368_lcdc_csc_mode(lcdc_dev, win);
+               if (win->area[0].fbdc_en)
+                       rk3368_fbdc_reg_update(&lcdc_dev->driver, win_id);
+               /* CTRL0: enable, pixel format, line-buffer mode, RB swap,
+                * X/Y mirror (both driven from mirror_en) and CSC mode */
+               mask = m_WIN0_EN | m_WIN0_DATA_FMT | m_WIN0_FMT_10 |
+                   m_WIN0_LB_MODE | m_WIN0_RB_SWAP | m_WIN0_X_MIRROR |
+                   m_WIN0_Y_MIRROR | m_WIN0_CSC_MODE;
+               val = v_WIN0_EN(win->state) |
+                   v_WIN0_DATA_FMT(win->area[0].fmt_cfg) |
+                   v_WIN0_FMT_10(win->fmt_10) |
+                   v_WIN0_LB_MODE(win->win_lb_mode) |
+                   v_WIN0_RB_SWAP(win->area[0].swap_rb) |
+                   v_WIN0_X_MIRROR(win->mirror_en) |
+                   v_WIN0_Y_MIRROR(win->mirror_en) |
+                   v_WIN0_CSC_MODE(win->csc_mode);
+               lcdc_msk_reg(lcdc_dev, WIN0_CTRL0 + off, mask, val);
+
+               /* CTRL1: horizontal/vertical scaler mode selection for the
+                * YRGB and CBR planes */
+               mask = m_WIN0_BIC_COE_SEL |
+                   m_WIN0_VSD_YRGB_GT4 | m_WIN0_VSD_YRGB_GT2 |
+                   m_WIN0_VSD_CBR_GT4 | m_WIN0_VSD_CBR_GT2 |
+                   m_WIN0_YRGB_HOR_SCL_MODE | m_WIN0_YRGB_VER_SCL_MODE |
+                   m_WIN0_YRGB_HSD_MODE | m_WIN0_YRGB_VSU_MODE |
+                   m_WIN0_YRGB_VSD_MODE | m_WIN0_CBR_HOR_SCL_MODE |
+                   m_WIN0_CBR_VER_SCL_MODE | m_WIN0_CBR_HSD_MODE |
+                   m_WIN0_CBR_VSU_MODE | m_WIN0_CBR_VSD_MODE;
+               val = v_WIN0_BIC_COE_SEL(win->bic_coe_el) |
+                   v_WIN0_VSD_YRGB_GT4(win->vsd_yrgb_gt4) |
+                   v_WIN0_VSD_YRGB_GT2(win->vsd_yrgb_gt2) |
+                   v_WIN0_VSD_CBR_GT4(win->vsd_cbr_gt4) |
+                   v_WIN0_VSD_CBR_GT2(win->vsd_cbr_gt2) |
+                   v_WIN0_YRGB_HOR_SCL_MODE(win->yrgb_hor_scl_mode) |
+                   v_WIN0_YRGB_VER_SCL_MODE(win->yrgb_ver_scl_mode) |
+                   v_WIN0_YRGB_HSD_MODE(win->yrgb_hsd_mode) |
+                   v_WIN0_YRGB_VSU_MODE(win->yrgb_vsu_mode) |
+                   v_WIN0_YRGB_VSD_MODE(win->yrgb_vsd_mode) |
+                   v_WIN0_CBR_HOR_SCL_MODE(win->cbr_hor_scl_mode) |
+                   v_WIN0_CBR_VER_SCL_MODE(win->cbr_ver_scl_mode) |
+                   v_WIN0_CBR_HSD_MODE(win->cbr_hsd_mode) |
+                   v_WIN0_CBR_VSU_MODE(win->cbr_vsu_mode) |
+                   v_WIN0_CBR_VSD_MODE(win->cbr_vsd_mode);
+               lcdc_msk_reg(lcdc_dev, WIN0_CTRL1 + off, mask, val);
+               /* virtual strides for the Y/RGB and UV planes */
+               val = v_WIN0_VIR_STRIDE(win->area[0].y_vir_stride) |
+                   v_WIN0_VIR_STRIDE_UV(win->area[0].uv_vir_stride);
+               lcdc_writel(lcdc_dev, WIN0_VIR + off, val);
+               /*lcdc_writel(lcdc_dev, WIN0_YRGB_MST+off,
+                               win->area[0].y_addr);
+                  lcdc_writel(lcdc_dev, WIN0_CBR_MST+off,
+                               win->area[0].uv_addr); */
+               /* source (active) size, display size and display offset */
+               val = v_WIN0_ACT_WIDTH(win->area[0].xact) |
+                   v_WIN0_ACT_HEIGHT(win->area[0].yact);
+               lcdc_writel(lcdc_dev, WIN0_ACT_INFO + off, val);
+
+               val = v_WIN0_DSP_WIDTH(win->area[0].xsize) |
+                   v_WIN0_DSP_HEIGHT(win->area[0].ysize);
+               lcdc_writel(lcdc_dev, WIN0_DSP_INFO + off, val);
+
+               val = v_WIN0_DSP_XST(win->area[0].dsp_stx) |
+                   v_WIN0_DSP_YST(win->area[0].dsp_sty);
+               lcdc_writel(lcdc_dev, WIN0_DSP_ST + off, val);
+
+               /* pre-computed scale factors for both planes */
+               val = v_WIN0_HS_FACTOR_YRGB(win->scale_yrgb_x) |
+                   v_WIN0_VS_FACTOR_YRGB(win->scale_yrgb_y);
+               lcdc_writel(lcdc_dev, WIN0_SCL_FACTOR_YRGB + off, val);
+
+               val = v_WIN0_HS_FACTOR_CBR(win->scale_cbcr_x) |
+                   v_WIN0_VS_FACTOR_CBR(win->scale_cbcr_y);
+               lcdc_writel(lcdc_dev, WIN0_SCL_FACTOR_CBR + off, val);
+               /* configure the full alpha path, or force src-alpha off */
+               if (win->alpha_en == 1) {
+                       rk3368_lcdc_alpha_cfg(dev_drv, win_id);
+               } else {
+                       mask = m_WIN0_SRC_ALPHA_EN;
+                       val = v_WIN0_SRC_ALPHA_EN(0);
+                       lcdc_msk_reg(lcdc_dev, WIN0_SRC_ALPHA_CTRL + off,
+                                    mask, val);
+               }
+       } else {
+               /* window disabled: just clear the enable bit */
+               mask = m_WIN0_EN;
+               val = v_WIN0_EN(win->state);
+               lcdc_msk_reg(lcdc_dev, WIN0_CTRL0 + off, mask, val);
+       }
+       return 0;
+}
+
+/*
+ * rk3368_win_2_3_reg_update - program the register bank of win2 or win3
+ * @dev_drv: lcdc driver instance
+ * @win_id: window index (2 or 3); selects the 0x50-stride register bank
+ *
+ * win2/win3 each carry up to four independent display areas.  For an
+ * enabled window this writes per-area format/stride/geometry registers
+ * (disabled areas get their MSTx enable bit cleared) and then the alpha
+ * path.  For a disabled window every enable bit is cleared at once.
+ * Always returns 0.
+ */
+static int rk3368_win_2_3_reg_update(struct rk_lcdc_driver *dev_drv, int win_id)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_lcdc_win *win = dev_drv->win[win_id];
+       struct rk_screen *screen = dev_drv->cur_screen;
+       unsigned int mask, val, off;
+
+       /* win3 registers live 0x50 above the corresponding win2 ones */
+       off = (win_id - 2) * 0x50;
+       rk3368_lcdc_area_xst(win, win->area_num);
+       /* vertical mirroring reverses the stacking order of the areas */
+       if (((screen->y_mirror == 1) || (win->mirror_en)) &&
+           (win->area_num > 1)) {
+               rk3368_lcdc_area_swap(win, win->area_num);
+       }
+
+       if (win->state == 1) {
+               rk3368_lcdc_csc_mode(lcdc_dev, win);
+               if (win->area[0].fbdc_en)
+                       rk3368_fbdc_reg_update(&lcdc_dev->driver, win_id);
+
+               mask = m_WIN2_EN | m_WIN2_CSC_MODE;
+               val = v_WIN2_EN(1) | v_WIN1_CSC_MODE(win->csc_mode);
+               lcdc_msk_reg(lcdc_dev, WIN2_CTRL0 + off, mask, val);
+               /*area 0 */
+               if (win->area[0].state == 1) {
+                       mask = m_WIN2_MST0_EN | m_WIN2_DATA_FMT0 |
+                           m_WIN2_RB_SWAP0;
+                       val = v_WIN2_MST0_EN(win->area[0].state) |
+                           v_WIN2_DATA_FMT0(win->area[0].fmt_cfg) |
+                           v_WIN2_RB_SWAP0(win->area[0].swap_rb);
+                       lcdc_msk_reg(lcdc_dev, WIN2_CTRL0 + off, mask, val);
+
+                       mask = m_WIN2_VIR_STRIDE0;
+                       val = v_WIN2_VIR_STRIDE0(win->area[0].y_vir_stride);
+                       lcdc_msk_reg(lcdc_dev, WIN2_VIR0_1 + off, mask, val);
+
+                       /*lcdc_writel(lcdc_dev,WIN2_MST0+off,
+                          win->area[0].y_addr); */
+                       val = v_WIN2_DSP_WIDTH0(win->area[0].xsize) |
+                           v_WIN2_DSP_HEIGHT0(win->area[0].ysize);
+                       lcdc_writel(lcdc_dev, WIN2_DSP_INFO0 + off, val);
+                       val = v_WIN2_DSP_XST0(win->area[0].dsp_stx) |
+                           v_WIN2_DSP_YST0(win->area[0].dsp_sty);
+                       lcdc_writel(lcdc_dev, WIN2_DSP_ST0 + off, val);
+               } else {
+                       mask = m_WIN2_MST0_EN;
+                       val = v_WIN2_MST0_EN(0);
+                       lcdc_msk_reg(lcdc_dev, WIN2_CTRL0 + off, mask, val);
+               }
+               /*area 1 */
+               if (win->area[1].state == 1) {
+                       rk3368_win_area_check_var(win_id, 1,
+                                                 &win->area[0], &win->area[1]);
+
+                       /* NOTE(review): mask selects the FMT1/SWAP1 fields but
+                        * the values are built with the FMT0/SWAP0 macros --
+                        * confirm the per-area value macros share one shift */
+                       mask = m_WIN2_MST1_EN | m_WIN2_DATA_FMT1 |
+                           m_WIN2_RB_SWAP1;
+                       val = v_WIN2_MST1_EN(win->area[1].state) |
+                           v_WIN2_DATA_FMT0(win->area[1].fmt_cfg) |
+                           v_WIN2_RB_SWAP0(win->area[1].swap_rb);
+                       lcdc_msk_reg(lcdc_dev, WIN2_CTRL0 + off, mask, val);
+
+                       mask = m_WIN2_VIR_STRIDE1;
+                       val = v_WIN2_VIR_STRIDE1(win->area[1].y_vir_stride);
+                       lcdc_msk_reg(lcdc_dev, WIN2_VIR0_1 + off, mask, val);
+
+                       /*lcdc_writel(lcdc_dev,WIN2_MST1+off,
+                          win->area[1].y_addr); */
+                       val = v_WIN2_DSP_WIDTH1(win->area[1].xsize) |
+                           v_WIN2_DSP_HEIGHT1(win->area[1].ysize);
+                       lcdc_writel(lcdc_dev, WIN2_DSP_INFO1 + off, val);
+                       val = v_WIN2_DSP_XST1(win->area[1].dsp_stx) |
+                           v_WIN2_DSP_YST1(win->area[1].dsp_sty);
+                       lcdc_writel(lcdc_dev, WIN2_DSP_ST1 + off, val);
+               } else {
+                       mask = m_WIN2_MST1_EN;
+                       val = v_WIN2_MST1_EN(0);
+                       lcdc_msk_reg(lcdc_dev, WIN2_CTRL0 + off, mask, val);
+               }
+               /*area 2 */
+               if (win->area[2].state == 1) {
+                       rk3368_win_area_check_var(win_id, 2,
+                                                 &win->area[1], &win->area[2]);
+
+                       mask = m_WIN2_MST2_EN | m_WIN2_DATA_FMT2 |
+                           m_WIN2_RB_SWAP2;
+                       val = v_WIN2_MST2_EN(win->area[2].state) |
+                           v_WIN2_DATA_FMT0(win->area[2].fmt_cfg) |
+                           v_WIN2_RB_SWAP0(win->area[2].swap_rb);
+                       lcdc_msk_reg(lcdc_dev, WIN2_CTRL0 + off, mask, val);
+
+                       mask = m_WIN2_VIR_STRIDE2;
+                       val = v_WIN2_VIR_STRIDE2(win->area[2].y_vir_stride);
+                       lcdc_msk_reg(lcdc_dev, WIN2_VIR2_3 + off, mask, val);
+
+                       /*lcdc_writel(lcdc_dev,WIN2_MST2+off,
+                          win->area[2].y_addr); */
+                       val = v_WIN2_DSP_WIDTH2(win->area[2].xsize) |
+                           v_WIN2_DSP_HEIGHT2(win->area[2].ysize);
+                       lcdc_writel(lcdc_dev, WIN2_DSP_INFO2 + off, val);
+                       val = v_WIN2_DSP_XST2(win->area[2].dsp_stx) |
+                           v_WIN2_DSP_YST2(win->area[2].dsp_sty);
+                       lcdc_writel(lcdc_dev, WIN2_DSP_ST2 + off, val);
+               } else {
+                       mask = m_WIN2_MST2_EN;
+                       val = v_WIN2_MST2_EN(0);
+                       lcdc_msk_reg(lcdc_dev, WIN2_CTRL0 + off, mask, val);
+               }
+               /*area 3 */
+               if (win->area[3].state == 1) {
+                       rk3368_win_area_check_var(win_id, 3,
+                                                 &win->area[2], &win->area[3]);
+
+                       mask = m_WIN2_MST3_EN | m_WIN2_DATA_FMT3 |
+                           m_WIN2_RB_SWAP3;
+                       val = v_WIN2_MST3_EN(win->area[3].state) |
+                           v_WIN2_DATA_FMT0(win->area[3].fmt_cfg) |
+                           v_WIN2_RB_SWAP0(win->area[3].swap_rb);
+                       lcdc_msk_reg(lcdc_dev, WIN2_CTRL0 + off, mask, val);
+
+                       mask = m_WIN2_VIR_STRIDE3;
+                       val = v_WIN2_VIR_STRIDE3(win->area[3].y_vir_stride);
+                       lcdc_msk_reg(lcdc_dev, WIN2_VIR2_3 + off, mask, val);
+
+                       /*lcdc_writel(lcdc_dev,WIN2_MST3+off,
+                          win->area[3].y_addr); */
+                       val = v_WIN2_DSP_WIDTH3(win->area[3].xsize) |
+                           v_WIN2_DSP_HEIGHT3(win->area[3].ysize);
+                       lcdc_writel(lcdc_dev, WIN2_DSP_INFO3 + off, val);
+                       val = v_WIN2_DSP_XST3(win->area[3].dsp_stx) |
+                           v_WIN2_DSP_YST3(win->area[3].dsp_sty);
+                       lcdc_writel(lcdc_dev, WIN2_DSP_ST3 + off, val);
+               } else {
+                       mask = m_WIN2_MST3_EN;
+                       val = v_WIN2_MST3_EN(0);
+                       lcdc_msk_reg(lcdc_dev, WIN2_CTRL0 + off, mask, val);
+               }
+
+               if (win->alpha_en == 1) {
+                       rk3368_lcdc_alpha_cfg(dev_drv, win_id);
+               } else {
+                       mask = m_WIN2_SRC_ALPHA_EN;
+                       val = v_WIN2_SRC_ALPHA_EN(0);
+                       lcdc_msk_reg(lcdc_dev, WIN2_SRC_ALPHA_CTRL + off,
+                                    mask, val);
+               }
+       } else {
+               /* Fix: the mask listed m_WIN2_MST0_EN twice and omitted
+                * m_WIN2_MST1_EN, so the value v_WIN2_MST1_EN(0) below was
+                * masked out and area 1 was never disabled here. */
+               mask = m_WIN2_EN | m_WIN2_MST0_EN |
+                   m_WIN2_MST1_EN | m_WIN2_MST2_EN | m_WIN2_MST3_EN;
+               val = v_WIN2_EN(win->state) | v_WIN2_MST0_EN(0) |
+                   v_WIN2_MST1_EN(0) | v_WIN2_MST2_EN(0) | v_WIN2_MST3_EN(0);
+               lcdc_msk_reg(lcdc_dev, WIN2_CTRL0 + off, mask, val);
+       }
+       return 0;
+}
+
+/*
+ * rk3368_hwc_reg_update - program the hardware-cursor (HWC) registers
+ * @dev_drv: lcdc driver instance
+ * @win_id: window index of the cursor layer
+ *
+ * Maps the cursor's square size (32/64/96/128) onto the 2-bit HWC_SIZE
+ * field, sets format/RB-swap/CSC and the display start position, then
+ * configures the alpha path.  When disabled, only clears the enable bit.
+ * Always returns 0.
+ */
+static int rk3368_hwc_reg_update(struct rk_lcdc_driver *dev_drv, int win_id)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_lcdc_win *win = dev_drv->win[win_id];
+       unsigned int mask, val, hwc_size = 0;
+
+       if (win->state == 1) {
+               rk3368_lcdc_csc_mode(lcdc_dev, win);
+               /* NOTE(review): the WIN0 CSC mask/value macros are applied to
+                * HWC_CTRL0 here -- confirm the HWC CSC field shares WIN0's
+                * bit layout */
+               mask = m_HWC_EN | m_HWC_DATA_FMT |
+                   m_HWC_RB_SWAP | m_WIN0_CSC_MODE;
+               val = v_HWC_EN(1) | v_HWC_DATA_FMT(win->area[0].fmt_cfg) |
+                   v_HWC_RB_SWAP(win->area[0].swap_rb) |
+                   v_WIN0_CSC_MODE(win->csc_mode);
+               lcdc_msk_reg(lcdc_dev, HWC_CTRL0, mask, val);
+
+               /* only the four square sizes are encodable; anything else
+                * logs an error and falls through with hwc_size still 0 */
+               if ((win->area[0].xsize == 32) && (win->area[0].ysize == 32))
+                       hwc_size = 0;
+               else if ((win->area[0].xsize == 64) &&
+                        (win->area[0].ysize == 64))
+                       hwc_size = 1;
+               else if ((win->area[0].xsize == 96) &&
+                        (win->area[0].ysize == 96))
+                       hwc_size = 2;
+               else if ((win->area[0].xsize == 128) &&
+                        (win->area[0].ysize == 128))
+                       hwc_size = 3;
+               else
+                       dev_err(lcdc_dev->dev, "un supported hwc size!\n");
+
+               mask = m_HWC_SIZE;
+               val = v_HWC_SIZE(hwc_size);
+               lcdc_msk_reg(lcdc_dev, HWC_CTRL0, mask, val);
+
+               mask = m_HWC_DSP_XST | m_HWC_DSP_YST;
+               val = v_HWC_DSP_XST(win->area[0].dsp_stx) |
+                   v_HWC_DSP_YST(win->area[0].dsp_sty);
+               lcdc_msk_reg(lcdc_dev, HWC_DSP_ST, mask, val);
+
+               if (win->alpha_en == 1) {
+                       rk3368_lcdc_alpha_cfg(dev_drv, win_id);
+               } else {
+                       /* NOTE(review): alpha-off writes WIN2_SRC_ALPHA_CTRL
+                        * (no offset) for the cursor -- confirm this is the
+                        * intended HWC alpha register */
+                       mask = m_WIN2_SRC_ALPHA_EN;
+                       val = v_WIN2_SRC_ALPHA_EN(0);
+                       lcdc_msk_reg(lcdc_dev, WIN2_SRC_ALPHA_CTRL, mask, val);
+               }
+       } else {
+               mask = m_HWC_EN;
+               val = v_HWC_EN(win->state);
+               lcdc_msk_reg(lcdc_dev, HWC_CTRL0, mask, val);
+       }
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_layer_update_regs - update one layer's registers and latch
+ * @lcdc_dev: lcdc device
+ * @win: layer to update (id 0/1: full windows, 2/3: area windows, 4: hwc)
+ *
+ * Under reg_lock (and only while the controller clock is on) refreshes
+ * the standby bit, dispatches to the per-layer update helper, and issues
+ * a config-done so the hardware latches the new settings at frame end.
+ * Returns 0, or -ETIMEDOUT from the (currently compiled-out) wait path.
+ */
+static int rk3368_lcdc_layer_update_regs(struct lcdc_device *lcdc_dev,
+                                        struct rk_lcdc_win *win)
+{
+       struct rk_lcdc_driver *dev_drv = &lcdc_dev->driver;
+       int timeout;
+       unsigned long flags;
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (likely(lcdc_dev->clk_on)) {
+               lcdc_msk_reg(lcdc_dev, SYS_CTRL, m_STANDBY_EN,
+                            v_STANDBY_EN(lcdc_dev->standby));
+               if ((win->id == 0) || (win->id == 1))
+                       rk3368_win_0_1_reg_update(dev_drv, win->id);
+               else if ((win->id == 2) || (win->id == 3))
+                       rk3368_win_2_3_reg_update(dev_drv, win->id);
+               else if (win->id == 4)
+                       rk3368_hwc_reg_update(dev_drv, win->id);
+               /*rk3368_lcdc_post_cfg(dev_drv); */
+               lcdc_cfg_done(lcdc_dev);
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       /* Wait-for-frame-start path: deliberately disabled with if (0);
+        * timeout/flags above are only used here.  Re-enable by restoring
+        * the wait_fs condition. */
+       /*if (dev_drv->wait_fs) { */
+       if (0) {
+               spin_lock_irqsave(&dev_drv->cpl_lock, flags);
+               init_completion(&dev_drv->frame_done);
+               spin_unlock_irqrestore(&dev_drv->cpl_lock, flags);
+               timeout =
+                   wait_for_completion_timeout(&dev_drv->frame_done,
+                                               msecs_to_jiffies
+                                               (dev_drv->cur_screen->ft + 5));
+               if (!timeout && (!dev_drv->frame_done.done)) {
+                       dev_warn(lcdc_dev->dev,
+                                "wait for new frame start time out!\n");
+                       return -ETIMEDOUT;
+               }
+       }
+       DBG(2, "%s for lcdc%d\n", __func__, lcdc_dev->id);
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_reg_restore - restore controller registers from the backup
+ * @lcdc_dev: lcdc device whose regs are rewritten from regsbak
+ *
+ * Copies the software register backup back into the register window.
+ * With the iommu enabled a larger range (0x330 bytes, covering the mmu
+ * related registers) is restored; otherwise 0x260 bytes suffice.
+ * Always returns 0.
+ */
+static int rk3368_lcdc_reg_restore(struct lcdc_device *lcdc_dev)
+{
+       size_t len = lcdc_dev->driver.iommu_enabled ? 0x330 : 0x260;
+
+       memcpy((u8 *)lcdc_dev->regs, (u8 *)lcdc_dev->regsbak, len);
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_mmu_en - enable the VOP mmu and AXI outstanding limits
+ * @dev_drv: lcdc driver instance
+ *
+ * Sets the MMU enable bit and caps AXI outstanding transfers at 31 while
+ * the controller clock is on, then (with CONFIG_ROCKCHIP_IOMMU) activates
+ * the iovmm the first time through.  Always returns 0.
+ */
+static int __maybe_unused rk3368_lcdc_mmu_en(struct rk_lcdc_driver *dev_drv)
+{
+       u32 mask, val;
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       /*spin_lock(&lcdc_dev->reg_lock); */
+       if (likely(lcdc_dev->clk_on)) {
+               mask = m_MMU_EN;
+               val = v_MMU_EN(1);
+               lcdc_msk_reg(lcdc_dev, SYS_CTRL, mask, val);
+               mask = m_AXI_MAX_OUTSTANDING_EN | m_AXI_OUTSTANDING_MAX_NUM;
+               val = v_AXI_OUTSTANDING_MAX_NUM(31) |
+                   v_AXI_MAX_OUTSTANDING_EN(1);
+               lcdc_msk_reg(lcdc_dev, SYS_CTRL1, mask, val);
+       }
+       /*spin_unlock(&lcdc_dev->reg_lock); */
+#if defined(CONFIG_ROCKCHIP_IOMMU)
+       if (dev_drv->iommu_enabled) {
+               /* iommu_status guards one-time activation */
+               if (!lcdc_dev->iommu_status && dev_drv->mmu_dev) {
+                       lcdc_dev->iommu_status = 1;
+                       rockchip_iovmm_activate(dev_drv->dev);
+                       /* NOTE(review): calls the rk312x helper from the
+                        * rk3368 driver -- looks copy-pasted; confirm this
+                        * should not be rk3368_lcdc_mmu_en itself */
+                       rk312x_lcdc_mmu_en(dev_drv);
+               }
+       }
+#endif
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_set_dclk - set the pixel clock (and its source pll) rate
+ * @dev_drv: lcdc driver instance
+ *
+ * Programs pll_sclk and dclk to the screen's pixclock, recomputes the
+ * picosecond period from the rate actually achieved, and derives the
+ * frame time (screen->ft, in ms) from the resulting fps.
+ * On FPGA builds (CONFIG_RK_FPGA) all clock setup is skipped.
+ * Always returns 0; clk_set_rate failures are only logged.
+ */
+static int rk3368_lcdc_set_dclk(struct rk_lcdc_driver *dev_drv)
+{
+       int ret = 0, fps = 0;
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_screen *screen = dev_drv->cur_screen;
+#ifdef CONFIG_RK_FPGA
+       return 0;
+#endif
+       /*set pll */
+       ret = clk_set_rate(lcdc_dev->pll_sclk, screen->mode.pixclock);
+       if (ret)
+               dev_err(dev_drv->dev, "set lcdc%d pll_sclk failed\n",
+                       lcdc_dev->id);
+
+       ret = clk_set_rate(lcdc_dev->dclk, screen->mode.pixclock);/*set pll */
+       if (ret)
+               dev_err(dev_drv->dev, "set lcdc%d dclk failed\n", lcdc_dev->id);
+       /* period in ps = 1e12 / actual dclk rate */
+       lcdc_dev->pixclock =
+           div_u64(1000000000000llu, clk_get_rate(lcdc_dev->dclk));
+       lcdc_dev->driver.pixclock = lcdc_dev->pixclock;
+
+       /* NOTE(review): divides by fps below -- confirm rk_fb_calc_fps()
+        * can never return 0 for a valid screen */
+       fps = rk_fb_calc_fps(screen, lcdc_dev->pixclock);
+       screen->ft = 1000 / fps;
+       dev_info(lcdc_dev->dev, "%s: dclk:%lu>>fps:%d ",
+                lcdc_dev->driver.name, clk_get_rate(lcdc_dev->dclk), fps);
+       return 0;
+}
+
+/*
+ * rk3368_config_timing - program display timing from the current screen
+ * @dev_drv: lcdc driver instance
+ *
+ * Derives horizontal/vertical totals from the mode's sync/porch values,
+ * computes the overscan-adjusted post-scaler window, and writes the
+ * h/v timing registers.  Interlaced modes (FB_VMODE_INTERLACED) get the
+ * second-field timings and per-window interlace/deflick bits set; the
+ * progressive branch clears the same bits.  Ends by applying the post
+ * configuration.  Always returns 0.
+ */
+static int rk3368_config_timing(struct rk_lcdc_driver *dev_drv)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_screen *screen = dev_drv->cur_screen;
+       u16 hsync_len = screen->mode.hsync_len;
+       u16 left_margin = screen->mode.left_margin;
+       u16 right_margin = screen->mode.right_margin;
+       u16 vsync_len = screen->mode.vsync_len;
+       u16 upper_margin = screen->mode.upper_margin;
+       u16 lower_margin = screen->mode.lower_margin;
+       u16 x_res = screen->mode.xres;
+       u16 y_res = screen->mode.yres;
+       u32 mask, val;
+       u16 h_total, v_total;
+       u16 vact_end_f1, vact_st_f1, vs_end_f1, vs_st_f1;
+
+       h_total = hsync_len + left_margin + x_res + right_margin;
+       v_total = vsync_len + upper_margin + y_res + lower_margin;
+
+       /* overscan fields are percentages; start offsets and sizes of the
+        * post-scaler window are derived from them */
+       screen->post_dsp_stx = x_res * (100 - screen->overscan.left) / 200;
+       screen->post_dsp_sty = y_res * (100 - screen->overscan.top) / 200;
+       screen->post_xsize = x_res *
+           (screen->overscan.left + screen->overscan.right) / 200;
+       screen->post_ysize = y_res *
+           (screen->overscan.top + screen->overscan.bottom) / 200;
+
+       /* horizontal timing is identical for both scan modes */
+       mask = m_DSP_HS_PW | m_DSP_HTOTAL;
+       val = v_DSP_HS_PW(hsync_len) | v_DSP_HTOTAL(h_total);
+       lcdc_msk_reg(lcdc_dev, DSP_HTOTAL_HS_END, mask, val);
+
+       mask = m_DSP_HACT_END | m_DSP_HACT_ST;
+       val = v_DSP_HACT_END(hsync_len + left_margin + x_res) |
+           v_DSP_HACT_ST(hsync_len + left_margin);
+       lcdc_msk_reg(lcdc_dev, DSP_HACT_ST_END, mask, val);
+
+       if (screen->mode.vmode == FB_VMODE_INTERLACED) {
+               /* First Field Timing */
+               mask = m_DSP_VS_PW | m_DSP_VTOTAL;
+               val = v_DSP_VS_PW(vsync_len) |
+                   v_DSP_VTOTAL(2 * (vsync_len + upper_margin +
+                                     lower_margin) + y_res + 1);
+               lcdc_msk_reg(lcdc_dev, DSP_VTOTAL_VS_END, mask, val);
+
+               /* each field carries half the active lines */
+               mask = m_DSP_VACT_END | m_DSP_VACT_ST;
+               val = v_DSP_VACT_END(vsync_len + upper_margin + y_res / 2) |
+                   v_DSP_VACT_ST(vsync_len + upper_margin);
+               lcdc_msk_reg(lcdc_dev, DSP_VACT_ST_END, mask, val);
+
+               /* Second Field Timing */
+               mask = m_DSP_VS_ST_F1 | m_DSP_VS_END_F1;
+               vs_st_f1 = vsync_len + upper_margin + y_res / 2 + lower_margin;
+               vs_end_f1 = 2 * vsync_len + upper_margin + y_res / 2 +
+                   lower_margin;
+               val = v_DSP_VS_ST_F1(vs_st_f1) | v_DSP_VS_END_F1(vs_end_f1);
+               lcdc_msk_reg(lcdc_dev, DSP_VS_ST_END_F1, mask, val);
+
+               mask = m_DSP_VACT_END_F1 | m_DSP_VAC_ST_F1;
+               vact_end_f1 = 2 * (vsync_len + upper_margin) + y_res +
+                   lower_margin + 1;
+               vact_st_f1 = 2 * (vsync_len + upper_margin) + y_res / 2 +
+                   lower_margin + 1;
+               val =
+                   v_DSP_VACT_END_F1(vact_end_f1) |
+                   v_DSP_VAC_ST_F1(vact_st_f1);
+               lcdc_msk_reg(lcdc_dev, DSP_VACT_ST_END_F1, mask, val);
+
+               /* enable interlaced output and per-window interlaced
+                * read / deflicker */
+               lcdc_msk_reg(lcdc_dev, DSP_CTRL0,
+                            m_DSP_INTERLACE | m_DSP_FIELD_POL,
+                            v_DSP_INTERLACE(1) | v_DSP_FIELD_POL(0));
+               mask =
+                   m_WIN0_INTERLACE_READ | m_WIN0_YRGB_DEFLICK |
+                   m_WIN0_CBR_DEFLICK;
+               val =
+                   v_WIN0_INTERLACE_READ(1) | v_WIN0_YRGB_DEFLICK(1) |
+                   v_WIN0_CBR_DEFLICK(1);
+               lcdc_msk_reg(lcdc_dev, WIN0_CTRL0, mask, val);
+
+               mask =
+                   m_WIN1_INTERLACE_READ | m_WIN1_YRGB_DEFLICK |
+                   m_WIN1_CBR_DEFLICK;
+               val =
+                   v_WIN1_INTERLACE_READ(1) | v_WIN1_YRGB_DEFLICK(1) |
+                   v_WIN1_CBR_DEFLICK(1);
+               lcdc_msk_reg(lcdc_dev, WIN1_CTRL0, mask, val);
+
+               mask = m_WIN2_INTERLACE_READ;
+               val = v_WIN2_INTERLACE_READ(1);
+               lcdc_msk_reg(lcdc_dev, WIN2_CTRL0, mask, val);
+
+               mask = m_WIN3_INTERLACE_READ;
+               val = v_WIN3_INTERLACE_READ(1);
+               lcdc_msk_reg(lcdc_dev, WIN3_CTRL0, mask, val);
+
+               mask = m_HWC_INTERLACE_READ;
+               val = v_HWC_INTERLACE_READ(1);
+               lcdc_msk_reg(lcdc_dev, HWC_CTRL0, mask, val);
+
+               /* line-flag irq fires at the end of the half-height field */
+               mask = m_DSP_LINE_FLAG0_NUM;
+               val =
+                   v_DSP_LINE_FLAG0_NUM(vsync_len + upper_margin + y_res / 2);
+               lcdc_msk_reg(lcdc_dev, LINE_FLAG, mask, val);
+       } else {
+               /* progressive: full-height timing, interlace bits cleared */
+               mask = m_DSP_VS_PW | m_DSP_VTOTAL;
+               val = v_DSP_VS_PW(vsync_len) | v_DSP_VTOTAL(v_total);
+               lcdc_msk_reg(lcdc_dev, DSP_VTOTAL_VS_END, mask, val);
+
+               mask = m_DSP_VACT_END | m_DSP_VACT_ST;
+               val = v_DSP_VACT_END(vsync_len + upper_margin + y_res) |
+                   v_DSP_VACT_ST(vsync_len + upper_margin);
+               lcdc_msk_reg(lcdc_dev, DSP_VACT_ST_END, mask, val);
+
+               lcdc_msk_reg(lcdc_dev, DSP_CTRL0,
+                            m_DSP_INTERLACE | m_DSP_FIELD_POL,
+                            v_DSP_INTERLACE(0) | v_DSP_FIELD_POL(0));
+
+               mask =
+                   m_WIN0_INTERLACE_READ | m_WIN0_YRGB_DEFLICK |
+                   m_WIN0_CBR_DEFLICK;
+               val =
+                   v_WIN0_INTERLACE_READ(0) | v_WIN0_YRGB_DEFLICK(0) |
+                   v_WIN0_CBR_DEFLICK(0);
+               lcdc_msk_reg(lcdc_dev, WIN0_CTRL0, mask, val);
+
+               mask =
+                   m_WIN1_INTERLACE_READ | m_WIN1_YRGB_DEFLICK |
+                   m_WIN1_CBR_DEFLICK;
+               val =
+                   v_WIN1_INTERLACE_READ(0) | v_WIN1_YRGB_DEFLICK(0) |
+                   v_WIN1_CBR_DEFLICK(0);
+               lcdc_msk_reg(lcdc_dev, WIN1_CTRL0, mask, val);
+
+               mask = m_WIN2_INTERLACE_READ;
+               val = v_WIN2_INTERLACE_READ(0);
+               lcdc_msk_reg(lcdc_dev, WIN2_CTRL0, mask, val);
+
+               mask = m_WIN3_INTERLACE_READ;
+               val = v_WIN3_INTERLACE_READ(0);
+               lcdc_msk_reg(lcdc_dev, WIN3_CTRL0, mask, val);
+
+               mask = m_HWC_INTERLACE_READ;
+               val = v_HWC_INTERLACE_READ(0);
+               lcdc_msk_reg(lcdc_dev, HWC_CTRL0, mask, val);
+
+               mask = m_DSP_LINE_FLAG0_NUM;
+               val = v_DSP_LINE_FLAG0_NUM(vsync_len + upper_margin + y_res);
+               lcdc_msk_reg(lcdc_dev, LINE_FLAG, mask, val);
+       }
+       rk3368_lcdc_post_cfg(dev_drv);
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_bcsh_path_sel - route Y2R/R2Y conversion around the BCSH
+ * @dev_drv: lcdc driver instance
+ *
+ * Chooses whether the BCSH block needs YUV->RGB and/or RGB->YUV
+ * conversion based on the overlay domain and the output colour space:
+ * matching domains bypass conversion (except when BCSH itself is enabled
+ * in the RGB/RGB case, where both converters wrap it), mismatched
+ * domains enable the appropriate MPEG-range CSC.
+ */
+static void rk3368_lcdc_bcsh_path_sel(struct rk_lcdc_driver *dev_drv)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       u32 bcsh_ctrl;
+
+       if (dev_drv->overlay_mode == VOP_YUV_DOMAIN) {
+               if (dev_drv->output_color == COLOR_YCBCR)       /* bypass */
+                       lcdc_msk_reg(lcdc_dev, BCSH_CTRL,
+                                    m_BCSH_Y2R_EN | m_BCSH_R2Y_EN,
+                                    v_BCSH_Y2R_EN(0) | v_BCSH_R2Y_EN(0));
+               else            /* YUV2RGB */
+                       lcdc_msk_reg(lcdc_dev, BCSH_CTRL,
+                                    m_BCSH_Y2R_EN | m_BCSH_Y2R_CSC_MODE |
+                                    m_BCSH_R2Y_EN,
+                                    v_BCSH_Y2R_EN(1) |
+                                    v_BCSH_Y2R_CSC_MODE(VOP_Y2R_CSC_MPEG) |
+                                    v_BCSH_R2Y_EN(0));
+       } else {                /* overlay_mode=VOP_RGB_DOMAIN */
+               /* bypass  --need check,if bcsh close? */
+               if (dev_drv->output_color == COLOR_RGB) {
+                       bcsh_ctrl = lcdc_readl(lcdc_dev, BCSH_CTRL);
+                       /* Fix: the masked value was compared against the
+                        * literal 1, which only works if m_BCSH_EN happens
+                        * to be bit 0; test the masked bit directly. */
+                       if (bcsh_ctrl & m_BCSH_EN)      /*bcsh enabled */
+                               lcdc_msk_reg(lcdc_dev, BCSH_CTRL,
+                                            m_BCSH_R2Y_EN |
+                                            m_BCSH_Y2R_EN,
+                                            v_BCSH_R2Y_EN(1) |
+                                            v_BCSH_Y2R_EN(1));
+                       else
+                               lcdc_msk_reg(lcdc_dev, BCSH_CTRL,
+                                            m_BCSH_R2Y_EN | m_BCSH_Y2R_EN,
+                                            v_BCSH_R2Y_EN(0) |
+                                            v_BCSH_Y2R_EN(0));
+               } else          /* RGB2YUV */
+                       lcdc_msk_reg(lcdc_dev, BCSH_CTRL,
+                                    m_BCSH_R2Y_EN |
+                                    m_BCSH_R2Y_CSC_MODE | m_BCSH_Y2R_EN,
+                                    v_BCSH_R2Y_EN(1) |
+                                    v_BCSH_R2Y_CSC_MODE(VOP_Y2R_CSC_MPEG) |
+                                    v_BCSH_Y2R_EN(0));
+       }
+}
+
+static int rk3368_load_screen(struct rk_lcdc_driver *dev_drv, bool initscreen)
+{
+       u16 face = 0;
+       u16 dclk_ddr = 0;
+       u32 v = 0;
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_screen *screen = dev_drv->cur_screen;
+       u32 mask, val;
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (likely(lcdc_dev->clk_on)) {
+               dev_drv->overlay_mode = VOP_RGB_DOMAIN;
+               if (!lcdc_dev->standby && !initscreen) {
+                       lcdc_msk_reg(lcdc_dev, SYS_CTRL, m_STANDBY_EN,
+                                    v_STANDBY_EN(1));
+                       lcdc_cfg_done(lcdc_dev);
+                       mdelay(50);
+               }
+               switch (screen->face) {
+               case OUT_P565:
+                       face = OUT_P565;
+                       mask = m_DITHER_DOWN_EN | m_DITHER_DOWN_MODE |
+                           m_DITHER_DOWN_SEL;
+                       val = v_DITHER_DOWN_EN(1) | v_DITHER_DOWN_MODE(0) |
+                           v_DITHER_DOWN_SEL(1);
+                       lcdc_msk_reg(lcdc_dev, DSP_CTRL1, mask, val);
+                       break;
+               case OUT_P666:
+                       face = OUT_P666;
+                       mask = m_DITHER_DOWN_EN | m_DITHER_DOWN_MODE |
+                           m_DITHER_DOWN_SEL;
+                       val = v_DITHER_DOWN_EN(1) | v_DITHER_DOWN_MODE(1) |
+                           v_DITHER_DOWN_SEL(1);
+                       lcdc_msk_reg(lcdc_dev, DSP_CTRL1, mask, val);
+                       break;
+               case OUT_D888_P565:
+                       face = OUT_P888;
+                       mask = m_DITHER_DOWN_EN | m_DITHER_DOWN_MODE |
+                           m_DITHER_DOWN_SEL;
+                       val = v_DITHER_DOWN_EN(1) | v_DITHER_DOWN_MODE(0) |
+                           v_DITHER_DOWN_SEL(1);
+                       lcdc_msk_reg(lcdc_dev, DSP_CTRL1, mask, val);
+                       break;
+               case OUT_D888_P666:
+                       face = OUT_P888;
+                       mask = m_DITHER_DOWN_EN | m_DITHER_DOWN_MODE |
+                           m_DITHER_DOWN_SEL;
+                       val = v_DITHER_DOWN_EN(1) | v_DITHER_DOWN_MODE(1) |
+                           v_DITHER_DOWN_SEL(1);
+                       lcdc_msk_reg(lcdc_dev, DSP_CTRL1, mask, val);
+                       break;
+               case OUT_P888:
+                       face = OUT_P888;
+                       mask = m_DITHER_DOWN_EN | m_DITHER_UP_EN;
+                       val = v_DITHER_DOWN_EN(0) | v_DITHER_UP_EN(0);
+                       lcdc_msk_reg(lcdc_dev, DSP_CTRL1, mask, val);
+                       break;
+               case OUT_YUV_420:
+                       /*yuv420 output prefer yuv domain overlay */
+                       face = OUT_YUV_420;
+                       dclk_ddr = 1;
+                       mask = m_DITHER_DOWN_EN | m_DITHER_UP_EN;
+                       val = v_DITHER_DOWN_EN(0) | v_DITHER_UP_EN(0);
+                       lcdc_msk_reg(lcdc_dev, DSP_CTRL1, mask, val);
+                       break;
+               default:
+                       dev_err(lcdc_dev->dev, "un supported interface!\n");
+                       break;
+               }
+               switch (screen->type) {
+               case SCREEN_RGB:
+                       mask = m_RGB_OUT_EN;
+                       val = v_RGB_OUT_EN(1);
+                       lcdc_msk_reg(lcdc_dev, SYS_CTRL, mask, val);
+                       mask = m_RGB_LVDS_HSYNC_POL | m_RGB_LVDS_VSYNC_POL |
+                           m_RGB_LVDS_DEN_POL | m_RGB_LVDS_DCLK_POL;
+                       val = v_RGB_LVDS_HSYNC_POL(screen->pin_hsync) |
+                           v_RGB_LVDS_VSYNC_POL(screen->pin_vsync) |
+                           v_RGB_LVDS_DEN_POL(screen->pin_den) |
+                           v_RGB_LVDS_DCLK_POL(screen->pin_dclk);
+                       v = 1 << 15 | (1 << (15 + 16));
+
+                       break;
+               case SCREEN_LVDS:
+                       mask = m_RGB_OUT_EN;
+                       val = v_RGB_OUT_EN(1);
+                       lcdc_msk_reg(lcdc_dev, SYS_CTRL, mask, val);
+                       mask = m_RGB_LVDS_HSYNC_POL | m_RGB_LVDS_VSYNC_POL |
+                           m_RGB_LVDS_DEN_POL | m_RGB_LVDS_DCLK_POL;
+                       val = v_RGB_LVDS_HSYNC_POL(screen->pin_hsync) |
+                           v_RGB_LVDS_VSYNC_POL(screen->pin_vsync) |
+                           v_RGB_LVDS_DEN_POL(screen->pin_den) |
+                           v_RGB_LVDS_DCLK_POL(screen->pin_dclk);
+                       v = 0 << 15 | (1 << (15 + 16));
+                       break;
+               case SCREEN_HDMI:
+                       face = OUT_RGB_AAA;
+                       mask = m_HDMI_OUT_EN;
+                       val = v_HDMI_OUT_EN(1);
+                       lcdc_msk_reg(lcdc_dev, SYS_CTRL, mask, val);
+                       mask = m_HDMI_HSYNC_POL | m_HDMI_VSYNC_POL |
+                           m_HDMI_DEN_POL | m_HDMI_DCLK_POL;
+                       val = v_HDMI_HSYNC_POL(screen->pin_hsync) |
+                           v_HDMI_VSYNC_POL(screen->pin_vsync) |
+                           v_HDMI_DEN_POL(screen->pin_den) |
+                           v_HDMI_DCLK_POL(screen->pin_dclk);
+                       break;
+               case SCREEN_MIPI:
+                       mask = m_MIPI_OUT_EN;
+                       val = v_MIPI_OUT_EN(1);
+                       lcdc_msk_reg(lcdc_dev, SYS_CTRL, mask, val);
+                       mask = m_MIPI_HSYNC_POL | m_MIPI_VSYNC_POL |
+                           m_MIPI_DEN_POL | m_MIPI_DCLK_POL;
+                       val = v_MIPI_HSYNC_POL(screen->pin_hsync) |
+                           v_MIPI_VSYNC_POL(screen->pin_vsync) |
+                           v_MIPI_DEN_POL(screen->pin_den) |
+                           v_MIPI_DCLK_POL(screen->pin_dclk);
+                       break;
+               case SCREEN_DUAL_MIPI:
+                       mask = m_MIPI_OUT_EN | m_DOUB_CHANNEL_EN;
+                       val = v_MIPI_OUT_EN(1) | v_DOUB_CHANNEL_EN(1);
+                       lcdc_msk_reg(lcdc_dev, SYS_CTRL, mask, val);
+                       mask = m_MIPI_HSYNC_POL | m_MIPI_VSYNC_POL |
+                           m_MIPI_DEN_POL | m_MIPI_DCLK_POL;
+                       val = v_MIPI_HSYNC_POL(screen->pin_hsync) |
+                           v_MIPI_VSYNC_POL(screen->pin_vsync) |
+                           v_MIPI_DEN_POL(screen->pin_den) |
+                           v_MIPI_DCLK_POL(screen->pin_dclk);
+                       break;
+               case SCREEN_EDP:
+                       face = OUT_RGB_AAA;     /*RGB AAA output */
+
+                       mask = m_EDP_OUT_EN;
+                       val = v_EDP_OUT_EN(1);
+                       lcdc_msk_reg(lcdc_dev, SYS_CTRL, mask, val);
+                       /*because edp have to sent aaa fmt */
+                       mask = m_DITHER_DOWN_EN | m_DITHER_UP_EN;
+                       val = v_DITHER_DOWN_EN(0) | v_DITHER_UP_EN(0);
+
+                       mask |= m_EDP_HSYNC_POL | m_EDP_VSYNC_POL |
+                           m_EDP_DEN_POL | m_EDP_DCLK_POL;
+                       val |= v_EDP_HSYNC_POL(screen->pin_hsync) |
+                           v_EDP_VSYNC_POL(screen->pin_vsync) |
+                           v_EDP_DEN_POL(screen->pin_den) |
+                           v_EDP_DCLK_POL(screen->pin_dclk);
+                       break;
+               }
+               /*hsync vsync den dclk polo,dither */
+               lcdc_msk_reg(lcdc_dev, DSP_CTRL1, mask, val);
+#ifndef CONFIG_RK_FPGA
+               /*writel_relaxed(v, RK_GRF_VIRT + rk3368_GRF_SOC_CON7);
+               move to  lvds driver*/
+               /*GRF_SOC_CON7 bit[15]:0->dsi/lvds mode,1->ttl mode */
+#endif
+               mask = m_DSP_OUT_MODE | m_DSP_DCLK_DDR | m_DSP_BG_SWAP |
+                   m_DSP_RB_SWAP | m_DSP_RG_SWAP | m_DSP_DELTA_SWAP |
+                   m_DSP_DUMMY_SWAP | m_DSP_OUT_ZERO | m_DSP_BLANK_EN |
+                   m_DSP_BLACK_EN | m_DSP_X_MIR_EN | m_DSP_Y_MIR_EN;
+               val = v_DSP_OUT_MODE(face) | v_DSP_DCLK_DDR(dclk_ddr) |
+                   v_DSP_BG_SWAP(screen->swap_gb) |
+                   v_DSP_RB_SWAP(screen->swap_rb) |
+                   v_DSP_RG_SWAP(screen->swap_rg) |
+                   v_DSP_DELTA_SWAP(screen->swap_delta) |
+                   v_DSP_DUMMY_SWAP(screen->swap_dumy) | v_DSP_OUT_ZERO(0) |
+                   v_DSP_BLANK_EN(0) | v_DSP_BLACK_EN(0) |
+                   v_DSP_X_MIR_EN(screen->x_mirror) |
+                   v_DSP_Y_MIR_EN(screen->y_mirror);
+               lcdc_msk_reg(lcdc_dev, DSP_CTRL0, mask, val);
+               /*BG color */
+               mask = m_DSP_BG_BLUE | m_DSP_BG_GREEN | m_DSP_BG_RED;
+               val = v_DSP_BG_BLUE(0) | v_DSP_BG_GREEN(0) | v_DSP_BG_RED(0);
+               lcdc_msk_reg(lcdc_dev, DSP_BG, mask, val);
+               rk3368_lcdc_bcsh_path_sel(dev_drv);
+               rk3368_config_timing(dev_drv);
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+       rk3368_lcdc_set_dclk(dev_drv);
+       if (screen->type != SCREEN_HDMI && dev_drv->trsm_ops &&
+           dev_drv->trsm_ops->enable)
+               dev_drv->trsm_ops->enable();
+       if (screen->init)
+               screen->init();
+       if (!lcdc_dev->standby)
+               lcdc_msk_reg(lcdc_dev, SYS_CTRL, m_STANDBY_EN, v_STANDBY_EN(0));
+       return 0;
+}
+
+
+/*
+ * rk3368_lcdc_layer_enable - enable or disable one overlay window
+ * @lcdc_dev: lcdc controller device
+ * @win_id:   window index, used as a bit position in atv_layer_cnt
+ * @open:     true to enable the window, false to disable it
+ *
+ * Tracks active windows in the atv_layer_cnt bitmask: leaving standby
+ * when the first window opens and entering standby when the last one
+ * closes. On close, the window registers are pushed and cfg_done is
+ * issued so the hardware stops scanning the window.
+ * NOTE(review): win_id is not range-checked here; callers are expected
+ * to validate it (rk3368_lcdc_open checks against ARRAY_SIZE(lcdc_win)).
+ */
+static void rk3368_lcdc_layer_enable(struct lcdc_device *lcdc_dev,
+                                    unsigned int win_id, bool open)
+{
+       spin_lock(&lcdc_dev->reg_lock);
+       /* only act when the clock is on and the state actually changes */
+       if (likely(lcdc_dev->clk_on) &&
+           lcdc_dev->driver.win[win_id]->state != open) {
+               if (open) {
+                       /* first window opening: leave standby */
+                       if (!lcdc_dev->atv_layer_cnt) {
+                               dev_info(lcdc_dev->dev,
+                                        "wakeup from standby!\n");
+                               lcdc_dev->standby = 0;
+                       }
+                       lcdc_dev->atv_layer_cnt |= (1 << win_id);
+               } else {
+                       if (lcdc_dev->atv_layer_cnt & (1 << win_id))
+                               lcdc_dev->atv_layer_cnt &= ~(1 << win_id);
+               }
+               lcdc_dev->driver.win[win_id]->state = open;
+               /* on disable, write the window regs and latch them now */
+               if (!open) {
+                       /*rk3368_lcdc_reg_update(dev_drv);*/
+                       rk3368_lcdc_layer_update_regs
+                       (lcdc_dev, lcdc_dev->driver.win[win_id]);
+                       lcdc_cfg_done(lcdc_dev);
+               }
+               /*if no layer used,disable lcdc */
+               if (!lcdc_dev->atv_layer_cnt) {
+                       dev_info(lcdc_dev->dev,
+                                "no layer is used,go to standby!\n");
+                       lcdc_dev->standby = 1;
+               }
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+}
+
+/*
+ * rk3368_lcdc_enable_irq - arm the display interrupts
+ * @dev_drv: generic lcdc driver instance embedded in struct lcdc_device
+ *
+ * Clears any stale frame-start / line-flag status, then enables the
+ * frame-start, line-flag0 and bus-error interrupts. With
+ * LCDC_IRQ_EMPTY_DEBUG it additionally enables the per-window FIFO
+ * empty interrupts for debugging. Always returns 0.
+ */
+static int rk3368_lcdc_enable_irq(struct rk_lcdc_driver *dev_drv)
+{
+       struct lcdc_device *lcdc_dev = container_of(dev_drv,
+                                                   struct lcdc_device, driver);
+       u32 mask, val;
+       /*struct rk_screen *screen = dev_drv->cur_screen; */
+
+       /* clear pending status first so we don't take a spurious irq */
+       mask = m_FS_INTR_CLR | m_FS_NEW_INTR_CLR | m_LINE_FLAG0_INTR_CLR |
+           m_LINE_FLAG1_INTR_CLR;
+       val = v_FS_INTR_CLR(1) | v_FS_NEW_INTR_CLR(1) |
+           v_LINE_FLAG0_INTR_CLR(1) | v_LINE_FLAG1_INTR_CLR(1);
+       lcdc_msk_reg(lcdc_dev, INTR_CLEAR, mask, val);
+
+       /* then enable the interrupts the driver actually handles */
+       mask = m_FS_INTR_EN | m_LINE_FLAG0_INTR_EN | m_BUS_ERROR_INTR_EN;
+       val = v_FS_INTR_EN(1) | v_LINE_FLAG0_INTR_EN(1) |
+           v_BUS_ERROR_INTR_EN(1);
+       lcdc_msk_reg(lcdc_dev, INTR_EN, mask, val);
+
+#ifdef LCDC_IRQ_EMPTY_DEBUG
+       /* debug-only: FIFO-empty interrupts for every window/generator */
+       mask = m_WIN0_EMPTY_INTR_EN | m_WIN1_EMPTY_INTR_EN |
+           m_WIN2_EMPTY_INTR_EN |
+           m_WIN3_EMPTY_INTR_EN | m_HWC_EMPTY_INTR_EN |
+           m_POST_BUF_EMPTY_INTR_EN | m_PWM_GEN_INTR_EN;
+       val = v_WIN0_EMPTY_INTR_EN(1) | v_WIN1_EMPTY_INTR_EN(1) |
+           v_WIN2_EMPTY_INTR_EN(1) |
+           v_WIN3_EMPTY_INTR_EN(1) | v_HWC_EMPTY_INTR_EN(1) |
+           v_POST_BUF_EMPTY_INTR_EN(1) | v_PWM_GEN_INTR_EN(1);
+       lcdc_msk_reg(lcdc_dev, INTR_EN, mask, val);
+#endif
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_open - open or close one window of the controller
+ * @dev_drv: generic lcdc driver instance
+ * @win_id:  index into lcdc_win[]
+ * @open:    true to open the window, false to close it
+ *
+ * On the first window open this brings the whole controller up:
+ * system-status vote, pre-init, clocks, optional IOMMU lookup, register
+ * restore, then either reuses the u-boot display path (primary lcdc) or
+ * does a full rk3368_load_screen(). BCSH and LUT are reapplied if
+ * configured. Afterwards the individual window is (de)activated via
+ * rk3368_lcdc_layer_enable(). Returns 0, or -1 when the IOMMU device
+ * lookup fails.
+ */
+static int rk3368_lcdc_open(struct rk_lcdc_driver *dev_drv, int win_id,
+                           bool open)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+#ifndef CONFIG_RK_FPGA
+       int sys_status =
+           (dev_drv->id == 0) ? SYS_STATUS_LCDC0 : SYS_STATUS_LCDC1;
+#endif
+       /*enable clk,when first layer open */
+       if ((open) && (!lcdc_dev->atv_layer_cnt)) {
+#ifndef CONFIG_RK_FPGA
+               rockchip_set_system_status(sys_status);
+#endif
+               rk3368_lcdc_pre_init(dev_drv);
+               rk3368_lcdc_clk_enable(lcdc_dev);
+#if defined(CONFIG_ROCKCHIP_IOMMU)
+               /* lazily resolve the sysmmu device on first open */
+               if (dev_drv->iommu_enabled) {
+                       if (!dev_drv->mmu_dev) {
+                               dev_drv->mmu_dev =
+                                   rk_fb_get_sysmmu_device_by_compatible
+                                   (dev_drv->mmu_dts_name);
+                               if (dev_drv->mmu_dev) {
+                                       rk_fb_platform_set_sysmmu
+                                           (dev_drv->mmu_dev, dev_drv->dev);
+                               } else {
+                                       dev_err(dev_drv->dev,
+                                               "fail get rk iommu device\n");
+                                       return -1;
+                               }
+                       }
+                       /*if (dev_drv->mmu_dev)
+                          rockchip_iovmm_activate(dev_drv->dev); */
+               }
+#endif
+               rk3368_lcdc_reg_restore(lcdc_dev);
+               /*if (dev_drv->iommu_enabled)
+                  rk3368_lcdc_mmu_en(dev_drv); */
+               /* primary display handed over from u-boot: keep its timing,
+                * just arm interrupts; otherwise program the screen fully */
+               if ((support_uboot_display() && (lcdc_dev->prop == PRMRY))) {
+                       /*rk3368_lcdc_set_dclk(dev_drv); */
+                       rk3368_lcdc_enable_irq(dev_drv);
+               } else {
+                       rk3368_load_screen(dev_drv, 1);
+               }
+               if (dev_drv->bcsh.enable)
+                       rk3368_lcdc_set_bcsh(dev_drv, 1);
+               spin_lock(&lcdc_dev->reg_lock);
+               if (dev_drv->cur_screen->dsp_lut)
+                       rk3368_lcdc_set_lut(dev_drv);
+               spin_unlock(&lcdc_dev->reg_lock);
+       }
+
+       if (win_id < ARRAY_SIZE(lcdc_win))
+               rk3368_lcdc_layer_enable(lcdc_dev, win_id, open);
+       else
+               dev_err(lcdc_dev->dev, "invalid win id:%d\n", win_id);
+
+
+       /* when all layer closed,disable clk */
+       /*if ((!open) && (!lcdc_dev->atv_layer_cnt)) {
+          rk3368_lcdc_disable_irq(lcdc_dev);
+          rk3368_lcdc_reg_update(dev_drv);
+          #if defined(CONFIG_ROCKCHIP_IOMMU)
+          if (dev_drv->iommu_enabled) {
+          if (dev_drv->mmu_dev)
+          rockchip_iovmm_deactivate(dev_drv->dev);
+          }
+          #endif
+          rk3368_lcdc_clk_disable(lcdc_dev);
+          #ifndef CONFIG_RK_FPGA
+          rockchip_clear_system_status(sys_status);
+          #endif
+          } */
+
+       return 0;
+}
+
+/*
+ * win_0_1_display - latch new luma/chroma buffer addresses for win0/win1
+ * @lcdc_dev: lcdc controller device
+ * @win:      window descriptor (uses area[0] only)
+ *
+ * Win1's registers sit 0x40 above win0's, so the offset is id * 0x40.
+ * Writes take effect at the next cfg_done. Returns 0.
+ */
+static int win_0_1_display(struct lcdc_device *lcdc_dev,
+                          struct rk_lcdc_win *win)
+{
+       unsigned int reg_off = win->id * 0x40;
+       u32 luma_addr = win->area[0].smem_start + win->area[0].y_offset;
+       u32 chroma_addr = win->area[0].cbr_start + win->area[0].c_offset;
+
+       DBG(2, "lcdc[%d]:win[%d]>>:y_addr:0x%x>>uv_addr:0x%x",
+           lcdc_dev->id, win->id, luma_addr, chroma_addr);
+       DBG(2, ">>y_offset:0x%x>>c_offset=0x%x\n",
+           win->area[0].y_offset, win->area[0].c_offset);
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (likely(lcdc_dev->clk_on)) {
+               win->area[0].y_addr = luma_addr;
+               win->area[0].uv_addr = chroma_addr;
+               lcdc_writel(lcdc_dev, WIN0_YRGB_MST + reg_off, luma_addr);
+               lcdc_writel(lcdc_dev, WIN0_CBR_MST + reg_off, chroma_addr);
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       return 0;
+}
+
+/*
+ * win_2_3_display - latch the per-area buffer addresses for win2/win3
+ * @lcdc_dev: lcdc controller device
+ * @win:      window descriptor with up to four areas
+ *
+ * Win3's register bank sits 0x50 above win2's, hence (id - 2) * 0x50.
+ * NOTE(review): all four MST registers are written even when
+ * win->area_num < 4, so unused areas latch whatever y_addr held before
+ * — presumably harmless for disabled areas, but confirm.
+ * NOTE(review): the DBG line prints area[i].y_addr before it is
+ * recomputed below, i.e. it shows the previous frame's address.
+ */
+static int win_2_3_display(struct lcdc_device *lcdc_dev,
+                          struct rk_lcdc_win *win)
+{
+       u32 i, y_addr;
+       unsigned int off;
+
+       off = (win->id - 2) * 0x50;
+       y_addr = win->area[0].smem_start + win->area[0].y_offset;
+       DBG(2, "lcdc[%d]:win[%d]:", lcdc_dev->id, win->id);
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (likely(lcdc_dev->clk_on)) {
+               for (i = 0; i < win->area_num; i++) {
+                       DBG(2, "area[%d]:yaddr:0x%x>>offset:0x%x>>\n",
+                           i, win->area[i].y_addr, win->area[i].y_offset);
+                       win->area[i].y_addr =
+                           win->area[i].smem_start + win->area[i].y_offset;
+                       }
+               lcdc_writel(lcdc_dev, WIN2_MST0 + off, win->area[0].y_addr);
+               lcdc_writel(lcdc_dev, WIN2_MST1 + off, win->area[1].y_addr);
+               lcdc_writel(lcdc_dev, WIN2_MST2 + off, win->area[2].y_addr);
+               lcdc_writel(lcdc_dev, WIN2_MST3 + off, win->area[3].y_addr);
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+       return 0;
+}
+
+/*
+ * hwc_display - latch the hardware-cursor frame buffer address
+ * @lcdc_dev: lcdc controller device
+ * @win:      cursor window descriptor (uses area[0] only)
+ *
+ * Returns 0.
+ */
+static int hwc_display(struct lcdc_device *lcdc_dev, struct rk_lcdc_win *win)
+{
+       u32 addr = win->area[0].smem_start + win->area[0].y_offset;
+
+       DBG(2, "lcdc[%d]:hwc>>%s>>y_addr:0x%x>>\n",
+           lcdc_dev->id, __func__, addr);
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (likely(lcdc_dev->clk_on)) {
+               win->area[0].y_addr = addr;
+               lcdc_writel(lcdc_dev, HWC_MST, addr);
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_pan_display - latch new frame buffer addresses for a window
+ * @dev_drv: generic lcdc driver instance
+ * @win_id:  window index (0/1: video wins, 2/3: multi-area wins, 4: hwc)
+ *
+ * Dispatches to the per-window address-latch helper and, on the very
+ * first frame, arms the display interrupts. Returns 0 on success,
+ * -ENOENT when no screen is bound, -EINVAL for an out-of-range window
+ * id, and -ETIMEDOUT when WAIT_FOR_SYNC is set and the frame-start
+ * completion does not arrive within one frame time (+5ms).
+ *
+ * Fix: validate win_id BEFORE indexing dev_drv->win[]; the original
+ * read dev_drv->win[win_id] first, which is an out-of-bounds array
+ * read for invalid ids even though the pointer was never dereferenced.
+ */
+static int rk3368_lcdc_pan_display(struct rk_lcdc_driver *dev_drv, int win_id)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_lcdc_win *win = NULL;
+       struct rk_screen *screen = dev_drv->cur_screen;
+
+#if defined(WAIT_FOR_SYNC)
+       int timeout;
+       unsigned long flags;
+#endif
+       if (!screen) {
+               dev_err(dev_drv->dev, "screen is null!\n");
+               return -ENOENT;
+       }
+       if ((win_id < 0) || (win_id > 4)) {
+               dev_err(dev_drv->dev, "invalid win number:%d!\n", win_id);
+               return -EINVAL;
+       }
+       win = dev_drv->win[win_id];
+       if ((win_id == 0) || (win_id == 1))
+               win_0_1_display(lcdc_dev, win);
+       else if ((win_id == 2) || (win_id == 3))
+               win_2_3_display(lcdc_dev, win);
+       else
+               hwc_display(lcdc_dev, win);
+
+       /*this is the first frame of the system ,enable frame start interrupt */
+       if ((dev_drv->first_frame)) {
+               dev_drv->first_frame = 0;
+               rk3368_lcdc_enable_irq(dev_drv);
+       }
+#if defined(WAIT_FOR_SYNC)
+       spin_lock_irqsave(&dev_drv->cpl_lock, flags);
+       init_completion(&dev_drv->frame_done);
+       spin_unlock_irqrestore(&dev_drv->cpl_lock, flags);
+       timeout =
+           wait_for_completion_timeout(&dev_drv->frame_done,
+                                       msecs_to_jiffies(dev_drv->
+                                                        cur_screen->ft + 5));
+       if (!timeout && (!dev_drv->frame_done.done)) {
+               dev_info(dev_drv->dev, "wait for new frame start time out!\n");
+               return -ETIMEDOUT;
+       }
+#endif
+       return 0;
+}
+
+static int rk3368_lcdc_cal_scl_fac(struct rk_lcdc_win *win)
+{
+       u16 srcW;
+       u16 srcH;
+       u16 dstW;
+       u16 dstH;
+       u16 yrgb_srcW;
+       u16 yrgb_srcH;
+       u16 yrgb_dstW;
+       u16 yrgb_dstH;
+       u32 yrgb_vscalednmult;
+       u32 yrgb_xscl_factor;
+       u32 yrgb_yscl_factor;
+       u8 yrgb_vsd_bil_gt2 = 0;
+       u8 yrgb_vsd_bil_gt4 = 0;
+
+       u16 cbcr_srcW;
+       u16 cbcr_srcH;
+       u16 cbcr_dstW;
+       u16 cbcr_dstH;
+       u32 cbcr_vscalednmult;
+       u32 cbcr_xscl_factor;
+       u32 cbcr_yscl_factor;
+       u8 cbcr_vsd_bil_gt2 = 0;
+       u8 cbcr_vsd_bil_gt4 = 0;
+       u8 yuv_fmt = 0;
+
+       srcW = win->area[0].xact;
+       srcH = win->area[0].yact;
+       dstW = win->area[0].xsize;
+       dstH = win->area[0].ysize;
+
+       /*yrgb scl mode */
+       yrgb_srcW = srcW;
+       yrgb_srcH = srcH;
+       yrgb_dstW = dstW;
+       yrgb_dstH = dstH;
+       if ((yrgb_dstW * 8 <= yrgb_srcW) || (yrgb_dstH * 8 <= yrgb_srcH)) {
+               pr_err("ERROR: yrgb scale exceed 8,");
+               pr_err("srcW=%d,srcH=%d,dstW=%d,dstH=%d\n",
+                      yrgb_srcW, yrgb_srcH, yrgb_dstW, yrgb_dstH);
+       }
+       if (yrgb_srcW < yrgb_dstW)
+               win->yrgb_hor_scl_mode = SCALE_UP;
+       else if (yrgb_srcW > yrgb_dstW)
+               win->yrgb_hor_scl_mode = SCALE_DOWN;
+       else
+               win->yrgb_hor_scl_mode = SCALE_NONE;
+
+       if (yrgb_srcH < yrgb_dstH)
+               win->yrgb_ver_scl_mode = SCALE_UP;
+       else if (yrgb_srcH > yrgb_dstH)
+               win->yrgb_ver_scl_mode = SCALE_DOWN;
+       else
+               win->yrgb_ver_scl_mode = SCALE_NONE;
+
+       /*cbcr scl mode */
+       switch (win->area[0].format) {
+       case YUV422:
+       case YUV422_A:
+               cbcr_srcW = srcW / 2;
+               cbcr_dstW = dstW;
+               cbcr_srcH = srcH;
+               cbcr_dstH = dstH;
+               yuv_fmt = 1;
+               break;
+       case YUV420:
+       case YUV420_A:
+               cbcr_srcW = srcW / 2;
+               cbcr_dstW = dstW;
+               cbcr_srcH = srcH / 2;
+               cbcr_dstH = dstH;
+               yuv_fmt = 1;
+               break;
+       case YUV444:
+       case YUV444_A:
+               cbcr_srcW = srcW;
+               cbcr_dstW = dstW;
+               cbcr_srcH = srcH;
+               cbcr_dstH = dstH;
+               yuv_fmt = 1;
+               break;
+       default:
+               cbcr_srcW = 0;
+               cbcr_dstW = 0;
+               cbcr_srcH = 0;
+               cbcr_dstH = 0;
+               yuv_fmt = 0;
+               break;
+       }
+       if (yuv_fmt) {
+               if ((cbcr_dstW * 8 <= cbcr_srcW) ||
+                   (cbcr_dstH * 8 <= cbcr_srcH)) {
+                       pr_err("ERROR: cbcr scale exceed 8,");
+                       pr_err("srcW=%d,srcH=%d,dstW=%d,dstH=%d\n", cbcr_srcW,
+                              cbcr_srcH, cbcr_dstW, cbcr_dstH);
+               }
+       }
+
+       if (cbcr_srcW < cbcr_dstW)
+               win->cbr_hor_scl_mode = SCALE_UP;
+       else if (cbcr_srcW > cbcr_dstW)
+               win->cbr_hor_scl_mode = SCALE_DOWN;
+       else
+               win->cbr_hor_scl_mode = SCALE_NONE;
+
+       if (cbcr_srcH < cbcr_dstH)
+               win->cbr_ver_scl_mode = SCALE_UP;
+       else if (cbcr_srcH > cbcr_dstH)
+               win->cbr_ver_scl_mode = SCALE_DOWN;
+       else
+               win->cbr_ver_scl_mode = SCALE_NONE;
+
+       /*DBG(1, "srcW:%d>>srcH:%d>>dstW:%d>>dstH:%d>>\n"
+           "yrgb:src:W=%d>>H=%d,dst:W=%d>>H=%d,H_mode=%d,V_mode=%d\n"
+           "cbcr:src:W=%d>>H=%d,dst:W=%d>>H=%d,H_mode=%d,V_mode=%d\n", srcW,
+           srcH, dstW, dstH, yrgb_srcW, yrgb_srcH, yrgb_dstW, yrgb_dstH,
+           win->yrgb_hor_scl_mode, win->yrgb_ver_scl_mode, cbcr_srcW,
+           cbcr_srcH, cbcr_dstW, cbcr_dstH, win->cbr_hor_scl_mode,
+           win->cbr_ver_scl_mode);*/
+
+       /*line buffer mode */
+       if ((win->area[0].format == YUV422) ||
+           (win->area[0].format == YUV420) ||
+           (win->area[0].format == YUV422_A) ||
+           (win->area[0].format == YUV420_A)) {
+               if (win->cbr_hor_scl_mode == SCALE_DOWN) {
+                       if ((cbcr_dstW > VOP_INPUT_MAX_WIDTH / 2) ||
+                           (cbcr_dstW == 0))
+                               pr_err("ERROR cbcr_dstW = %d,exceeds 2048\n",
+                                      cbcr_dstW);
+                       else if (cbcr_dstW > 1280)
+                               win->win_lb_mode = LB_YUV_3840X5;
+                       else
+                               win->win_lb_mode = LB_YUV_2560X8;
+               } else {        /*SCALE_UP or SCALE_NONE */
+                       if ((cbcr_srcW > VOP_INPUT_MAX_WIDTH / 2) ||
+                           (cbcr_srcW == 0))
+                               pr_err("ERROR cbcr_srcW = %d,exceeds 2048\n",
+                                      cbcr_srcW);
+                       else if (cbcr_srcW > 1280)
+                               win->win_lb_mode = LB_YUV_3840X5;
+                       else
+                               win->win_lb_mode = LB_YUV_2560X8;
+               }
+       } else {
+               if (win->yrgb_hor_scl_mode == SCALE_DOWN) {
+                       if ((yrgb_dstW > VOP_INPUT_MAX_WIDTH) ||
+                           (yrgb_dstW == 0))
+                               pr_err("ERROR yrgb_dstW = %d\n", yrgb_dstW);
+                       else if (yrgb_dstW > 2560)
+                               win->win_lb_mode = LB_RGB_3840X2;
+                       else if (yrgb_dstW > 1920)
+                               win->win_lb_mode = LB_RGB_2560X4;
+                       else if (yrgb_dstW > 1280)
+                               win->win_lb_mode = LB_RGB_1920X5;
+                       else
+                               win->win_lb_mode = LB_RGB_1280X8;
+               } else {        /*SCALE_UP or SCALE_NONE */
+                       if ((yrgb_srcW > VOP_INPUT_MAX_WIDTH) ||
+                           (yrgb_srcW == 0))
+                               pr_err("ERROR yrgb_srcW = %d\n", yrgb_srcW);
+                       else if (yrgb_srcW > 2560)
+                               win->win_lb_mode = LB_RGB_3840X2;
+                       else if (yrgb_srcW > 1920)
+                               win->win_lb_mode = LB_RGB_2560X4;
+                       else if (yrgb_srcW > 1280)
+                               win->win_lb_mode = LB_RGB_1920X5;
+                       else
+                               win->win_lb_mode = LB_RGB_1280X8;
+               }
+       }
+       DBG(1, "win->win_lb_mode = %d;\n", win->win_lb_mode);
+
+       /*vsd/vsu scale ALGORITHM */
+       win->yrgb_hsd_mode = SCALE_DOWN_BIL;    /*not to specify */
+       win->cbr_hsd_mode = SCALE_DOWN_BIL;     /*not to specify */
+       win->yrgb_vsd_mode = SCALE_DOWN_BIL;    /*not to specify */
+       win->cbr_vsd_mode = SCALE_DOWN_BIL;     /*not to specify */
+       switch (win->win_lb_mode) {
+       case LB_YUV_3840X5:
+       case LB_YUV_2560X8:
+       case LB_RGB_1920X5:
+       case LB_RGB_1280X8:
+               win->yrgb_vsu_mode = SCALE_UP_BIC;
+               win->cbr_vsu_mode = SCALE_UP_BIC;
+               break;
+       case LB_RGB_3840X2:
+               if (win->yrgb_ver_scl_mode != SCALE_NONE)
+                       pr_err("ERROR : not allow yrgb ver scale\n");
+               if (win->cbr_ver_scl_mode != SCALE_NONE)
+                       pr_err("ERROR : not allow cbcr ver scale\n");
+               break;
+       case LB_RGB_2560X4:
+               win->yrgb_vsu_mode = SCALE_UP_BIL;
+               win->cbr_vsu_mode = SCALE_UP_BIL;
+               break;
+       default:
+               pr_info("%s:un supported win_lb_mode:%d\n",
+                       __func__, win->win_lb_mode);
+               break;
+       }
+       if (win->mirror_en == 1) {      /*interlace mode must bill */
+               win->yrgb_vsd_mode = SCALE_DOWN_BIL;
+       }
+
+       if ((win->yrgb_ver_scl_mode == SCALE_DOWN) &&
+           (win->area[0].fbdc_en == 1)) {
+               /*in this pattern,use bil mode,not support souble scd,
+               use avg mode, support double scd, but aclk should be
+               bigger than dclk,aclk>>dclk */
+               if (yrgb_srcH >= 2 * yrgb_dstH) {
+                       pr_err("ERROR : fbdc mode,not support y scale down:");
+                       pr_err("srcH[%d] > 2 *dstH[%d]\n",
+                              yrgb_srcH, yrgb_dstH);
+               }
+       }
+       DBG(1, "yrgb:hsd=%d,vsd=%d,vsu=%d;cbcr:hsd=%d,vsd=%d,vsu=%d\n",
+           win->yrgb_hsd_mode, win->yrgb_vsd_mode, win->yrgb_vsu_mode,
+           win->cbr_hsd_mode, win->cbr_vsd_mode, win->cbr_vsu_mode);
+
+       /*SCALE FACTOR */
+
+       /*(1.1)YRGB HOR SCALE FACTOR */
+       switch (win->yrgb_hor_scl_mode) {
+       case SCALE_NONE:
+               yrgb_xscl_factor = (1 << SCALE_FACTOR_DEFAULT_FIXPOINT_SHIFT);
+               break;
+       case SCALE_UP:
+               yrgb_xscl_factor = GET_SCALE_FACTOR_BIC(yrgb_srcW, yrgb_dstW);
+               break;
+       case SCALE_DOWN:
+               switch (win->yrgb_hsd_mode) {
+               case SCALE_DOWN_BIL:
+                       yrgb_xscl_factor =
+                           GET_SCALE_FACTOR_BILI_DN(yrgb_srcW, yrgb_dstW);
+                       break;
+               case SCALE_DOWN_AVG:
+                       yrgb_xscl_factor =
+                           GET_SCALE_FACTOR_AVRG(yrgb_srcW, yrgb_dstW);
+                       break;
+               default:
+                       pr_info(
+                               "%s:un supported yrgb_hsd_mode:%d\n", __func__,
+                              win->yrgb_hsd_mode);
+                       break;
+               }
+               break;
+       default:
+               pr_info("%s:un supported yrgb_hor_scl_mode:%d\n",
+                       __func__, win->yrgb_hor_scl_mode);
+               break;
+       }                       /*win->yrgb_hor_scl_mode */
+
+       /*(1.2)YRGB VER SCALE FACTOR */
+       switch (win->yrgb_ver_scl_mode) {
+       case SCALE_NONE:
+               yrgb_yscl_factor = (1 << SCALE_FACTOR_DEFAULT_FIXPOINT_SHIFT);
+               break;
+       case SCALE_UP:
+               switch (win->yrgb_vsu_mode) {
+               case SCALE_UP_BIL:
+                       yrgb_yscl_factor =
+                           GET_SCALE_FACTOR_BILI_UP(yrgb_srcH, yrgb_dstH);
+                       break;
+               case SCALE_UP_BIC:
+                       if (yrgb_srcH < 3) {
+                               pr_err("yrgb_srcH should be");
+                               pr_err(" greater than 3 !!!\n");
+                       }
+                       yrgb_yscl_factor = GET_SCALE_FACTOR_BIC(yrgb_srcH,
+                                                               yrgb_dstH);
+                       break;
+               default:
+                       pr_info("%s:un support yrgb_vsu_mode:%d\n",
+                               __func__, win->yrgb_vsu_mode);
+                       break;
+               }
+               break;
+       case SCALE_DOWN:
+               switch (win->yrgb_vsd_mode) {
+               case SCALE_DOWN_BIL:
+                       yrgb_vscalednmult =
+                           rk3368_get_hard_ware_vskiplines(yrgb_srcH,
+                                                           yrgb_dstH);
+                       yrgb_yscl_factor =
+                           GET_SCALE_FACTOR_BILI_DN_VSKIP(yrgb_srcH, yrgb_dstH,
+                                                          yrgb_vscalednmult);
+                       if (yrgb_yscl_factor >= 0x2000) {
+                               pr_err("yrgb_yscl_factor should be ");
+                               pr_err("less than 0x2000,yrgb_yscl_factor=%4x;\n",
+                                      yrgb_yscl_factor);
+                       }
+                       if (yrgb_vscalednmult == 4) {
+                               yrgb_vsd_bil_gt4 = 1;
+                               yrgb_vsd_bil_gt2 = 0;
+                       } else if (yrgb_vscalednmult == 2) {
+                               yrgb_vsd_bil_gt4 = 0;
+                               yrgb_vsd_bil_gt2 = 1;
+                       } else {
+                               yrgb_vsd_bil_gt4 = 0;
+                               yrgb_vsd_bil_gt2 = 0;
+                       }
+                       break;
+               case SCALE_DOWN_AVG:
+                       yrgb_yscl_factor = GET_SCALE_FACTOR_AVRG(yrgb_srcH,
+                                                                yrgb_dstH);
+                       break;
+               default:
+                       pr_info("%s:un support yrgb_vsd_mode:%d\n",
+                               __func__, win->yrgb_vsd_mode);
+                       break;
+               }               /*win->yrgb_vsd_mode */
+               break;
+       default:
+               pr_info("%s:un supported yrgb_ver_scl_mode:%d\n",
+                       __func__, win->yrgb_ver_scl_mode);
+               break;
+       }
+       win->scale_yrgb_x = yrgb_xscl_factor;
+       win->scale_yrgb_y = yrgb_yscl_factor;
+       win->vsd_yrgb_gt4 = yrgb_vsd_bil_gt4;
+       win->vsd_yrgb_gt2 = yrgb_vsd_bil_gt2;
+       DBG(1, "yrgb:h_fac=%d, v_fac=%d,gt4=%d, gt2=%d\n", yrgb_xscl_factor,
+           yrgb_yscl_factor, yrgb_vsd_bil_gt4, yrgb_vsd_bil_gt2);
+
+       /*(2.1)CBCR HOR SCALE FACTOR */
+       switch (win->cbr_hor_scl_mode) {
+       case SCALE_NONE:
+               cbcr_xscl_factor = (1 << SCALE_FACTOR_DEFAULT_FIXPOINT_SHIFT);
+               break;
+       case SCALE_UP:
+               cbcr_xscl_factor = GET_SCALE_FACTOR_BIC(cbcr_srcW, cbcr_dstW);
+               break;
+       case SCALE_DOWN:
+               switch (win->cbr_hsd_mode) {
+               case SCALE_DOWN_BIL:
+                       cbcr_xscl_factor =
+                           GET_SCALE_FACTOR_BILI_DN(cbcr_srcW, cbcr_dstW);
+                       break;
+               case SCALE_DOWN_AVG:
+                       cbcr_xscl_factor =
+                           GET_SCALE_FACTOR_AVRG(cbcr_srcW, cbcr_dstW);
+                       break;
+               default:
+                       pr_info("%s:un support cbr_hsd_mode:%d\n",
+                               __func__, win->cbr_hsd_mode);
+                       break;
+               }
+               break;
+       default:
+               pr_info("%s:un supported cbr_hor_scl_mode:%d\n",
+                       __func__, win->cbr_hor_scl_mode);
+               break;
+       }                       /*win->cbr_hor_scl_mode */
+
+       /*(2.2)CBCR VER SCALE FACTOR */
+       switch (win->cbr_ver_scl_mode) {
+       case SCALE_NONE:
+               cbcr_yscl_factor = (1 << SCALE_FACTOR_DEFAULT_FIXPOINT_SHIFT);
+               break;
+       case SCALE_UP:
+               switch (win->cbr_vsu_mode) {
+               case SCALE_UP_BIL:
+                       cbcr_yscl_factor =
+                           GET_SCALE_FACTOR_BILI_UP(cbcr_srcH, cbcr_dstH);
+                       break;
+               case SCALE_UP_BIC:
+                       if (cbcr_srcH < 3) {
+                               pr_err("cbcr_srcH should be ");
+                               pr_err("greater than 3 !!!\n");
+                       }
+                       cbcr_yscl_factor = GET_SCALE_FACTOR_BIC(cbcr_srcH,
+                                                               cbcr_dstH);
+                       break;
+               default:
+                       pr_info("%s:un support cbr_vsu_mode:%d\n",
+                               __func__, win->cbr_vsu_mode);
+                       break;
+               }
+               break;
+       case SCALE_DOWN:
+               switch (win->cbr_vsd_mode) {
+               case SCALE_DOWN_BIL:
+                       cbcr_vscalednmult =
+                           rk3368_get_hard_ware_vskiplines(cbcr_srcH,
+                                                           cbcr_dstH);
+                       cbcr_yscl_factor =
+                           GET_SCALE_FACTOR_BILI_DN_VSKIP(cbcr_srcH, cbcr_dstH,
+                                                          cbcr_vscalednmult);
+                       if (cbcr_yscl_factor >= 0x2000) {
+                               pr_err("cbcr_yscl_factor should be less ");
+                               pr_err("than 0x2000,cbcr_yscl_factor=%4x;\n",
+                                      cbcr_yscl_factor);
+                       }
+
+                       if (cbcr_vscalednmult == 4) {
+                               cbcr_vsd_bil_gt4 = 1;
+                               cbcr_vsd_bil_gt2 = 0;
+                       } else if (cbcr_vscalednmult == 2) {
+                               cbcr_vsd_bil_gt4 = 0;
+                               cbcr_vsd_bil_gt2 = 1;
+                       } else {
+                               cbcr_vsd_bil_gt4 = 0;
+                               cbcr_vsd_bil_gt2 = 0;
+                       }
+                       break;
+               case SCALE_DOWN_AVG:
+                       cbcr_yscl_factor = GET_SCALE_FACTOR_AVRG(cbcr_srcH,
+                                                                cbcr_dstH);
+                       break;
+               default:
+                       pr_info("%s:un support cbr_vsd_mode:%d\n",
+                               __func__, win->cbr_vsd_mode);
+                       break;
+               }
+               break;
+       default:
+               pr_info("%s:un supported cbr_ver_scl_mode:%d\n",
+                       __func__, win->cbr_ver_scl_mode);
+               break;
+       }
+       win->scale_cbcr_x = cbcr_xscl_factor;
+       win->scale_cbcr_y = cbcr_yscl_factor;
+       win->vsd_cbr_gt4 = cbcr_vsd_bil_gt4;
+       win->vsd_cbr_gt2 = cbcr_vsd_bil_gt2;
+
+       DBG(1, "cbcr:h_fac=%d,v_fac=%d,gt4=%d,gt2=%d\n", cbcr_xscl_factor,
+           cbcr_yscl_factor, cbcr_vsd_bil_gt4, cbcr_vsd_bil_gt2);
+       return 0;
+}
+
+static int win_0_1_set_par(struct lcdc_device *lcdc_dev,
+                          struct rk_screen *screen, struct rk_lcdc_win *win)
+{
+       u32 xact, yact, xvir, yvir, xpos, ypos;
+       u8 fmt_cfg = 0, swap_rb;
+       char fmt[9] = "NULL";
+
+       if (!win->mirror_en) {
+               xpos = win->area[0].xpos + screen->mode.left_margin +
+                   screen->mode.hsync_len;
+               ypos = win->area[0].ypos + screen->mode.upper_margin +
+                   screen->mode.vsync_len;
+       } else {
+               xpos = screen->mode.xres - win->area[0].xpos -
+                       win->area[0].xsize +
+                       screen->mode.left_margin + screen->mode.hsync_len;
+               ypos = screen->mode.yres - win->area[0].ypos -
+                       win->area[0].ysize + screen->mode.upper_margin +
+                       screen->mode.vsync_len;
+       }
+       spin_lock(&lcdc_dev->reg_lock);
+       if (likely(lcdc_dev->clk_on)) {
+               rk3368_lcdc_cal_scl_fac(win);   /*fac,lb,gt2,gt4 */
+               switch (win->area[0].format) {
+               case ARGB888:
+                       fmt_cfg = 0;
+                       swap_rb = 0;
+                       win->fmt_10 = 0;
+                       break;
+               case XBGR888:
+               case ABGR888:
+                       fmt_cfg = 0;
+                       swap_rb = 1;
+                       win->fmt_10 = 0;
+                       break;
+               case RGB888:
+                       fmt_cfg = 1;
+                       swap_rb = 0;
+                       win->fmt_10 = 0;
+                       break;
+               case RGB565:
+                       fmt_cfg = 2;
+                       swap_rb = 0;
+                       win->fmt_10 = 0;
+                       break;
+               case YUV422:
+                       fmt_cfg = 5;
+                       swap_rb = 0;
+                       win->fmt_10 = 0;
+                       break;
+               case YUV420:
+                       fmt_cfg = 4;
+                       swap_rb = 0;
+                       win->fmt_10 = 0;
+                       break;
+               case YUV444:
+                       fmt_cfg = 6;
+                       swap_rb = 0;
+                       win->fmt_10 = 0;
+                       break;
+               case YUV422_A:
+                       fmt_cfg = 5;
+                       swap_rb = 0;
+                       win->fmt_10 = 1;
+                       break;
+               case YUV420_A:
+                       fmt_cfg = 4;
+                       swap_rb = 0;
+                       win->fmt_10 = 1;
+                       break;
+               case YUV444_A:
+                       fmt_cfg = 6;
+                       swap_rb = 0;
+                       win->fmt_10 = 1;
+                       break;
+               default:
+                       dev_err(lcdc_dev->driver.dev, "%s:unsupport format!\n",
+                               __func__);
+                       break;
+               }
+               win->area[0].fmt_cfg = fmt_cfg;
+               win->area[0].swap_rb = swap_rb;
+               win->area[0].dsp_stx = xpos;
+               win->area[0].dsp_sty = ypos;
+               xact = win->area[0].xact;
+               yact = win->area[0].yact;
+               xvir = win->area[0].xvir;
+               yvir = win->area[0].yvir;
+       }
+       if (win->area[0].fbdc_en)
+               rk3368_init_fbdc_config(&lcdc_dev->driver, win->id);
+       rk3368_win_0_1_reg_update(&lcdc_dev->driver, win->id);
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       DBG(1, "lcdc[%d]:win[%d]\n>>format:%s>>>xact:%d>>yact:%d>>xsize:%d",
+           lcdc_dev->id, win->id, get_format_string(win->area[0].format, fmt),
+           xact, yact, win->area[0].xsize);
+       DBG(1, ">>ysize:%d>>xvir:%d>>yvir:%d>>xpos:%d>>ypos:%d>>\n",
+           win->area[0].ysize, xvir, yvir, xpos, ypos);
+
+       return 0;
+}
+
+
+static int win_2_3_set_par(struct lcdc_device *lcdc_dev,
+                          struct rk_screen *screen, struct rk_lcdc_win *win)
+{
+       int i;
+       u8 fmt_cfg, swap_rb;
+       char fmt[9] = "NULL";
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (likely(lcdc_dev->clk_on)) {
+               DBG(2, "lcdc[%d]:win[%d]>>\n>\n", lcdc_dev->id, win->id);
+               for (i = 0; i < win->area_num; i++) {
+                       switch (win->area[i].format) {
+                       case ARGB888:
+                               fmt_cfg = 0;
+                               swap_rb = 0;
+                               break;
+                       case XBGR888:
+                       case ABGR888:
+                               fmt_cfg = 0;
+                               swap_rb = 1;
+                               break;
+                       case RGB888:
+                               fmt_cfg = 1;
+                               swap_rb = 0;
+                               break;
+                       case RGB565:
+                               fmt_cfg = 2;
+                               swap_rb = 0;
+                               break;
+                       default:
+                               dev_err(lcdc_dev->driver.dev,
+                                       "%s:un supported format!\n", __func__);
+                               break;
+                       }
+                       win->area[i].fmt_cfg = fmt_cfg;
+                       win->area[i].swap_rb = swap_rb;
+                       win->area[i].dsp_stx = win->area[i].xpos +
+                           screen->mode.left_margin + screen->mode.hsync_len;
+                       if (screen->y_mirror == 1) {
+                               win->area[i].dsp_sty = screen->mode.yres -
+                                   win->area[i].ypos -
+                                   win->area[i].ysize +
+                                   screen->mode.upper_margin +
+                                   screen->mode.vsync_len;
+                       } else {
+                               win->area[i].dsp_sty = win->area[i].ypos +
+                                   screen->mode.upper_margin +
+                                   screen->mode.vsync_len;
+                       }
+
+                       DBG(2, "fmt:%s:xsize:%d>>ysize:%d>>xpos:%d>>ypos:%d\n",
+                           get_format_string(win->area[i].format, fmt),
+                           win->area[i].xsize, win->area[i].ysize,
+                           win->area[i].xpos, win->area[i].ypos);
+               }
+       }
+       if (win->area[0].fbdc_en)
+               rk3368_init_fbdc_config(&lcdc_dev->driver, win->id);
+       rk3368_win_2_3_reg_update(&lcdc_dev->driver, win->id);
+       spin_unlock(&lcdc_dev->reg_lock);
+       return 0;
+}
+
+static int hwc_set_par(struct lcdc_device *lcdc_dev,
+                      struct rk_screen *screen, struct rk_lcdc_win *win)
+{
+       u32 xact, yact, xvir, yvir, xpos, ypos;
+       u8 fmt_cfg = 0, swap_rb;
+       char fmt[9] = "NULL";
+
+       xpos = win->area[0].xpos + screen->mode.left_margin +
+           screen->mode.hsync_len;
+       ypos = win->area[0].ypos + screen->mode.upper_margin +
+           screen->mode.vsync_len;
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (likely(lcdc_dev->clk_on)) {
+               /*rk3368_lcdc_cal_scl_fac(win); *//*fac,lb,gt2,gt4 */
+               switch (win->area[0].format) {
+               case ARGB888:
+                       fmt_cfg = 0;
+                       swap_rb = 0;
+                       break;
+               case XBGR888:
+               case ABGR888:
+                       fmt_cfg = 0;
+                       swap_rb = 1;
+                       break;
+               case RGB888:
+                       fmt_cfg = 1;
+                       swap_rb = 0;
+                       break;
+               case RGB565:
+                       fmt_cfg = 2;
+                       swap_rb = 0;
+                       break;
+               default:
+                       dev_err(lcdc_dev->driver.dev,
+                               "%s:un supported format!\n", __func__);
+                       break;
+               }
+               win->area[0].fmt_cfg = fmt_cfg;
+               win->area[0].swap_rb = swap_rb;
+               win->area[0].dsp_stx = xpos;
+               win->area[0].dsp_sty = ypos;
+               xact = win->area[0].xact;
+               yact = win->area[0].yact;
+               xvir = win->area[0].xvir;
+               yvir = win->area[0].yvir;
+       }
+       rk3368_hwc_reg_update(&lcdc_dev->driver, 4);
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       DBG(1, "lcdc[%d]:hwc>>%s\n>>format:%s>>>xact:%d>>yact:%d>>xsize:%d",
+           lcdc_dev->id, __func__, get_format_string(win->area[0].format, fmt),
+           xact, yact, win->area[0].xsize);
+       DBG(1, ">>ysize:%d>>xvir:%d>>yvir:%d>>xpos:%d>>ypos:%d>>\n",
+           win->area[0].ysize, xvir, yvir, xpos, ypos);
+       return 0;
+}
+
+static int rk3368_lcdc_set_par(struct rk_lcdc_driver *dev_drv, int win_id)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_lcdc_win *win = NULL;
+       struct rk_screen *screen = dev_drv->cur_screen;
+
+       win = dev_drv->win[win_id];
+       switch (win_id) {
+       case 0:
+               win_0_1_set_par(lcdc_dev, screen, win);
+               break;
+       case 1:
+               win_0_1_set_par(lcdc_dev, screen, win);
+               break;
+       case 2:
+               win_2_3_set_par(lcdc_dev, screen, win);
+               break;
+       case 3:
+               win_2_3_set_par(lcdc_dev, screen, win);
+               break;
+       case 4:
+               hwc_set_par(lcdc_dev, screen, win);
+               break;
+       default:
+               dev_err(dev_drv->dev, "unsupported win number:%d\n", win_id);
+               break;
+       }
+       return 0;
+}
+
+static int rk3368_lcdc_ioctl(struct rk_lcdc_driver *dev_drv, unsigned int cmd,
+                            unsigned long arg, int win_id)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       u32 panel_size[2];
+       void __user *argp = (void __user *)arg;
+       struct color_key_cfg clr_key_cfg;
+
+       switch (cmd) {
+       case RK_FBIOGET_PANEL_SIZE:
+               panel_size[0] = lcdc_dev->screen->mode.xres;
+               panel_size[1] = lcdc_dev->screen->mode.yres;
+               if (copy_to_user(argp, panel_size, 8))
+                       return -EFAULT;
+               break;
+       case RK_FBIOPUT_COLOR_KEY_CFG:
+               if (copy_from_user(&clr_key_cfg, argp,
+                                  sizeof(struct color_key_cfg)))
+                       return -EFAULT;
+               rk3368_lcdc_clr_key_cfg(dev_drv);
+               lcdc_writel(lcdc_dev, WIN0_COLOR_KEY,
+                           clr_key_cfg.win0_color_key_cfg);
+               lcdc_writel(lcdc_dev, WIN1_COLOR_KEY,
+                           clr_key_cfg.win1_color_key_cfg);
+               break;
+
+       default:
+               break;
+       }
+       return 0;
+}
+
/*
 * rk3368_lcdc_get_backlight_device - cache the panel backlight device.
 *
 * Returns 0 immediately when a backlight is already cached.  The actual
 * device-tree lookup (via the "backlight" phandle) is currently compiled
 * out with #if 0, so this function is effectively a no-op that always
 * returns 0; the disabled code is kept for when the lookup is re-enabled.
 */
static int rk3368_lcdc_get_backlight_device(struct rk_lcdc_driver *dev_drv)
{
	struct lcdc_device *lcdc_dev = container_of(dev_drv,
						    struct lcdc_device, driver);
	/*struct device_node *backlight;*/

	if (lcdc_dev->backlight)
		return 0;
#if 0
	backlight = of_parse_phandle(lcdc_dev->dev->of_node, "backlight", 0);
	if (backlight) {
		lcdc_dev->backlight = of_find_backlight_by_node(backlight);
		if (!lcdc_dev->backlight)
			dev_info(lcdc_dev->dev, "No find backlight device\n");
	} else {
		dev_info(lcdc_dev->dev, "No find backlight device node\n");
	}
#endif
	return 0;
}
+
/*
 * rk3368_lcdc_early_suspend - blank the controller and power it down.
 *
 * Sequence: mark suspended, drain the register-update worker, disable the
 * transmitter, then (under the reg lock) blank the output, clear pending
 * frame/line interrupts, force the output to zero and enter standby;
 * finally deactivate the IOMMU and gate clock and power.
 * The register-write ordering is hardware-mandated; do not reorder.
 * Always returns 0.
 */
static int rk3368_lcdc_early_suspend(struct rk_lcdc_driver *dev_drv)
{
	u32 reg;
	struct lcdc_device *lcdc_dev =
	    container_of(dev_drv, struct lcdc_device, driver);
	/* Already suspended: nothing to do. */
	if (dev_drv->suspend_flag)
		return 0;
	/* close the backlight */
	/*rk3368_lcdc_get_backlight_device(dev_drv);
	if (lcdc_dev->backlight) {
		lcdc_dev->backlight->props.fb_blank = FB_BLANK_POWERDOWN;
		backlight_update_status(lcdc_dev->backlight);
	}*/

	dev_drv->suspend_flag = 1;
	/* Let any queued register-update work finish before touching HW. */
	flush_kthread_worker(&dev_drv->update_regs_worker);

	/* NOTE(review): reads back the whole MMU register range; presumably
	 * this refreshes the cached register values before the clock is
	 * gated - confirm against the register-cache implementation. */
	for (reg = MMU_DTE_ADDR; reg <= MMU_AUTO_GATING; reg += 4)
		lcdc_readl(lcdc_dev, reg);
	if (dev_drv->trsm_ops && dev_drv->trsm_ops->disable)
		dev_drv->trsm_ops->disable();

	spin_lock(&lcdc_dev->reg_lock);
	if (likely(lcdc_dev->clk_on)) {
		/* Blank, ack pending FS/line IRQs, force output low, then
		 * enter standby; commit with cfg_done. */
		lcdc_msk_reg(lcdc_dev, DSP_CTRL0, m_DSP_BLANK_EN,
			     v_DSP_BLANK_EN(1));
		lcdc_msk_reg(lcdc_dev,
			     INTR_CLEAR, m_FS_INTR_CLR | m_LINE_FLAG0_INTR_CLR,
			     v_FS_INTR_CLR(1) | v_LINE_FLAG0_INTR_CLR(1));
		lcdc_msk_reg(lcdc_dev, DSP_CTRL0, m_DSP_OUT_ZERO,
			     v_DSP_OUT_ZERO(1));
		lcdc_msk_reg(lcdc_dev, SYS_CTRL, m_STANDBY_EN, v_STANDBY_EN(1));
		lcdc_cfg_done(lcdc_dev);

		if (dev_drv->iommu_enabled) {
			if (dev_drv->mmu_dev)
				rockchip_iovmm_deactivate(dev_drv->dev);
		}

		spin_unlock(&lcdc_dev->reg_lock);
	} else {
		/* Clock already off: registers untouched, leave power as-is. */
		spin_unlock(&lcdc_dev->reg_lock);
		return 0;
	}
	rk3368_lcdc_clk_disable(lcdc_dev);
	rk_disp_pwr_disable(dev_drv);
	return 0;
}
+
/*
 * rk3368_lcdc_early_resume - power the controller back up and unblank.
 *
 * Reverses rk3368_lcdc_early_suspend(): re-enables power and clocks,
 * restores the register file and (if configured) the gamma LUT, releases
 * standby/blank, reactivates the IOMMU and finally re-enables the
 * transmitter.  The register-write ordering is hardware-mandated.
 * Always returns 0.
 */
static int rk3368_lcdc_early_resume(struct rk_lcdc_driver *dev_drv)
{
	struct lcdc_device *lcdc_dev =
	    container_of(dev_drv, struct lcdc_device, driver);
	int i, j;
	int __iomem *c;
	int v, r, g, b;

	/* Not suspended: nothing to do. */
	if (!dev_drv->suspend_flag)
		return 0;
	rk_disp_pwr_enable(dev_drv);
	dev_drv->suspend_flag = 0;

	if (1/*lcdc_dev->atv_layer_cnt*/) {
		rk3368_lcdc_clk_enable(lcdc_dev);
		rk3368_lcdc_reg_restore(lcdc_dev);

		spin_lock(&lcdc_dev->reg_lock);
		if (dev_drv->cur_screen->dsp_lut) {
			/* LUT RAM must be written with the LUT disabled;
			 * NOTE(review): the 25 ms delay looks like a
			 * one-frame guard for the disable to latch -
			 * confirm against the TRM. */
			lcdc_msk_reg(lcdc_dev, DSP_CTRL1, m_DSP_LUT_EN,
				     v_DSP_LUT_EN(0));
			lcdc_cfg_done(lcdc_dev);
			mdelay(25);
			for (i = 0; i < 256; i++) {
				v = dev_drv->cur_screen->dsp_lut[i];
				c = lcdc_dev->dsp_lut_addr_base + (i << 2);
				b = (v & 0xff);
				g = (v & 0xff00);
				r = (v & 0xff0000);
				v = r + g + b;
				/* NOTE(review): each entry is expanded into
				 * 4 consecutive words with R/G/B each
				 * stepped by one (bits 20/10/0) - presumably
				 * sub-entry interpolation points; confirm. */
				for (j = 0; j < 4; j++) {
					writel_relaxed(v, c);
					v += (1 + (1 << 10) + (1 << 20));
					c++;
				}
			}
			lcdc_msk_reg(lcdc_dev, DSP_CTRL1, m_DSP_LUT_EN,
				     v_DSP_LUT_EN(1));
		}

		/* Stop forcing the output low, leave standby, unblank. */
		lcdc_msk_reg(lcdc_dev, DSP_CTRL0, m_DSP_OUT_ZERO,
			     v_DSP_OUT_ZERO(0));
		lcdc_msk_reg(lcdc_dev, SYS_CTRL, m_STANDBY_EN, v_STANDBY_EN(0));
		lcdc_msk_reg(lcdc_dev, DSP_CTRL0, m_DSP_BLANK_EN,
			     v_DSP_BLANK_EN(0));
		lcdc_cfg_done(lcdc_dev);

		if (dev_drv->iommu_enabled) {
			if (dev_drv->mmu_dev)
				rockchip_iovmm_activate(dev_drv->dev);
		}

		spin_unlock(&lcdc_dev->reg_lock);
	}

	/* Re-enable the transmitter (LVDS/MIPI/eDP/...) last. */
	if (dev_drv->trsm_ops && dev_drv->trsm_ops->enable)
		dev_drv->trsm_ops->enable();

	return 0;
}
+
+static int rk3368_lcdc_blank(struct rk_lcdc_driver *dev_drv,
+                            int win_id, int blank_mode)
+{
+       switch (blank_mode) {
+       case FB_BLANK_UNBLANK:
+               rk3368_lcdc_early_resume(dev_drv);
+               break;
+       case FB_BLANK_NORMAL:
+               rk3368_lcdc_early_suspend(dev_drv);
+               break;
+       default:
+               rk3368_lcdc_early_suspend(dev_drv);
+               break;
+       }
+
+       dev_info(dev_drv->dev, "blank mode:%d\n", blank_mode);
+
+       return 0;
+}
+
/* Stub: window-state query is not implemented on this SoC; always
 * reports 0 regardless of win_id. */
static int rk3368_lcdc_get_win_state(struct rk_lcdc_driver *dev_drv, int win_id)
{
	return 0;
}
+
+/*overlay will be do at regupdate*/
+static int rk3368_lcdc_ovl_mgr(struct rk_lcdc_driver *dev_drv, int swap,
+                              bool set)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_lcdc_win *win = NULL;
+       int i, ovl;
+       unsigned int mask, val;
+       int z_order_num = 0;
+       int layer0_sel, layer1_sel, layer2_sel, layer3_sel;
+
+       if (swap == 0) {
+               for (i = 0; i < 4; i++) {
+                       win = dev_drv->win[i];
+                       if (win->state == 1)
+                               z_order_num++;
+               }
+               for (i = 0; i < 4; i++) {
+                       win = dev_drv->win[i];
+                       if (win->state == 0)
+                               win->z_order = z_order_num++;
+                       switch (win->z_order) {
+                       case 0:
+                               layer0_sel = win->id;
+                               break;
+                       case 1:
+                               layer1_sel = win->id;
+                               break;
+                       case 2:
+                               layer2_sel = win->id;
+                               break;
+                       case 3:
+                               layer3_sel = win->id;
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       } else {
+               layer0_sel = swap % 10;
+               layer1_sel = swap / 10 % 10;
+               layer2_sel = swap / 100 % 10;
+               layer3_sel = swap / 1000;
+       }
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (lcdc_dev->clk_on) {
+               if (set) {
+                       mask = m_DSP_LAYER0_SEL | m_DSP_LAYER1_SEL |
+                           m_DSP_LAYER2_SEL | m_DSP_LAYER3_SEL;
+                       val = v_DSP_LAYER0_SEL(layer0_sel) |
+                           v_DSP_LAYER1_SEL(layer1_sel) |
+                           v_DSP_LAYER2_SEL(layer2_sel) |
+                           v_DSP_LAYER3_SEL(layer3_sel);
+                       lcdc_msk_reg(lcdc_dev, DSP_CTRL1, mask, val);
+               } else {
+                       layer0_sel = lcdc_read_bit(lcdc_dev, DSP_CTRL1,
+                                                  m_DSP_LAYER0_SEL);
+                       layer1_sel = lcdc_read_bit(lcdc_dev, DSP_CTRL1,
+                                                  m_DSP_LAYER1_SEL);
+                       layer2_sel = lcdc_read_bit(lcdc_dev, DSP_CTRL1,
+                                                  m_DSP_LAYER2_SEL);
+                       layer3_sel = lcdc_read_bit(lcdc_dev, DSP_CTRL1,
+                                                  m_DSP_LAYER3_SEL);
+                       ovl = layer3_sel * 1000 + layer2_sel * 100 +
+                           layer1_sel * 10 + layer0_sel;
+               }
+       } else {
+               ovl = -EPERM;
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       return ovl;
+}
+
/*
 * rk3368_lcdc_format_to_string - translate a hardware format code into a
 * human-readable name for debug output.
 * @format: hardware data-format id as read from the WINx control register
 * @fmt:    caller-supplied buffer the name is copied into
 *
 * Returns @fmt, or NULL when @fmt is NULL.  Unknown ids (including the
 * unused id 3) yield "invalid\n".
 */
static char *rk3368_lcdc_format_to_string(int format, char *fmt)
{
	static const char * const fmt_names[] = {
		[0] = "ARGB888",
		[1] = "RGB888",
		[2] = "RGB565",
		[4] = "YCbCr420",
		[5] = "YCbCr422",
		[6] = "YCbCr444",
	};
	const char *name = "invalid\n";

	if (!fmt)
		return NULL;

	if (format >= 0 &&
	    format < (int)(sizeof(fmt_names) / sizeof(fmt_names[0])) &&
	    fmt_names[format])
		name = fmt_names[format];
	strcpy(fmt, name);
	return fmt;
}
+static ssize_t rk3368_lcdc_get_disp_info(struct rk_lcdc_driver *dev_drv,
+                                        char *buf, int win_id)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_screen *screen = dev_drv->cur_screen;
+       u16 hsync_len = screen->mode.hsync_len;
+       u16 left_margin = screen->mode.left_margin;
+       u16 vsync_len = screen->mode.vsync_len;
+       u16 upper_margin = screen->mode.upper_margin;
+       u32 h_pw_bp = hsync_len + left_margin;
+       u32 v_pw_bp = vsync_len + upper_margin;
+       u32 fmt_id;
+       char format_w0[9] = "NULL";
+       char format_w1[9] = "NULL";
+       char format_w2_0[9] = "NULL";
+       char format_w2_1[9] = "NULL";
+       char format_w2_2[9] = "NULL";
+       char format_w2_3[9] = "NULL";
+       char format_w3_0[9] = "NULL";
+       char format_w3_1[9] = "NULL";
+       char format_w3_2[9] = "NULL";
+       char format_w3_3[9] = "NULL";
+       char dsp_buf[100];
+       u32 win_ctrl, zorder, vir_info, act_info, dsp_info, dsp_st;
+       u32 y_factor, uv_factor;
+       u8 layer0_sel, layer1_sel, layer2_sel, layer3_sel;
+       u8 w0_state, w1_state, w2_state, w3_state;
+       u8 w2_0_state, w2_1_state, w2_2_state, w2_3_state;
+       u8 w3_0_state, w3_1_state, w3_2_state, w3_3_state;
+
+       u32 w0_vir_y, w0_vir_uv, w0_act_x, w0_act_y, w0_dsp_x, w0_dsp_y;
+       u32 w0_st_x = h_pw_bp, w0_st_y = v_pw_bp;
+       u32 w1_vir_y, w1_vir_uv, w1_act_x, w1_act_y, w1_dsp_x, w1_dsp_y;
+       u32 w1_st_x = h_pw_bp, w1_st_y = v_pw_bp;
+       u32 w0_y_h_fac, w0_y_v_fac, w0_uv_h_fac, w0_uv_v_fac;
+       u32 w1_y_h_fac, w1_y_v_fac, w1_uv_h_fac, w1_uv_v_fac;
+
+       u32 w2_0_vir_y, w2_1_vir_y, w2_2_vir_y, w2_3_vir_y;
+       u32 w2_0_dsp_x, w2_1_dsp_x, w2_2_dsp_x, w2_3_dsp_x;
+       u32 w2_0_dsp_y, w2_1_dsp_y, w2_2_dsp_y, w2_3_dsp_y;
+       u32 w2_0_st_x = h_pw_bp, w2_1_st_x = h_pw_bp;
+       u32 w2_2_st_x = h_pw_bp, w2_3_st_x = h_pw_bp;
+       u32 w2_0_st_y = v_pw_bp, w2_1_st_y = v_pw_bp;
+       u32 w2_2_st_y = v_pw_bp, w2_3_st_y = v_pw_bp;
+
+       u32 w3_0_vir_y, w3_1_vir_y, w3_2_vir_y, w3_3_vir_y;
+       u32 w3_0_dsp_x, w3_1_dsp_x, w3_2_dsp_x, w3_3_dsp_x;
+       u32 w3_0_dsp_y, w3_1_dsp_y, w3_2_dsp_y, w3_3_dsp_y;
+       u32 w3_0_st_x = h_pw_bp, w3_1_st_x = h_pw_bp;
+       u32 w3_2_st_x = h_pw_bp, w3_3_st_x = h_pw_bp;
+       u32 w3_0_st_y = v_pw_bp, w3_1_st_y = v_pw_bp;
+       u32 w3_2_st_y = v_pw_bp, w3_3_st_y = v_pw_bp;
+       u32 dclk_freq;
+       int size = 0;
+
+       dclk_freq = screen->mode.pixclock;
+       /*rk3368_lcdc_reg_dump(dev_drv); */
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (lcdc_dev->clk_on) {
+               zorder = lcdc_readl(lcdc_dev, DSP_CTRL1);
+               layer0_sel = (zorder & m_DSP_LAYER0_SEL) >> 8;
+               layer1_sel = (zorder & m_DSP_LAYER1_SEL) >> 10;
+               layer2_sel = (zorder & m_DSP_LAYER2_SEL) >> 12;
+               layer3_sel = (zorder & m_DSP_LAYER3_SEL) >> 14;
+               /*WIN0 */
+               win_ctrl = lcdc_readl(lcdc_dev, WIN0_CTRL0);
+               w0_state = win_ctrl & m_WIN0_EN;
+               fmt_id = (win_ctrl & m_WIN0_DATA_FMT) >> 1;
+               rk3368_lcdc_format_to_string(fmt_id, format_w0);
+               vir_info = lcdc_readl(lcdc_dev, WIN0_VIR);
+               act_info = lcdc_readl(lcdc_dev, WIN0_ACT_INFO);
+               dsp_info = lcdc_readl(lcdc_dev, WIN0_DSP_INFO);
+               dsp_st = lcdc_readl(lcdc_dev, WIN0_DSP_ST);
+               y_factor = lcdc_readl(lcdc_dev, WIN0_SCL_FACTOR_YRGB);
+               uv_factor = lcdc_readl(lcdc_dev, WIN0_SCL_FACTOR_CBR);
+               w0_vir_y = vir_info & m_WIN0_VIR_STRIDE;
+               w0_vir_uv = (vir_info & m_WIN0_VIR_STRIDE_UV) >> 16;
+               w0_act_x = (act_info & m_WIN0_ACT_WIDTH) + 1;
+               w0_act_y = ((act_info & m_WIN0_ACT_HEIGHT) >> 16) + 1;
+               w0_dsp_x = (dsp_info & m_WIN0_DSP_WIDTH) + 1;
+               w0_dsp_y = ((dsp_info & m_WIN0_DSP_HEIGHT) >> 16) + 1;
+               if (w0_state) {
+                       w0_st_x = dsp_st & m_WIN0_DSP_XST;
+                       w0_st_y = (dsp_st & m_WIN0_DSP_YST) >> 16;
+               }
+               w0_y_h_fac = y_factor & m_WIN0_HS_FACTOR_YRGB;
+               w0_y_v_fac = (y_factor & m_WIN0_VS_FACTOR_YRGB) >> 16;
+               w0_uv_h_fac = uv_factor & m_WIN0_HS_FACTOR_CBR;
+               w0_uv_v_fac = (uv_factor & m_WIN0_VS_FACTOR_CBR) >> 16;
+
+               /*WIN1 */
+               win_ctrl = lcdc_readl(lcdc_dev, WIN1_CTRL0);
+               w1_state = win_ctrl & m_WIN1_EN;
+               fmt_id = (win_ctrl & m_WIN1_DATA_FMT) >> 1;
+               rk3368_lcdc_format_to_string(fmt_id, format_w1);
+               vir_info = lcdc_readl(lcdc_dev, WIN1_VIR);
+               act_info = lcdc_readl(lcdc_dev, WIN1_ACT_INFO);
+               dsp_info = lcdc_readl(lcdc_dev, WIN1_DSP_INFO);
+               dsp_st = lcdc_readl(lcdc_dev, WIN1_DSP_ST);
+               y_factor = lcdc_readl(lcdc_dev, WIN1_SCL_FACTOR_YRGB);
+               uv_factor = lcdc_readl(lcdc_dev, WIN1_SCL_FACTOR_CBR);
+               w1_vir_y = vir_info & m_WIN1_VIR_STRIDE;
+               w1_vir_uv = (vir_info & m_WIN1_VIR_STRIDE_UV) >> 16;
+               w1_act_x = (act_info & m_WIN1_ACT_WIDTH) + 1;
+               w1_act_y = ((act_info & m_WIN1_ACT_HEIGHT) >> 16) + 1;
+               w1_dsp_x = (dsp_info & m_WIN1_DSP_WIDTH) + 1;
+               w1_dsp_y = ((dsp_info & m_WIN1_DSP_HEIGHT) >> 16) + 1;
+               if (w1_state) {
+                       w1_st_x = dsp_st & m_WIN1_DSP_XST;
+                       w1_st_y = (dsp_st & m_WIN1_DSP_YST) >> 16;
+               }
+               w1_y_h_fac = y_factor & m_WIN1_HS_FACTOR_YRGB;
+               w1_y_v_fac = (y_factor & m_WIN1_VS_FACTOR_YRGB) >> 16;
+               w1_uv_h_fac = uv_factor & m_WIN1_HS_FACTOR_CBR;
+               w1_uv_v_fac = (uv_factor & m_WIN1_VS_FACTOR_CBR) >> 16;
+               /*WIN2 */
+               win_ctrl = lcdc_readl(lcdc_dev, WIN2_CTRL0);
+               w2_state = win_ctrl & m_WIN2_EN;
+               w2_0_state = (win_ctrl & m_WIN2_MST0_EN) >> 4;
+               w2_1_state = (win_ctrl & m_WIN2_MST1_EN) >> 5;
+               w2_2_state = (win_ctrl & m_WIN2_MST2_EN) >> 6;
+               w2_3_state = (win_ctrl & m_WIN2_MST3_EN) >> 7;
+               vir_info = lcdc_readl(lcdc_dev, WIN2_VIR0_1);
+               w2_0_vir_y = vir_info & m_WIN2_VIR_STRIDE0;
+               w2_1_vir_y = (vir_info & m_WIN2_VIR_STRIDE1) >> 16;
+               vir_info = lcdc_readl(lcdc_dev, WIN2_VIR2_3);
+               w2_2_vir_y = vir_info & m_WIN2_VIR_STRIDE2;
+               w2_3_vir_y = (vir_info & m_WIN2_VIR_STRIDE3) >> 16;
+
+               fmt_id = (win_ctrl & m_WIN2_DATA_FMT0) >> 1;
+               rk3368_lcdc_format_to_string(fmt_id, format_w2_0);
+               fmt_id = (win_ctrl & m_WIN2_DATA_FMT1) >> 1;
+               rk3368_lcdc_format_to_string(fmt_id, format_w2_1);
+               fmt_id = (win_ctrl & m_WIN2_DATA_FMT2) >> 1;
+               rk3368_lcdc_format_to_string(fmt_id, format_w2_2);
+               fmt_id = (win_ctrl & m_WIN2_DATA_FMT3) >> 1;
+               rk3368_lcdc_format_to_string(fmt_id, format_w2_3);
+
+               dsp_info = lcdc_readl(lcdc_dev, WIN2_DSP_INFO0);
+               dsp_st = lcdc_readl(lcdc_dev, WIN2_DSP_ST0);
+               w2_0_dsp_x = (dsp_info & m_WIN2_DSP_WIDTH0) + 1;
+               w2_0_dsp_y = ((dsp_info & m_WIN2_DSP_HEIGHT0) >> 16) + 1;
+               if (w2_0_state) {
+                       w2_0_st_x = dsp_st & m_WIN2_DSP_XST0;
+                       w2_0_st_y = (dsp_st & m_WIN2_DSP_YST0) >> 16;
+               }
+               dsp_info = lcdc_readl(lcdc_dev, WIN2_DSP_INFO1);
+               dsp_st = lcdc_readl(lcdc_dev, WIN2_DSP_ST1);
+               w2_1_dsp_x = (dsp_info & m_WIN2_DSP_WIDTH1) + 1;
+               w2_1_dsp_y = ((dsp_info & m_WIN2_DSP_HEIGHT1) >> 16) + 1;
+               if (w2_1_state) {
+                       w2_1_st_x = dsp_st & m_WIN2_DSP_XST1;
+                       w2_1_st_y = (dsp_st & m_WIN2_DSP_YST1) >> 16;
+               }
+               dsp_info = lcdc_readl(lcdc_dev, WIN2_DSP_INFO2);
+               dsp_st = lcdc_readl(lcdc_dev, WIN2_DSP_ST2);
+               w2_2_dsp_x = (dsp_info & m_WIN2_DSP_WIDTH2) + 1;
+               w2_2_dsp_y = ((dsp_info & m_WIN2_DSP_HEIGHT2) >> 16) + 1;
+               if (w2_2_state) {
+                       w2_2_st_x = dsp_st & m_WIN2_DSP_XST2;
+                       w2_2_st_y = (dsp_st & m_WIN2_DSP_YST2) >> 16;
+               }
+               dsp_info = lcdc_readl(lcdc_dev, WIN2_DSP_INFO3);
+               dsp_st = lcdc_readl(lcdc_dev, WIN2_DSP_ST3);
+               w2_3_dsp_x = (dsp_info & m_WIN2_DSP_WIDTH3) + 1;
+               w2_3_dsp_y = ((dsp_info & m_WIN2_DSP_HEIGHT3) >> 16) + 1;
+               if (w2_3_state) {
+                       w2_3_st_x = dsp_st & m_WIN2_DSP_XST3;
+                       w2_3_st_y = (dsp_st & m_WIN2_DSP_YST3) >> 16;
+               }
+
+               /*WIN3 */
+               win_ctrl = lcdc_readl(lcdc_dev, WIN3_CTRL0);
+               w3_state = win_ctrl & m_WIN3_EN;
+               w3_0_state = (win_ctrl & m_WIN3_MST0_EN) >> 4;
+               w3_1_state = (win_ctrl & m_WIN3_MST1_EN) >> 5;
+               w3_2_state = (win_ctrl & m_WIN3_MST2_EN) >> 6;
+               w3_3_state = (win_ctrl & m_WIN3_MST3_EN) >> 7;
+               vir_info = lcdc_readl(lcdc_dev, WIN3_VIR0_1);
+               w3_0_vir_y = vir_info & m_WIN3_VIR_STRIDE0;
+               w3_1_vir_y = (vir_info & m_WIN3_VIR_STRIDE1) >> 16;
+               vir_info = lcdc_readl(lcdc_dev, WIN3_VIR2_3);
+               w3_2_vir_y = vir_info & m_WIN3_VIR_STRIDE2;
+               w3_3_vir_y = (vir_info & m_WIN3_VIR_STRIDE3) >> 16;
+               fmt_id = (win_ctrl & m_WIN3_DATA_FMT0) >> 1;
+               rk3368_lcdc_format_to_string(fmt_id, format_w3_0);
+               fmt_id = (win_ctrl & m_WIN3_DATA_FMT1) >> 1;
+               rk3368_lcdc_format_to_string(fmt_id, format_w3_1);
+               fmt_id = (win_ctrl & m_WIN3_DATA_FMT2) >> 1;
+               rk3368_lcdc_format_to_string(fmt_id, format_w3_2);
+               fmt_id = (win_ctrl & m_WIN3_DATA_FMT3) >> 1;
+               rk3368_lcdc_format_to_string(fmt_id, format_w3_3);
+               dsp_info = lcdc_readl(lcdc_dev, WIN3_DSP_INFO0);
+               dsp_st = lcdc_readl(lcdc_dev, WIN3_DSP_ST0);
+               w3_0_dsp_x = (dsp_info & m_WIN3_DSP_WIDTH0) + 1;
+               w3_0_dsp_y = ((dsp_info & m_WIN3_DSP_HEIGHT0) >> 16) + 1;
+               if (w3_0_state) {
+                       w3_0_st_x = dsp_st & m_WIN3_DSP_XST0;
+                       w3_0_st_y = (dsp_st & m_WIN3_DSP_YST0) >> 16;
+               }
+
+               dsp_info = lcdc_readl(lcdc_dev, WIN3_DSP_INFO1);
+               dsp_st = lcdc_readl(lcdc_dev, WIN3_DSP_ST1);
+               w3_1_dsp_x = (dsp_info & m_WIN3_DSP_WIDTH1) + 1;
+               w3_1_dsp_y = ((dsp_info & m_WIN3_DSP_HEIGHT1) >> 16) + 1;
+               if (w3_1_state) {
+                       w3_1_st_x = dsp_st & m_WIN3_DSP_XST1;
+                       w3_1_st_y = (dsp_st & m_WIN3_DSP_YST1) >> 16;
+               }
+
+               dsp_info = lcdc_readl(lcdc_dev, WIN3_DSP_INFO2);
+               dsp_st = lcdc_readl(lcdc_dev, WIN3_DSP_ST2);
+               w3_2_dsp_x = (dsp_info & m_WIN3_DSP_WIDTH2) + 1;
+               w3_2_dsp_y = ((dsp_info & m_WIN3_DSP_HEIGHT2) >> 16) + 1;
+               if (w3_2_state) {
+                       w3_2_st_x = dsp_st & m_WIN3_DSP_XST2;
+                       w3_2_st_y = (dsp_st & m_WIN3_DSP_YST2) >> 16;
+               }
+
+               dsp_info = lcdc_readl(lcdc_dev, WIN3_DSP_INFO3);
+               dsp_st = lcdc_readl(lcdc_dev, WIN3_DSP_ST3);
+               w3_3_dsp_x = (dsp_info & m_WIN3_DSP_WIDTH3) + 1;
+               w3_3_dsp_y = ((dsp_info & m_WIN3_DSP_HEIGHT3) >> 16) + 1;
+               if (w3_3_state) {
+                       w3_3_st_x = dsp_st & m_WIN3_DSP_XST3;
+                       w3_3_st_y = (dsp_st & m_WIN3_DSP_YST3) >> 16;
+               }
+
+       } else {
+               spin_unlock(&lcdc_dev->reg_lock);
+               return -EPERM;
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+       size += snprintf(dsp_buf, 80,
+               "z-order:\n  win[%d]\n  win[%d]\n  win[%d]\n  win[%d]\n",
+               layer3_sel, layer2_sel, layer1_sel, layer0_sel);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+       /*win0*/
+       size += snprintf(dsp_buf, 80,
+                "win0:\n  state:%d, fmt:%7s\n  y_vir:%4d, uv_vir:%4d,",
+                w0_state, format_w0, w0_vir_y, w0_vir_uv);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       size += snprintf(dsp_buf, 80,
+                " x_act  :%5d, y_act  :%5d, dsp_x   :%5d, dsp_y   :%5d\n",
+                w0_act_x, w0_act_y, w0_dsp_x, w0_dsp_y);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       size += snprintf(dsp_buf, 80,
+                "  x_st :%4d, y_st  :%4d, y_h_fac:%5d, y_v_fac:%5d, ",
+                w0_st_x-h_pw_bp, w0_st_y-v_pw_bp, w0_y_h_fac, w0_y_v_fac);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       size += snprintf(dsp_buf, 80,
+                "uv_h_fac:%5d, uv_v_fac:%5d\n  y_addr:0x%08x,    uv_addr:0x%08x\n",
+                w0_uv_h_fac, w0_uv_v_fac, lcdc_readl(lcdc_dev, WIN0_YRGB_MST),
+                lcdc_readl(lcdc_dev, WIN0_CBR_MST));
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       /*win1*/
+       size += snprintf(dsp_buf, 80,
+                "win1:\n  state:%d, fmt:%7s\n  y_vir:%4d, uv_vir:%4d,",
+                w1_state, format_w1, w1_vir_y, w1_vir_uv);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       size += snprintf(dsp_buf, 80,
+                " x_act  :%5d, y_act  :%5d, dsp_x   :%5d, dsp_y   :%5d\n",
+                w1_act_x, w1_act_y, w1_dsp_x, w1_dsp_y);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       size += snprintf(dsp_buf, 80,
+                "  x_st :%4d, y_st  :%4d, y_h_fac:%5d, y_v_fac:%5d, ",
+                w1_st_x-h_pw_bp, w1_st_y-v_pw_bp, w1_y_h_fac, w1_y_v_fac);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       size += snprintf(dsp_buf, 80,
+                "uv_h_fac:%5d, uv_v_fac:%5d\n  y_addr:0x%08x,    uv_addr:0x%08x\n",
+                w1_uv_h_fac, w1_uv_v_fac, lcdc_readl(lcdc_dev, WIN1_YRGB_MST),
+                lcdc_readl(lcdc_dev, WIN1_CBR_MST));
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       /*win2*/
+       size += snprintf(dsp_buf, 80,
+                "win2:\n  state:%d\n",
+                w2_state);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+       /*area 0*/
+       size += snprintf(dsp_buf, 80,
+                "  area0: state:%d, fmt:%7s, dsp_x:%4d, dsp_y:%4d,",
+                w2_0_state, format_w2_0, w2_0_dsp_x, w2_0_dsp_y);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+       size += snprintf(dsp_buf, 80,
+                " x_st:%4d, y_st:%4d, y_addr:0x%08x\n",
+                w2_0_st_x - h_pw_bp, w2_0_st_y - v_pw_bp,
+                lcdc_readl(lcdc_dev, WIN2_MST0));
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       /*area 1*/
+       size += snprintf(dsp_buf, 80,
+                "  area1: state:%d, fmt:%7s, dsp_x:%4d, dsp_y:%4d,",
+                w2_1_state, format_w2_1, w2_1_dsp_x, w2_1_dsp_y);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+       size += snprintf(dsp_buf, 80,
+                " x_st:%4d, y_st:%4d, y_addr:0x%08x\n",
+                w2_1_st_x - h_pw_bp, w2_1_st_y - v_pw_bp,
+                lcdc_readl(lcdc_dev, WIN2_MST1));
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       /*area 2*/
+       size += snprintf(dsp_buf, 80,
+                "  area2: state:%d, fmt:%7s, dsp_x:%4d, dsp_y:%4d,",
+                w2_2_state, format_w2_2, w2_2_dsp_x, w2_2_dsp_y);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+       size += snprintf(dsp_buf, 80,
+                " x_st:%4d, y_st:%4d, y_addr:0x%08x\n",
+                w2_2_st_x - h_pw_bp, w2_2_st_y - v_pw_bp,
+                lcdc_readl(lcdc_dev, WIN2_MST2));
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       /*area 3*/
+       size += snprintf(dsp_buf, 80,
+                "  area3: state:%d, fmt:%7s, dsp_x:%4d, dsp_y:%4d,",
+                w2_3_state, format_w2_3, w2_3_dsp_x, w2_3_dsp_y);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+       size += snprintf(dsp_buf, 80,
+                " x_st:%4d, y_st:%4d, y_addr:0x%08x\n",
+                w2_3_st_x - h_pw_bp, w2_3_st_y - v_pw_bp,
+                lcdc_readl(lcdc_dev, WIN2_MST3));
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       /*win3*/
+       size += snprintf(dsp_buf, 80,
+                "win3:\n  state:%d\n",
+                w3_state);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+       /*area 0*/
+       size += snprintf(dsp_buf, 80,
+                "  area0: state:%d, fmt:%7s, dsp_x:%4d, dsp_y:%4d,",
+                w3_0_state, format_w3_0, w3_0_dsp_x, w3_0_dsp_y);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+       size += snprintf(dsp_buf, 80,
+                " x_st:%4d, y_st:%4d, y_addr:0x%08x\n",
+                w3_0_st_x - h_pw_bp, w3_0_st_y - v_pw_bp,
+                lcdc_readl(lcdc_dev, WIN3_MST0));
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       /*area 1*/
+       size += snprintf(dsp_buf, 80,
+                "  area1: state:%d, fmt:%7s, dsp_x:%4d, dsp_y:%4d,",
+                w3_1_state, format_w3_1, w3_1_dsp_x, w3_1_dsp_y);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+       size += snprintf(dsp_buf, 80,
+                " x_st:%4d, y_st:%4d, y_addr:0x%08x\n",
+                w3_1_st_x - h_pw_bp, w3_1_st_y - v_pw_bp,
+                lcdc_readl(lcdc_dev, WIN3_MST1));
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       /*area 2*/
+       size += snprintf(dsp_buf, 80,
+                "  area2: state:%d, fmt:%7s, dsp_x:%4d, dsp_y:%4d,",
+                w3_2_state, format_w3_2, w3_2_dsp_x, w3_2_dsp_y);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+       size += snprintf(dsp_buf, 80,
+                " x_st:%4d, y_st:%4d, y_addr:0x%08x\n",
+                w3_2_st_x - h_pw_bp, w3_2_st_y - v_pw_bp,
+                lcdc_readl(lcdc_dev, WIN3_MST2));
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       /*area 3*/
+       size += snprintf(dsp_buf, 80,
+                "  area3: state:%d, fmt:%7s, dsp_x:%4d, dsp_y:%4d,",
+                w3_3_state, format_w3_3, w3_3_dsp_x, w3_3_dsp_y);
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+       size += snprintf(dsp_buf, 80,
+                " x_st:%4d, y_st:%4d, y_addr:0x%08x\n",
+                w3_3_st_x - h_pw_bp, w3_3_st_y - v_pw_bp,
+                lcdc_readl(lcdc_dev, WIN3_MST3));
+       strcat(buf, dsp_buf);
+       memset(dsp_buf, 0, sizeof(dsp_buf));
+
+       return size;
+}
+
+/*
+ * rk3368_lcdc_fps_mgr - query or set the display frame rate.
+ * @dev_drv: lcdc driver instance
+ * @fps: requested frame rate (only used when @set is true; 0 is rejected)
+ * @set: true to reprogram the pixel clock, false to only read back
+ *
+ * When setting, derives the pixel clock from the requested frame time and
+ * the total timing (active + blanking) and programs pll_sclk and dclk.
+ * Always recomputes the achieved fps from the real dclk rate.
+ * Returns the achieved fps (or 0 when asked to set fps=0).
+ */
+static int rk3368_lcdc_fps_mgr(struct rk_lcdc_driver *dev_drv, int fps,
+                              bool set)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_screen *screen = dev_drv->cur_screen;
+       u64 ft = 0;
+       u32 dotclk;
+       int ret;
+       u32 pixclock;
+       u32 x_total, y_total;
+
+       if (set) {
+               if (fps == 0) {
+                       dev_info(dev_drv->dev, "unsupport set fps=0\n");
+                       return 0;
+               }
+               /* frame time in picoseconds */
+               ft = div_u64(1000000000000llu, fps);
+               /* total pixels per line (active + horizontal blanking) */
+               x_total =
+                   screen->mode.left_margin + screen->mode.right_margin +
+                   screen->mode.xres + screen->mode.hsync_len;
+               /* total lines per frame (active + vertical blanking) */
+               y_total =
+                   screen->mode.upper_margin + screen->mode.lower_margin +
+                   screen->mode.yres + screen->mode.vsync_len;
+               dev_drv->pixclock = div_u64(ft, x_total * y_total);
+               dotclk = div_u64(1000000000000llu, dev_drv->pixclock);
+               ret = clk_set_rate(lcdc_dev->pll_sclk, dotclk); /*set pll */
+               if (ret)
+                       dev_err(dev_drv->dev,
+                               "set lcdc%d pll_sclk failed\n", lcdc_dev->id);
+
+               /*SET NEW PLL FOR RK3368 */
+               ret = clk_set_rate(lcdc_dev->dclk, dotclk);
+               if (ret)
+                       dev_err(dev_drv->dev,
+                               "set lcdc%d dclk failed\n", lcdc_dev->id);
+       }
+
+       /* read back the real dclk rate and derive the achieved fps */
+       pixclock = div_u64(1000000000000llu, clk_get_rate(lcdc_dev->dclk));
+       lcdc_dev->pixclock = pixclock;
+       dev_drv->pixclock = lcdc_dev->pixclock;
+       fps = rk_fb_calc_fps(lcdc_dev->screen, pixclock);
+       screen->ft = 1000 / fps;        /*one frame time in ms */
+
+       if (set)
+               dev_info(dev_drv->dev, "%s:dclk:%lu,fps:%d\n", __func__,
+                        clk_get_rate(lcdc_dev->dclk), fps);
+
+       return fps;
+}
+
+/*
+ * rk3368_fb_win_remap - map framebuffer devices to hardware windows.
+ * @order: five-decimal-digit code; each digit selects the window id for
+ *         one fb device (fb4..fb0 from most- to least-significant digit).
+ *
+ * FB_DEFAULT_ORDER selects the driver's default mapping. Always returns 0.
+ */
+static int rk3368_fb_win_remap(struct rk_lcdc_driver *dev_drv, u16 order)
+{
+       u16 layer_order = order;
+
+       mutex_lock(&dev_drv->fb_win_id_mutex);
+       if (layer_order == FB_DEFAULT_ORDER)
+               layer_order = FB0_WIN0_FB1_WIN1_FB2_WIN2_FB3_WIN3_FB4_HWC;
+       /* peel off decimal digits, least significant first */
+       dev_drv->fb0_win_id = layer_order % 10;
+       layer_order /= 10;
+       dev_drv->fb1_win_id = layer_order % 10;
+       layer_order /= 10;
+       dev_drv->fb2_win_id = layer_order % 10;
+       layer_order /= 10;
+       dev_drv->fb3_win_id = layer_order % 10;
+       dev_drv->fb4_win_id = layer_order / 10;
+       mutex_unlock(&dev_drv->fb_win_id_mutex);
+
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_get_win_id - resolve an fb device name to its window id.
+ * @id: device name, exactly "fb0".."fb9"; "fb5".."fb9" alias "fb0".."fb4".
+ *
+ * Returns the mapped window id, or 0 for any unrecognized name.
+ */
+static int rk3368_lcdc_get_win_id(struct rk_lcdc_driver *dev_drv,
+                                 const char *id)
+{
+       int win_id = 0;
+
+       mutex_lock(&dev_drv->fb_win_id_mutex);
+       /* accept only exact three-character "fbN" names */
+       if (id[0] == 'f' && id[1] == 'b' && id[2] != '\0' && id[3] == '\0') {
+               switch (id[2]) {
+               case '0':
+               case '5':
+                       win_id = dev_drv->fb0_win_id;
+                       break;
+               case '1':
+               case '6':
+                       win_id = dev_drv->fb1_win_id;
+                       break;
+               case '2':
+               case '7':
+                       win_id = dev_drv->fb2_win_id;
+                       break;
+               case '3':
+               case '8':
+                       win_id = dev_drv->fb3_win_id;
+                       break;
+               case '4':
+               case '9':
+                       win_id = dev_drv->fb4_win_id;
+                       break;
+               default:
+                       break;
+               }
+       }
+       mutex_unlock(&dev_drv->fb_win_id_mutex);
+
+       return win_id;
+}
+
+/*
+ * rk3368_set_dsp_lut - load a new 256-entry display lookup table.
+ * @lut: caller-supplied table of 256 packed 8:8:8 RGB words
+ *
+ * Disables the hardware LUT, copies @lut into the screen's backup buffer
+ * while writing the expanded entries into LUT memory, then re-enables the
+ * LUT. Returns 0 on success, -1 if the screen has no backup buffer.
+ */
+static int rk3368_set_dsp_lut(struct rk_lcdc_driver *dev_drv, int *lut)
+{
+       int i, j;
+       int __iomem *c;
+       int v, r, g, b;
+       int ret = 0;
+
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       /* LUT memory is written with the LUT disabled; the 25 ms delay
+        * presumably lets the disable take effect — TODO confirm against
+        * the VOP datasheet. */
+       lcdc_msk_reg(lcdc_dev, DSP_CTRL1, m_DSP_LUT_EN, v_DSP_LUT_EN(0));
+       lcdc_cfg_done(lcdc_dev);
+       mdelay(25);
+       if (dev_drv->cur_screen->dsp_lut) {
+               for (i = 0; i < 256; i++) {
+                       /* keep a software copy so the LUT can be restored */
+                       dev_drv->cur_screen->dsp_lut[i] = lut[i];
+                       v = dev_drv->cur_screen->dsp_lut[i];
+                       /* each source entry occupies 4 consecutive words */
+                       c = lcdc_dev->dsp_lut_addr_base + (i << 2);
+                       /* expand 8-bit channels into wider fields:
+                        * b -> bits [9:2], g -> bits [19:12], r -> bits [29:22]
+                        * (looks like a 10-bit-per-channel LUT — verify) */
+                       b = (v & 0xff) << 2;
+                       g = (v & 0xff00) << 4;
+                       r = (v & 0xff0000) << 6;
+                       v = r + g + b;
+                       for (j = 0; j < 4; j++) {
+                               /* write 4 sub-entries, bumping each channel
+                                * field by one LSB per step */
+                               writel_relaxed(v, c);
+                               v += (1 + (1 << 10) + (1 << 20));
+                               c++;
+                       }
+               }
+       } else {
+               dev_err(dev_drv->dev, "no buffer to backup lut data!\n");
+               ret = -1;
+       }
+
+       /* NOTE(review): busy-loop with no timeout — spins forever if the
+        * enable bit never latches; confirm the hardware guarantees it. */
+       do {
+               lcdc_msk_reg(lcdc_dev, DSP_CTRL1, m_DSP_LUT_EN,
+                            v_DSP_LUT_EN(1));
+               lcdc_cfg_done(lcdc_dev);
+       } while (!lcdc_read_bit(lcdc_dev, DSP_CTRL1, m_DSP_LUT_EN));
+       return ret;
+}
+
+/*
+ * rk3368_lcdc_config_done - commit pending register configuration.
+ *
+ * Applies the standby state, disables the hardware enable bits of any
+ * window whose software state changed from enabled to disabled since the
+ * last commit, then latches the configuration with lcdc_cfg_done().
+ * Always returns 0.
+ */
+static int rk3368_lcdc_config_done(struct rk_lcdc_driver *dev_drv)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       int i;
+       unsigned int mask, val;
+       struct rk_lcdc_win *win = NULL;
+
+       spin_lock(&lcdc_dev->reg_lock);
+       lcdc_msk_reg(lcdc_dev, SYS_CTRL, m_STANDBY_EN,
+                    v_STANDBY_EN(lcdc_dev->standby));
+       /* only the first four driver windows are scanned here; window id 4
+        * (HWC) is still handled by the switch if one of them carries it */
+       for (i = 0; i < 4; i++) {
+               win = dev_drv->win[i];
+               /* turn the hardware off only on a 1 -> 0 state transition */
+               if ((win->state == 0) && (win->last_state == 1)) {
+                       switch (win->id) {
+                       case 0:
+                               /*lcdc_writel(lcdc_dev,WIN0_CTRL1,0x0);
+                                  for rk3288 to fix hw bug? */
+                               mask = m_WIN0_EN;
+                               val = v_WIN0_EN(0);
+                               lcdc_msk_reg(lcdc_dev, WIN0_CTRL0, mask, val);
+                               break;
+                       case 1:
+                               /*lcdc_writel(lcdc_dev,WIN1_CTRL1,0x0);
+                                  for rk3288 to fix hw bug? */
+                               mask = m_WIN1_EN;
+                               val = v_WIN1_EN(0);
+                               lcdc_msk_reg(lcdc_dev, WIN1_CTRL0, mask, val);
+                               break;
+                       case 2:
+                               /* win2 has four area masters; clear them all
+                                * together with the window enable */
+                               mask = m_WIN2_EN | m_WIN2_MST0_EN |
+                                   m_WIN2_MST1_EN |
+                                   m_WIN2_MST2_EN | m_WIN2_MST3_EN;
+                               val = v_WIN2_EN(0) | v_WIN2_MST0_EN(0) |
+                                   v_WIN2_MST1_EN(0) |
+                                   v_WIN2_MST2_EN(0) | v_WIN2_MST3_EN(0);
+                               lcdc_msk_reg(lcdc_dev, WIN2_CTRL0, mask, val);
+                               break;
+                       case 3:
+                               /* same layout as win2 */
+                               mask = m_WIN3_EN | m_WIN3_MST0_EN |
+                                   m_WIN3_MST1_EN |
+                                   m_WIN3_MST2_EN | m_WIN3_MST3_EN;
+                               val = v_WIN3_EN(0) | v_WIN3_MST0_EN(0) |
+                                   v_WIN3_MST1_EN(0) |
+                                   v_WIN3_MST2_EN(0) | v_WIN3_MST3_EN(0);
+                               lcdc_msk_reg(lcdc_dev, WIN3_CTRL0, mask, val);
+                               break;
+                       case 4:
+                               mask = m_HWC_EN;
+                               val = v_HWC_EN(0);
+                               lcdc_msk_reg(lcdc_dev, HWC_CTRL0, mask, val);
+                               break;
+                       default:
+                               break;
+                       }
+               }
+               win->last_state = win->state;
+       }
+       lcdc_cfg_done(lcdc_dev);
+       spin_unlock(&lcdc_dev->reg_lock);
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_dpi_open - enable or disable the direct path.
+ * @open: true to enable, false to disable. Always returns 0.
+ */
+static int rk3368_lcdc_dpi_open(struct rk_lcdc_driver *dev_drv, bool open)
+{
+       struct lcdc_device *lcdc_dev = container_of(dev_drv,
+                                                   struct lcdc_device, driver);
+
+       /* flip the direct-path enable bit and latch the config */
+       spin_lock(&lcdc_dev->reg_lock);
+       lcdc_msk_reg(lcdc_dev, SYS_CTRL, m_DIRECT_PATH_EN,
+                    v_DIRECT_PATH_EN(open));
+       lcdc_cfg_done(lcdc_dev);
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_dpi_win_sel - select the window routed to the direct path.
+ * @win_id: window index to route. Always returns 0.
+ */
+static int rk3368_lcdc_dpi_win_sel(struct rk_lcdc_driver *dev_drv, int win_id)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+
+       /* program the direct-path source window and latch the config */
+       spin_lock(&lcdc_dev->reg_lock);
+       lcdc_msk_reg(lcdc_dev, SYS_CTRL, m_DIRECT_PATCH_SEL,
+                    v_DIRECT_PATCH_SEL(win_id));
+       lcdc_cfg_done(lcdc_dev);
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_dpi_status - report whether the direct path is enabled.
+ * Returns the current value of the direct-path enable bit.
+ */
+static int rk3368_lcdc_dpi_status(struct rk_lcdc_driver *dev_drv)
+{
+       struct lcdc_device *lcdc_dev = container_of(dev_drv,
+                                                   struct lcdc_device, driver);
+       int direct_path_en;
+
+       /* sample the enable bit under the register lock */
+       spin_lock(&lcdc_dev->reg_lock);
+       direct_path_en = lcdc_read_bit(lcdc_dev, SYS_CTRL, m_DIRECT_PATH_EN);
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       return direct_path_en;
+}
+
+/*
+ * rk3368_lcdc_set_irq_to_cpu - mask or unmask the lcdc interrupt line.
+ * @enable: non-zero to enable the irq, zero to disable it.
+ * Always returns 0.
+ */
+static int rk3368_lcdc_set_irq_to_cpu(struct rk_lcdc_driver *dev_drv,
+                                     int enable)
+{
+       struct lcdc_device *lcdc_dev = container_of(dev_drv,
+                                                   struct lcdc_device, driver);
+
+       if (!enable)
+               disable_irq(lcdc_dev->irq);
+       else
+               enable_irq(lcdc_dev->irq);
+
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_poll_vblank - poll the line-flag (frame done) interrupt.
+ *
+ * Returns RK_LF_STATUS_NC when the clock is off or the driver is
+ * suspended, RK_LF_STATUS_FR when the line flag is not set, and
+ * RK_LF_STATUS_FC after recording the frame-done timestamp and clearing
+ * the flag.
+ */
+int rk3368_lcdc_poll_vblank(struct rk_lcdc_driver *dev_drv)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       u32 intr_status;
+
+       /* nothing to poll while the controller is gated or suspended */
+       if (!lcdc_dev->clk_on || dev_drv->suspend_flag)
+               return RK_LF_STATUS_NC;
+
+       intr_status = lcdc_readl(lcdc_dev, INTR_STATUS);
+       if (!(intr_status & m_LINE_FLAG0_INTR_STS))
+               return RK_LF_STATUS_FR;
+
+       /* line flag hit: shift the timestamps and acknowledge it */
+       lcdc_dev->driver.frame_time.last_framedone_t =
+           lcdc_dev->driver.frame_time.framedone_t;
+       lcdc_dev->driver.frame_time.framedone_t = cpu_clock(0);
+       lcdc_msk_reg(lcdc_dev, INTR_CLEAR, m_LINE_FLAG0_INTR_CLR,
+                    v_LINE_FLAG0_INTR_CLR(1));
+
+       return RK_LF_STATUS_FC;
+}
+
+/*
+ * rk3368_lcdc_get_dsp_addr - snapshot the scan-out buffer addresses.
+ * @dsp_addr: output array of at least 4 entries (win0, win1, win2 area0,
+ *            win3 area0). Left untouched if the clock is gated.
+ * Always returns 0.
+ */
+static int rk3368_lcdc_get_dsp_addr(struct rk_lcdc_driver *dev_drv,
+                                   unsigned int *dsp_addr)
+{
+       struct lcdc_device *lcdc_dev = container_of(dev_drv,
+                                                   struct lcdc_device, driver);
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (lcdc_dev->clk_on) {
+               dsp_addr[0] = lcdc_readl(lcdc_dev, WIN0_YRGB_MST);
+               dsp_addr[1] = lcdc_readl(lcdc_dev, WIN1_YRGB_MST);
+               dsp_addr[2] = lcdc_readl(lcdc_dev, WIN2_MST0);
+               dsp_addr[3] = lcdc_readl(lcdc_dev, WIN3_MST0);
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       return 0;
+}
+
+/* CABC tuning table, indexed by (mode - 1) in rk3368_lcdc_set_dsp_cabc.
+ * Fields per entry (from usage below): pixel_num, stage_up, stage_down,
+ * global_su. */
+static struct lcdc_cabc_mode cabc_mode[4] = {
+       {5, 148, 20, 300},      /*mode 1 */
+       {10, 148, 20, 300},     /*mode 2 */
+       {15, 148, 20, 300},     /*mode 3 */
+       {20, 148, 20, 300},     /*mode 4 */
+};
+
+/*
+ * rk3368_lcdc_set_dsp_cabc - configure content-adaptive backlight control.
+ * @mode: 0 disables CABC; 1..4 select an entry of cabc_mode[]; values
+ *        0x11..0x14 and 0xff are handled by the (currently compiled-out)
+ *        iomux block below.
+ *
+ * NOTE(review): with the "#if 0" block disabled, cabc_en always stays 0,
+ * so this function currently always takes the disable path and returns
+ * before reaching the programming code — confirm this is intended.
+ * Always returns 0.
+ */
+static int rk3368_lcdc_set_dsp_cabc(struct rk_lcdc_driver *dev_drv, int mode)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       struct rk_screen *screen = dev_drv->cur_screen;
+       u32 total_pixel, calc_pixel, stage_up, stage_down;
+       u32 pixel_num, global_su;
+       u32 stage_up_rec, stage_down_rec, global_su_rec;
+       u32 mask = 0, val = 0, cabc_en = 0;
+       u32 __maybe_unused max_mode_num =
+           sizeof(cabc_mode) / sizeof(struct lcdc_cabc_mode);
+
+       dev_drv->cabc_mode = mode;
+#if 0/*ndef CONFIG_RK_FPGA*/
+       /* iomux connect to vop or pwm */
+       if (mode == 0) {
+               DBG(3, "close cabc and select rk pwm\n");
+               val = 0x30002;
+               writel_relaxed(val, RK_GRF_VIRT + rk3368_GRF_GPIO3C_IOMUX);
+               cabc_en = 0;
+       } else if (mode > 0 && mode <= max_mode_num) {
+               DBG(3, "open cabc and select vop pwm\n");
+               val = 0x30003;
+               writel_relaxed(val, RK_GRF_VIRT + rk3368_GRF_GPIO3C_IOMUX);
+               cabc_en = 1;
+       } else if (mode > 0x10 && mode <= (max_mode_num + 0x10)) {
+               DBG(3, "open cabc and select rk pwm\n");
+               val = 0x30003;
+               writel_relaxed(val, RK_GRF_VIRT + rk3368_GRF_GPIO3C_IOMUX);
+               cabc_en = 1;
+               mode -= 0x10;
+       } else if (mode == 0xff) {
+               DBG(3, "close cabc and select vop pwm\n");
+               val = 0x30002;
+               writel_relaxed(val, RK_GRF_VIRT + rk3368_GRF_GPIO3C_IOMUX);
+               cabc_en = 0;
+       } else {
+               dev_err(lcdc_dev->dev, "invalid cabc mode value:%d", mode);
+               return 0;
+       }
+#endif
+       /* disable path: clear the CABC enable bit and bail out */
+       if (cabc_en == 0) {
+               spin_lock(&lcdc_dev->reg_lock);
+               if (lcdc_dev->clk_on) {
+                       lcdc_msk_reg(lcdc_dev, CABC_CTRL0,
+                                    m_CABC_EN, v_CABC_EN(0));
+                       lcdc_cfg_done(lcdc_dev);
+               }
+               spin_unlock(&lcdc_dev->reg_lock);
+               return 0;
+       }
+
+       /* pixel_num is stored per-mille of pixels to exclude from the
+        * calculation window */
+       total_pixel = screen->mode.xres * screen->mode.yres;
+       pixel_num = 1000 - (cabc_mode[mode - 1].pixel_num);
+       calc_pixel = (total_pixel * pixel_num) / 1000;
+       stage_up = cabc_mode[mode - 1].stage_up;
+       stage_down = cabc_mode[mode - 1].stage_down;
+       global_su = cabc_mode[mode - 1].global_su;
+
+       /* fixed-point reciprocals (65536 / x) for the hardware */
+       stage_up_rec = 256 * 256 / stage_up;
+       stage_down_rec = 256 * 256 / stage_down;
+       global_su_rec = 256 * 256 / global_su;
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (lcdc_dev->clk_on) {
+               mask = m_CABC_CALC_PIXEL_NUM;
+               val = v_CABC_CALC_PIXEL_NUM(calc_pixel);
+               lcdc_msk_reg(lcdc_dev, CABC_CTRL0, mask, val);
+
+               mask = m_CABC_TOTAL_PIXEL_NUM;
+               val = v_CABC_TOTAL_PIXEL_NUM(total_pixel);
+               lcdc_msk_reg(lcdc_dev, CABC_CTRL1, mask, val);
+
+               mask = m_CABC_STAGE_UP | m_CABC_STAGE_UP_REC |
+                   m_CABC_GLOBAL_SU_LIMIT_EN | m_CABC_GLOBAL_SU_REC;
+               val = v_CABC_STAGE_UP(stage_up) |
+                   v_CABC_STAGE_UP_REC(stage_up_rec) |
+                   v_CABC_GLOBAL_SU_LIMIT_EN(1) |
+                   v_CABC_GLOBAL_SU_REC(global_su_rec);
+               lcdc_msk_reg(lcdc_dev, CABC_CTRL2, mask, val);
+
+               mask = m_CABC_STAGE_DOWN | m_CABC_STAGE_DOWN_REC |
+                   m_CABC_GLOBAL_SU;
+               val = v_CABC_STAGE_DOWN(stage_down) |
+                   v_CABC_STAGE_DOWN_REC(stage_down_rec) |
+                   v_CABC_GLOBAL_SU(global_su);
+               lcdc_msk_reg(lcdc_dev, CABC_CTRL3, mask, val);
+               lcdc_cfg_done(lcdc_dev);
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       return 0;
+}
+
+/*
+ * BCSH hue register encoding for an angle a:
+ *   a in [-30, 0]: sin_hue = sin(a) * 256 + 0x100; cos_hue = cos(a) * 256
+ *   a in [0, 30]:  sin_hue = sin(a) * 256;         cos_hue = cos(a) * 256
+ */
+/*
+ * rk3368_lcdc_get_bcsh_hue - read back a hue coefficient from BCSH_H.
+ * @mode: H_SIN for the sine field, H_COS for the cosine field (shifted
+ *        down from bits 31:16); any other mode returns the raw register.
+ *
+ * Returns 0 when the controller clock is gated. (Previously `val` was
+ * left uninitialized on that path and returned — undefined behavior.)
+ */
+static int rk3368_lcdc_get_bcsh_hue(struct rk_lcdc_driver *dev_drv,
+                                   bcsh_hue_mode mode)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       u32 val = 0;
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (lcdc_dev->clk_on) {
+               val = lcdc_readl(lcdc_dev, BCSH_H);
+               switch (mode) {
+               case H_SIN:
+                       val &= m_BCSH_SIN_HUE;
+                       break;
+               case H_COS:
+                       val &= m_BCSH_COS_HUE;
+                       val >>= 16;
+                       break;
+               default:
+                       break;
+               }
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       return val;
+}
+
+/*
+ * rk3368_lcdc_set_bcsh_hue - program the hue sine/cosine coefficients.
+ * @sin_hue: encoded sin field value
+ * @cos_hue: encoded cos field value
+ * Always returns 0; a no-op when the controller clock is gated.
+ */
+static int rk3368_lcdc_set_bcsh_hue(struct rk_lcdc_driver *dev_drv,
+                                   int sin_hue, int cos_hue)
+{
+       struct lcdc_device *lcdc_dev = container_of(dev_drv,
+                                                   struct lcdc_device, driver);
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (lcdc_dev->clk_on) {
+               /* both coefficients fit in one masked write to BCSH_H */
+               lcdc_msk_reg(lcdc_dev, BCSH_H,
+                            m_BCSH_SIN_HUE | m_BCSH_COS_HUE,
+                            v_BCSH_SIN_HUE(sin_hue) |
+                            v_BCSH_COS_HUE(cos_hue));
+               lcdc_cfg_done(lcdc_dev);
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_set_bcsh_bcs - set brightness, contrast, or saturation.
+ * @mode: BRIGHTNESS (0..255, typical 128; rebased around 0x80 for the
+ *        hardware), CONTRAST (0..510, typical 256), or SAT_CON
+ *        (0..1015, typical 256).
+ * @value: value in the range documented above.
+ *
+ * Returns the encoded field value written, or 0 for an unknown mode or
+ * when the clock is gated. (Previously `mask`/`val` were uninitialized
+ * on those paths, so an unknown mode wrote garbage to BCSH_BCS and the
+ * return value was undefined.)
+ */
+static int rk3368_lcdc_set_bcsh_bcs(struct rk_lcdc_driver *dev_drv,
+                                   bcsh_bcs_mode mode, int value)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       u32 mask = 0, val = 0;
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (lcdc_dev->clk_on) {
+               switch (mode) {
+               case BRIGHTNESS:
+                       /*from 0 to 255,typical is 128 */
+                       if (value < 0x80)
+                               value += 0x80;
+                       else if (value >= 0x80)
+                               value = value - 0x80;
+                       mask = m_BCSH_BRIGHTNESS;
+                       val = v_BCSH_BRIGHTNESS(value);
+                       break;
+               case CONTRAST:
+                       /*from 0 to 510,typical is 256 */
+                       mask = m_BCSH_CONTRAST;
+                       val = v_BCSH_CONTRAST(value);
+                       break;
+               case SAT_CON:
+                       /*from 0 to 1015,typical is 256 */
+                       mask = m_BCSH_SAT_CON;
+                       val = v_BCSH_SAT_CON(value);
+                       break;
+               default:
+                       break;
+               }
+               /* skip the register write for unknown modes (mask == 0) */
+               if (mask) {
+                       lcdc_msk_reg(lcdc_dev, BCSH_BCS, mask, val);
+                       lcdc_cfg_done(lcdc_dev);
+               }
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+       return val;
+}
+
+/*
+ * rk3368_lcdc_get_bcsh_bcs - read back brightness/contrast/saturation.
+ * @mode: BRIGHTNESS (rebased around 0x80, inverse of the set path),
+ *        CONTRAST (bits 15:8), or SAT_CON (bits 31:20); any other mode
+ *        returns the raw register value.
+ *
+ * Returns 0 when the controller clock is gated. (Previously `val` was
+ * left uninitialized on that path and returned — undefined behavior.)
+ */
+static int rk3368_lcdc_get_bcsh_bcs(struct rk_lcdc_driver *dev_drv,
+                                   bcsh_bcs_mode mode)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+       u32 val = 0;
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (lcdc_dev->clk_on) {
+               val = lcdc_readl(lcdc_dev, BCSH_BCS);
+               switch (mode) {
+               case BRIGHTNESS:
+                       val &= m_BCSH_BRIGHTNESS;
+                       /* undo the 0x80 rebasing done by the set path */
+                       if (val > 0x80)
+                               val -= 0x80;
+                       else
+                               val += 0x80;
+                       break;
+               case CONTRAST:
+                       val &= m_BCSH_CONTRAST;
+                       val >>= 8;
+                       break;
+               case SAT_CON:
+                       val &= m_BCSH_SAT_CON;
+                       val >>= 20;
+                       break;
+               default:
+                       break;
+               }
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+       return val;
+}
+
+/*
+ * rk3368_lcdc_open_bcsh - enable or disable the BCSH post-processing block.
+ * @open: true enables BCSH and loads default BCS/hue register values;
+ *        false clears only the enable bit.
+ * Always returns 0; a no-op when the controller clock is gated.
+ */
+static int rk3368_lcdc_open_bcsh(struct rk_lcdc_driver *dev_drv, bool open)
+{
+       struct lcdc_device *lcdc_dev = container_of(dev_drv,
+                                                   struct lcdc_device, driver);
+       u32 mask, val;
+
+       spin_lock(&lcdc_dev->reg_lock);
+       if (lcdc_dev->clk_on) {
+               rk3368_lcdc_bcsh_path_sel(dev_drv);
+               if (!open) {
+                       /* clear only the enable bit, keep other settings */
+                       mask = m_BCSH_EN;
+                       val = v_BCSH_EN(0);
+                       lcdc_msk_reg(lcdc_dev, BCSH_COLOR_BAR, mask, val);
+               } else {
+                       /* enable and program default register contents */
+                       lcdc_writel(lcdc_dev, BCSH_COLOR_BAR, 0x1);
+                       lcdc_writel(lcdc_dev, BCSH_BCS, 0xd0010000);
+                       lcdc_writel(lcdc_dev, BCSH_H, 0x01000000);
+               }
+               lcdc_cfg_done(lcdc_dev);
+       }
+       spin_unlock(&lcdc_dev->reg_lock);
+
+       return 0;
+}
+
+/*
+ * rk3368_lcdc_set_bcsh - apply the driver's cached BCSH settings.
+ * @enable: false (or bcsh.enable unset) shuts the BCSH block down.
+ *
+ * Opens the BCSH block if at least one cached value is in range, then
+ * programs each in-range value individually. Always returns 0.
+ */
+static int rk3368_lcdc_set_bcsh(struct rk_lcdc_driver *dev_drv, bool enable)
+{
+       bool b_valid, c_valid, s_valid, h_valid;
+
+       if (!enable || !dev_drv->bcsh.enable) {
+               rk3368_lcdc_open_bcsh(dev_drv, false);
+               return 0;
+       }
+
+       b_valid = dev_drv->bcsh.brightness <= 255;
+       c_valid = dev_drv->bcsh.contrast <= 510;
+       s_valid = dev_drv->bcsh.sat_con <= 1015;
+       h_valid = dev_drv->bcsh.sin_hue <= 511 &&
+                 dev_drv->bcsh.cos_hue <= 511;
+
+       if (!(b_valid || c_valid || s_valid || h_valid))
+               return 0;
+
+       rk3368_lcdc_open_bcsh(dev_drv, true);
+       if (b_valid)
+               rk3368_lcdc_set_bcsh_bcs(dev_drv, BRIGHTNESS,
+                                        dev_drv->bcsh.brightness);
+       if (c_valid)
+               rk3368_lcdc_set_bcsh_bcs(dev_drv, CONTRAST,
+                                        dev_drv->bcsh.contrast);
+       if (s_valid)
+               rk3368_lcdc_set_bcsh_bcs(dev_drv, SAT_CON,
+                                        dev_drv->bcsh.sat_con);
+       if (h_valid)
+               rk3368_lcdc_set_bcsh_hue(dev_drv,
+                                        dev_drv->bcsh.sin_hue,
+                                        dev_drv->bcsh.cos_hue);
+
+       return 0;
+}
+
+/*
+ * Force the display output to black (@enable != 0) or restore normal
+ * scanout (@enable == 0), switching the panel backlight and transmitter
+ * to match.  The DSP_BLACK_EN register bit is toggled under reg_lock and
+ * only while the controller clock is on.  Always returns 0.
+ *
+ * Note: the original code wrapped the register sequences in dead
+ * "#if 1 ... #endif" guards; those were removed as they had no effect.
+ */
+static int rk3368_lcdc_dsp_black(struct rk_lcdc_driver *dev_drv, int enable)
+{
+       struct lcdc_device *lcdc_dev =
+           container_of(dev_drv, struct lcdc_device, driver);
+
+       rk3368_lcdc_get_backlight_device(dev_drv);
+
+       if (enable) {
+               /* close the backlight */
+               if (lcdc_dev->backlight) {
+                       lcdc_dev->backlight->props.power = FB_BLANK_POWERDOWN;
+                       backlight_update_status(lcdc_dev->backlight);
+               }
+               spin_lock(&lcdc_dev->reg_lock);
+               if (likely(lcdc_dev->clk_on)) {
+                       lcdc_msk_reg(lcdc_dev, DSP_CTRL0, m_DSP_BLACK_EN,
+                                    v_DSP_BLACK_EN(1));
+                       lcdc_cfg_done(lcdc_dev);
+               }
+               spin_unlock(&lcdc_dev->reg_lock);
+               if (dev_drv->trsm_ops && dev_drv->trsm_ops->disable)
+                       dev_drv->trsm_ops->disable();
+       } else {
+               spin_lock(&lcdc_dev->reg_lock);
+               if (likely(lcdc_dev->clk_on)) {
+                       lcdc_msk_reg(lcdc_dev, DSP_CTRL0, m_DSP_BLACK_EN,
+                                    v_DSP_BLACK_EN(0));
+                       lcdc_cfg_done(lcdc_dev);
+               }
+               spin_unlock(&lcdc_dev->reg_lock);
+               if (dev_drv->trsm_ops && dev_drv->trsm_ops->enable)
+                       dev_drv->trsm_ops->enable();
+               /* let the transmitter settle before re-lighting the panel */
+               msleep(100);
+               /* open the backlight */
+               if (lcdc_dev->backlight) {
+                       lcdc_dev->backlight->props.power = FB_BLANK_UNBLANK;
+                       backlight_update_status(lcdc_dev->backlight);
+               }
+       }
+
+       return 0;
+}
+
+/* Callback table handed to the rk_fb core (via rk_fb_register) for this
+ * VOP/LCDC instance; the fb layer drives all display operations through it. */
+static struct rk_lcdc_drv_ops lcdc_drv_ops = {
+       .open = rk3368_lcdc_open,
+       .win_direct_en = rk3368_lcdc_win_direct_en,
+       .load_screen = rk3368_load_screen,
+       .set_par = rk3368_lcdc_set_par,
+       .pan_display = rk3368_lcdc_pan_display,
+       .direct_set_addr = rk3368_lcdc_direct_set_win_addr,
+       /*.lcdc_reg_update = rk3368_lcdc_reg_update,*/
+       .blank = rk3368_lcdc_blank,
+       .ioctl = rk3368_lcdc_ioctl,
+       .suspend = rk3368_lcdc_early_suspend,
+       .resume = rk3368_lcdc_early_resume,
+       .get_win_state = rk3368_lcdc_get_win_state,
+       .ovl_mgr = rk3368_lcdc_ovl_mgr,
+       .get_disp_info = rk3368_lcdc_get_disp_info,
+       .fps_mgr = rk3368_lcdc_fps_mgr,
+       .fb_get_win_id = rk3368_lcdc_get_win_id,
+       .fb_win_remap = rk3368_fb_win_remap,
+       .set_dsp_lut = rk3368_set_dsp_lut,
+       .poll_vblank = rk3368_lcdc_poll_vblank,
+       .dpi_open = rk3368_lcdc_dpi_open,
+       .dpi_win_sel = rk3368_lcdc_dpi_win_sel,
+       .dpi_status = rk3368_lcdc_dpi_status,
+       .get_dsp_addr = rk3368_lcdc_get_dsp_addr,
+       .set_dsp_cabc = rk3368_lcdc_set_dsp_cabc,
+       .set_dsp_bcsh_hue = rk3368_lcdc_set_bcsh_hue,
+       .set_dsp_bcsh_bcs = rk3368_lcdc_set_bcsh_bcs,
+       .get_dsp_bcsh_hue = rk3368_lcdc_get_bcsh_hue,
+       .get_dsp_bcsh_bcs = rk3368_lcdc_get_bcsh_bcs,
+       .open_bcsh = rk3368_lcdc_open_bcsh,
+       .dump_reg = rk3368_lcdc_reg_dump,
+       .cfg_done = rk3368_lcdc_config_done,
+       .set_irq_to_cpu = rk3368_lcdc_set_irq_to_cpu,
+       .dsp_black = rk3368_lcdc_dsp_black,
+       .mmu_en    = rk3368_lcdc_mmu_en,
+};
+
+#ifdef LCDC_IRQ_EMPTY_DEBUG
+/*
+ * Debug helper (compiled only with LCDC_IRQ_EMPTY_DEBUG): acknowledge and
+ * log FIFO-empty / PWM interrupt sources found in @intr_status.
+ *
+ * Note the else-if chain: only the first pending source is cleared per
+ * call; remaining sources are handled on subsequent interrupts.
+ * Always returns 0.
+ */
+static int rk3368_lcdc_parse_irq(struct lcdc_device *lcdc_dev,
+                                unsigned int intr_status)
+{
+       if (intr_status & m_WIN0_EMPTY_INTR_STS) {
+               lcdc_msk_reg(lcdc_dev, INTR_CLEAR, m_WIN0_EMPTY_INTR_CLR,
+                            v_WIN0_EMPTY_INTR_CLR(1));
+               dev_warn(lcdc_dev->dev, "win0 empty irq!");
+       } else if (intr_status & m_WIN1_EMPTY_INTR_STS) {
+               lcdc_msk_reg(lcdc_dev, INTR_CLEAR, m_WIN1_EMPTY_INTR_CLR,
+                            v_WIN1_EMPTY_INTR_CLR(1));
+               dev_warn(lcdc_dev->dev, "win1 empty irq!");
+       } else if (intr_status & m_WIN2_EMPTY_INTR_STS) {
+               lcdc_msk_reg(lcdc_dev, INTR_CLEAR, m_WIN2_EMPTY_INTR_CLR,
+                            v_WIN2_EMPTY_INTR_CLR(1));
+               dev_warn(lcdc_dev->dev, "win2 empty irq!");
+       } else if (intr_status & m_WIN3_EMPTY_INTR_STS) {
+               lcdc_msk_reg(lcdc_dev, INTR_CLEAR, m_WIN3_EMPTY_INTR_CLR,
+                            v_WIN3_EMPTY_INTR_CLR(1));
+               dev_warn(lcdc_dev->dev, "win3 empty irq!");
+       } else if (intr_status & m_HWC_EMPTY_INTR_STS) {
+               lcdc_msk_reg(lcdc_dev, INTR_CLEAR, m_HWC_EMPTY_INTR_CLR,
+                            v_HWC_EMPTY_INTR_CLR(1));
+               dev_warn(lcdc_dev->dev, "HWC empty irq!");
+       } else if (intr_status & m_POST_BUF_EMPTY_INTR_STS) {
+               lcdc_msk_reg(lcdc_dev, INTR_CLEAR, m_POST_BUF_EMPTY_INTR_CLR,
+                            v_POST_BUF_EMPTY_INTR_CLR(1));
+               dev_warn(lcdc_dev->dev, "post buf empty irq!");
+       } else if (intr_status & m_PWM_GEN_INTR_STS) {
+               lcdc_msk_reg(lcdc_dev, INTR_CLEAR, m_PWM_GEN_INTR_CLR,
+                            v_PWM_GEN_INTR_CLR(1));
+               dev_warn(lcdc_dev->dev, "PWM gen irq!");
+       }
+       return 0;
+}
+#endif
+
+/*
+ * Top-half interrupt handler for the VOP/LCDC.
+ *
+ * Reads INTR_STATUS and acknowledges the first pending source (else-if
+ * chain).  On frame-start (FS) it timestamps the vsync, optionally calls
+ * the DRM irq callback, and wakes vsync waiters.  Always returns
+ * IRQ_HANDLED, even when no known bit was set (the line is registered
+ * IRQF_SHARED -- NOTE(review): returning IRQ_NONE for unhandled status
+ * would be more correct for a shared line; confirm before changing).
+ */
+static irqreturn_t rk3368_lcdc_isr(int irq, void *dev_id)
+{
+       struct lcdc_device *lcdc_dev = (struct lcdc_device *)dev_id;
+       ktime_t timestamp = ktime_get();
+       u32 intr_status;
+
+       intr_status = lcdc_readl(lcdc_dev, INTR_STATUS);
+
+       if (intr_status & m_FS_INTR_STS) {
+               timestamp = ktime_get();
+               lcdc_msk_reg(lcdc_dev, INTR_CLEAR, m_FS_INTR_CLR,
+                            v_FS_INTR_CLR(1));
+               /*if(lcdc_dev->driver.wait_fs){ */
+               /* frame-done completion path deliberately disabled (if (0)) */
+               if (0) {
+                       spin_lock(&(lcdc_dev->driver.cpl_lock));
+                       complete(&(lcdc_dev->driver.frame_done));
+                       spin_unlock(&(lcdc_dev->driver.cpl_lock));
+               }
+#ifdef CONFIG_DRM_ROCKCHIP
+               lcdc_dev->driver.irq_call_back(&lcdc_dev->driver);
+#endif
+               lcdc_dev->driver.vsync_info.timestamp = timestamp;
+               wake_up_interruptible_all(&lcdc_dev->driver.vsync_info.wait);
+
+       } else if (intr_status & m_LINE_FLAG0_INTR_STS) {
+               /* line flag0: record frame-done time for fps accounting */
+               lcdc_dev->driver.frame_time.last_framedone_t =
+                   lcdc_dev->driver.frame_time.framedone_t;
+               lcdc_dev->driver.frame_time.framedone_t = cpu_clock(0);
+               lcdc_msk_reg(lcdc_dev, INTR_CLEAR, m_LINE_FLAG0_INTR_CLR,
+                            v_LINE_FLAG0_INTR_CLR(1));
+       } else if (intr_status & m_LINE_FLAG1_INTR_STS) {
+               /*line flag1 */
+               lcdc_msk_reg(lcdc_dev, INTR_CLEAR, m_LINE_FLAG1_INTR_CLR,
+                            v_LINE_FLAG1_INTR_CLR(1));
+       } else if (intr_status & m_FS_NEW_INTR_STS) {
+               /*new frame start */
+               lcdc_msk_reg(lcdc_dev, INTR_CLEAR, m_FS_NEW_INTR_CLR,
+                            v_FS_NEW_INTR_CLR(1));
+       } else if (intr_status & m_BUS_ERROR_INTR_STS) {
+               lcdc_msk_reg(lcdc_dev, INTR_CLEAR, m_BUS_ERROR_INTR_CLR,
+                            v_BUS_ERROR_INTR_CLR(1));
+               dev_warn(lcdc_dev->dev, "bus error!");
+       }
+
+       /* for win empty debug */
+#ifdef LCDC_IRQ_EMPTY_DEBUG
+       rk3368_lcdc_parse_irq(lcdc_dev, intr_status);
+#endif
+       return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_PM)
+/* Platform PM stubs: actual suspend/resume is driven by the fb layer
+ * through lcdc_drv_ops (early_suspend/early_resume callbacks). */
+static int rk3368_lcdc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       return 0;
+}
+
+static int rk3368_lcdc_resume(struct platform_device *pdev)
+{
+       return 0;
+}
+#else
+#define rk3368_lcdc_suspend NULL
+#define rk3368_lcdc_resume  NULL
+#endif
+
+/*
+ * Read the optional "rockchip,*" device-tree properties into lcdc_dev /
+ * dev_drv, falling back to documented defaults when a property is absent.
+ * BCSH fields default to 0xffff, which rk3368_lcdc_set_bcsh treats as
+ * "not configured".  Always returns 0.
+ */
+static int rk3368_lcdc_parse_dt(struct lcdc_device *lcdc_dev)
+{
+       struct device_node *np = lcdc_dev->dev->of_node;
+       struct rk_lcdc_driver *dev_drv = &lcdc_dev->driver;
+       int val;
+
+       if (of_property_read_u32(np, "rockchip,prop", &val))
+               lcdc_dev->prop = PRMRY; /*default set it as primary */
+       else
+               lcdc_dev->prop = val;
+
+       if (of_property_read_u32(np, "rockchip,mirror", &val))
+               dev_drv->rotate_mode = NO_MIRROR;
+       else
+               dev_drv->rotate_mode = val;
+
+       if (of_property_read_u32(np, "rockchip,cabc_mode", &val))
+               dev_drv->cabc_mode = 0; /* default set close cabc */
+       else
+               dev_drv->cabc_mode = val;
+
+       if (of_property_read_u32(np, "rockchip,pwr18", &val))
+               /*default set it as 3.xv power supply */
+               lcdc_dev->pwr18 = false;
+       else
+               lcdc_dev->pwr18 = (val ? true : false);
+
+       if (of_property_read_u32(np, "rockchip,fb-win-map", &val))
+               dev_drv->fb_win_map = FB_DEFAULT_ORDER;
+       else
+               dev_drv->fb_win_map = val;
+
+       if (of_property_read_u32(np, "rockchip,bcsh-en", &val))
+               dev_drv->bcsh.enable = false;
+       else
+               dev_drv->bcsh.enable = (val ? true : false);
+
+       if (of_property_read_u32(np, "rockchip,brightness", &val))
+               dev_drv->bcsh.brightness = 0xffff;
+       else
+               dev_drv->bcsh.brightness = val;
+
+       if (of_property_read_u32(np, "rockchip,contrast", &val))
+               dev_drv->bcsh.contrast = 0xffff;
+       else
+               dev_drv->bcsh.contrast = val;
+
+       if (of_property_read_u32(np, "rockchip,sat-con", &val))
+               dev_drv->bcsh.sat_con = 0xffff;
+       else
+               dev_drv->bcsh.sat_con = val;
+
+       /* hue is packed: bits [7:0] = sin, bits [15:8] = cos */
+       if (of_property_read_u32(np, "rockchip,hue", &val)) {
+               dev_drv->bcsh.sin_hue = 0xffff;
+               dev_drv->bcsh.cos_hue = 0xffff;
+       } else {
+               dev_drv->bcsh.sin_hue = val & 0xff;
+               dev_drv->bcsh.cos_hue = (val >> 8) & 0xff;
+       }
+
+#if defined(CONFIG_ROCKCHIP_IOMMU)
+       if (of_property_read_u32(np, "rockchip,iommu-enabled", &val))
+               dev_drv->iommu_enabled = 0;
+       else
+               dev_drv->iommu_enabled = val;
+#else
+       dev_drv->iommu_enabled = 0;
+#endif
+       return 0;
+}
+
+/*
+ * Probe: map registers, parse DT, request the IRQ and register the
+ * device with the rk_fb core.  An "extend" controller defers probing
+ * until the primary one has registered.
+ *
+ * Fixes over the original:
+ *  - `prop` was read uninitialized when the "rockchip,prop" DT property
+ *    is absent (of_property_read_u32 leaves the output untouched on
+ *    failure); it now defaults to PRMRY.
+ *  - platform_get_resource() result was dereferenced without a NULL check.
+ *  - devm_kzalloc() failure was tested with IS_ERR(); it returns NULL on
+ *    failure, so OOM was never detected.  Now checked against NULL.
+ */
+static int rk3368_lcdc_probe(struct platform_device *pdev)
+{
+       struct lcdc_device *lcdc_dev = NULL;
+       struct rk_lcdc_driver *dev_drv;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       struct device_node *np = pdev->dev.of_node;
+       int prop = PRMRY;       /* default when the DT property is absent */
+       int ret = 0;
+
+       /*if the primary lcdc has not registered ,the extend
+          lcdc register later */
+       of_property_read_u32(np, "rockchip,prop", &prop);
+       if (prop == EXTEND) {
+               if (!is_prmry_rk_lcdc_registered())
+                       return -EPROBE_DEFER;
+       }
+       lcdc_dev = devm_kzalloc(dev, sizeof(struct lcdc_device), GFP_KERNEL);
+       if (!lcdc_dev) {
+               dev_err(&pdev->dev, "rk3368 lcdc device kmalloc fail!");
+               return -ENOMEM;
+       }
+       platform_set_drvdata(pdev, lcdc_dev);
+       lcdc_dev->dev = dev;
+       rk3368_lcdc_parse_dt(lcdc_dev);
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "cannot find memory resource for lcdc\n");
+               return -ENXIO;
+       }
+       lcdc_dev->reg_phy_base = res->start;
+       lcdc_dev->len = resource_size(res);
+       lcdc_dev->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(lcdc_dev->regs))
+               return PTR_ERR(lcdc_dev->regs);
+       else
+               dev_info(dev, "lcdc_dev->regs=0x%lx\n", (long)lcdc_dev->regs);
+
+       /* shadow copy of the register file, used while clocks are gated */
+       lcdc_dev->regsbak = devm_kzalloc(dev, lcdc_dev->len, GFP_KERNEL);
+       if (!lcdc_dev->regsbak)
+               return -ENOMEM;
+       lcdc_dev->dsp_lut_addr_base = (lcdc_dev->regs + GAMMA_LUT_ADDR);
+       lcdc_dev->id = 0;
+       dev_set_name(lcdc_dev->dev, "lcdc%d", lcdc_dev->id);
+       dev_drv = &lcdc_dev->driver;
+       dev_drv->dev = dev;
+       dev_drv->prop = prop;
+       dev_drv->id = lcdc_dev->id;
+       dev_drv->ops = &lcdc_drv_ops;
+       dev_drv->lcdc_win_num = ARRAY_SIZE(lcdc_win);
+       spin_lock_init(&lcdc_dev->reg_lock);
+
+       lcdc_dev->irq = platform_get_irq(pdev, 0);
+       if (lcdc_dev->irq < 0) {
+               dev_err(&pdev->dev, "cannot find IRQ for lcdc%d\n",
+                       lcdc_dev->id);
+               return -ENXIO;
+       }
+
+       ret = devm_request_irq(dev, lcdc_dev->irq, rk3368_lcdc_isr,
+                              IRQF_DISABLED | IRQF_SHARED,
+                              dev_name(dev), lcdc_dev);
+       if (ret) {
+               dev_err(&pdev->dev, "cannot requeset irq %d - err %d\n",
+                       lcdc_dev->irq, ret);
+               return ret;
+       }
+
+       if (dev_drv->iommu_enabled) {
+               if (lcdc_dev->id == 0) {
+                       strcpy(dev_drv->mmu_dts_name,
+                              VOPB_IOMMU_COMPATIBLE_NAME);
+               } else {
+                       strcpy(dev_drv->mmu_dts_name,
+                              VOPL_IOMMU_COMPATIBLE_NAME);
+               }
+       }
+
+       ret = rk_fb_register(dev_drv, lcdc_win, lcdc_dev->id);
+       if (ret < 0) {
+               dev_err(dev, "register fb for lcdc%d failed!\n", lcdc_dev->id);
+               return ret;
+       }
+       lcdc_dev->screen = dev_drv->screen0;
+       dev_info(dev, "lcdc%d probe ok, iommu %s\n",
+                lcdc_dev->id, dev_drv->iommu_enabled ? "enabled" : "disabled");
+
+       return 0;
+}
+
+/* No explicit teardown needed: all resources taken in probe are
+ * devm-managed and released automatically. */
+static int rk3368_lcdc_remove(struct platform_device *pdev)
+{
+       return 0;
+}
+
+/* System shutdown: de-initialize the controller and cut display power. */
+static void rk3368_lcdc_shutdown(struct platform_device *pdev)
+{
+       struct lcdc_device *lcdc_dev = platform_get_drvdata(pdev);
+
+       rk3368_lcdc_deint(lcdc_dev);
+       rk_disp_pwr_disable(&lcdc_dev->driver);
+}
+
+#if defined(CONFIG_OF)
+/* Device-tree match table for the RK3368 VOP/LCDC. */
+static const struct of_device_id rk3368_lcdc_dt_ids[] = {
+       {.compatible = "rockchip,rk3368-lcdc",},
+       {}
+};
+#endif
+
+static struct platform_driver rk3368_lcdc_driver = {
+       .probe = rk3368_lcdc_probe,
+       .remove = rk3368_lcdc_remove,
+       .driver = {
+                  .name = "rk3368-lcdc",
+                  .owner = THIS_MODULE,
+                  .of_match_table = of_match_ptr(rk3368_lcdc_dt_ids),
+                  },
+       .suspend = rk3368_lcdc_suspend,
+       .resume = rk3368_lcdc_resume,
+       .shutdown = rk3368_lcdc_shutdown,
+};
+
+static int __init rk3368_lcdc_module_init(void)
+{
+       return platform_driver_register(&rk3368_lcdc_driver);
+}
+
+static void __exit rk3368_lcdc_module_exit(void)
+{
+       platform_driver_unregister(&rk3368_lcdc_driver);
+}
+
+/* fs_initcall rather than module_init: presumably so the display comes
+ * up before ordinary device initcalls -- TODO confirm intent. */
+fs_initcall(rk3368_lcdc_module_init);
+module_exit(rk3368_lcdc_module_exit);
diff --git a/drivers/video/rockchip/lcdc/rk3368_lcdc.h b/drivers/video/rockchip/lcdc/rk3368_lcdc.h
new file mode 100644 (file)
index 0000000..7008cf6
--- /dev/null
@@ -0,0 +1,1980 @@
+#ifndef RK3368_LCDC_H_
+#define RK3368_LCDC_H_
+
+#include<linux/rk_fb.h>
+#include<linux/io.h>
+#include<linux/clk.h>
+
+#define VOP_INPUT_MAX_WIDTH 4096 /*3840 for LINCOLN*/
+
+#define REG_CFG_DONE                   (0x0000)
+#define VOP_CFG_DONE(x)                                (((x)&1)<<0)
+#define WIN0_CFG_DONE(x)                       (((x)&1)<<1)
+#define WIN1_CFG_DONE(x)                       (((x)&1)<<2)
+#define WIN2_CFG_DONE(x)                       (((x)&1)<<3)
+#define WIN3_CFG_DONE(x)                       (((x)&1)<<4)
+#define HWC_CFG_DONE(x)                                (((x)&1)<<5)
+#define IEP_CFG_DONE(x)                                (((x)&1)<<6)
+#define FBDC_CFG_DONE(x)                       (((x)&1)<<7)
+#define SYS_CFG_DONE(x)                                (((x)&1)<<8)
+
+#define VOP_CFG_DONE_WMSK(x)                   (((x)&1)<<(0+16))
+#define WIN0_CFG_DONE_WMSK(x)                  (((x)&1)<<(1+16))
+#define WIN1_CFG_DONE_WMSK(x)                  (((x)&1)<<(2+16))
+#define WIN2_CFG_DONE_WMSK(x)                  (((x)&1)<<(3+16))
+#define WIN3_CFG_DONE_WMSK(x)                  (((x)&1)<<(4+16))
+#define HWC_CFG_DONE_WMSK(x)                   (((x)&1)<<(5+16))
+#define IEP_CFG_DONE_WMSK(x)                   (((x)&1)<<(6+16))
+#define FBDC_CFG_DONE_WMSK(x)                  (((x)&1)<<(7+16))
+#define SYS_CFG_DONE_WMSK(x)                   (((x)&1)<<(8+16))
+
+#define VOP_REG_DONE           (VOP_CFG_DONE(1)  | VOP_CFG_DONE_WMSK(1))
+#define WIN0_REG_DONE          (WIN0_CFG_DONE(1) | WIN0_CFG_DONE_WMSK(1))
+#define WIN1_REG_DONE          (WIN1_CFG_DONE(1) | WIN1_CFG_DONE_WMSK(1))
+#define WIN2_REG_DONE          (WIN2_CFG_DONE(1) | WIN2_CFG_DONE_WMSK(1))
+#define WIN3_REG_DONE          (WIN3_CFG_DONE(1) | WIN3_CFG_DONE_WMSK(1))
+#define HWC_REG_DONE           (HWC_CFG_DONE(1)  | HWC_CFG_DONE_WMSK(1))
+#define IEP_REG_DONE           (IEP_CFG_DONE(1)  | IEP_CFG_DONE_WMSK(1))
+#define FBDC_REG_DONE          (FBDC_CFG_DONE(1) | FBDC_CFG_DONE_WMSK(1))
+#define SYS_REG_DONE           (SYS_CFG_DONE(1)  | SYS_CFG_DONE_WMSK(1))
+#define VERSION_INFO                   (0x0004)
+#define m_RTL_VERSION                  (0xffff<<0)
+#define m_FPGA_VERSION                 (0xffff<<16)
+#define SYS_CTRL                       (0x0008)
+#define v_DIRECT_PATH_EN(x)                    (((x)&1)<<0)
+#define v_DIRECT_PATCH_SEL(x)                  (((x)&3)<<1)
+#define v_DOUB_CHANNEL_EN(x)                   (((x)&1)<<3)
+#define v_DOUB_CH_OVERLAP_NUM(x)               (((x)&0xf)<<4)
+#define v_EDPI_HALT_EN(x)                      (((x)&1)<<8)
+#define v_EDPI_WMS_MODE(x)                     (((x)&1)<<9)
+#define v_EDPI_WMS_FS(x)                       (((x)&1)<<10)
+#define v_GLOBAL_REGDONE_EN(x)                 (((x)&1)<<11)
+#define v_RGB_OUT_EN(x)                                (((x)&1)<<12)
+#define v_HDMI_OUT_EN(x)                       (((x)&1)<<13)
+#define v_EDP_OUT_EN(x)                                (((x)&1)<<14)
+#define v_MIPI_OUT_EN(x)                       (((x)&1)<<15)
+#define v_OVERLAY_MODE(x)                      (((x)&1)<<16)
+#define v_FS_SAME_ADDR_MASK_EN(x)              (((x)&1)<<17)
+#define v_POST_LB_MODE(x)                      (((x)&1)<<18)
+#define v_WIN23_PRI_OPT_MODE(x)                        (((x)&1)<<19)
+#define v_MMU_EN(x)                            (((x)&1)<<20)
+#define v_DMA_STOP(x)                          (((x)&1)<<21)
+#define v_STANDBY_EN(x)                                (((x)&1)<<22)
+#define v_AUTO_GATING_EN(x)                    (((x)&1)<<23)
+
+#define m_DIRECT_PATH_EN                       (1<<0)
+#define m_DIRECT_PATCH_SEL                     (3<<1)
+#define m_DOUB_CHANNEL_EN                      (1<<3)
+#define m_DOUB_CH_OVERLAP_NUM                  (0xf<<4)
+#define m_EDPI_HALT_EN                         (1<<8)
+#define m_EDPI_WMS_MODE                                (1<<9)
+#define m_EDPI_WMS_FS                          (1<<10)
+#define m_GLOBAL_REGDONE_EN                    (1<<11)
+#define m_RGB_OUT_EN                           (1<<12)
+#define m_HDMI_OUT_EN                          (1<<13)
+#define m_EDP_OUT_EN                           (1<<14)
+#define m_MIPI_OUT_EN                          (1<<15)
+#define m_OVERLAY_MODE                         (1<<16)
+#define m_FS_SAME_ADDR_MASK_EN                 (1<<17)
+#define m_POST_LB_MODE                         (1<<18)
+#define m_WIN23_PRI_OPT_MODE                   (1<<19)
+#define m_MMU_EN                               (1<<20)
+#define m_DMA_STOP                             (1<<21)
+#define m_STANDBY_EN                           (1<<22)
+#define m_AUTO_GATING_EN                       (1<<23)
+
+#define SYS_CTRL1                      (0x000c)
+#define v_NOC_HURRY_EN(x)                      (((x)&0x1)<<0)
+#define v_NOC_HURRY_VALUE(x)                   (((x)&0x3)<<1)
+#define v_NOC_HURRY_THRESHOLD(x)               (((x)&0x3f)<<3)
+#define v_NOC_QOS_EN(x)                                (((x)&0x1)<<9)
+#define v_NOC_WIN_QOS(x)                       (((x)&0x3)<<10)
+#define v_AXI_MAX_OUTSTANDING_EN(x)            (((x)&0x1)<<12)
+#define v_AXI_OUTSTANDING_MAX_NUM(x)           (((x)&0x1f)<<13)
+
+#define m_NOC_HURRY_EN                         (0x1<<0)
+#define m_NOC_HURRY_VALUE                      (0x3<<1)
+#define m_NOC_HURRY_THRESHOLD                  (0x3f<<3)
+#define m_NOC_QOS_EN                           (0x1<<9)
+#define m_NOC_WIN_QOS                          (0x3<<10)
+#define m_AXI_MAX_OUTSTANDING_EN               (0x1<<12)
+#define m_AXI_OUTSTANDING_MAX_NUM              (0x1f<<13)
+
+#define DSP_CTRL0                      (0x0010)
+#define v_DSP_OUT_MODE(x)                      (((x)&0x0f)<<0)
+#define v_DSP_DCLK_DDR(x)                      (((x)&1)<<8)
+#define v_DSP_DDR_PHASE(x)                     (((x)&1)<<9)
+#define v_DSP_INTERLACE(x)                     (((x)&1)<<10)
+#define v_DSP_FIELD_POL(x)                     (((x)&1)<<11)
+#define v_DSP_BG_SWAP(x)                       (((x)&1)<<12)
+#define v_DSP_RB_SWAP(x)                       (((x)&1)<<13)
+#define v_DSP_RG_SWAP(x)                       (((x)&1)<<14)
+#define v_DSP_DELTA_SWAP(x)                    (((x)&1)<<15)
+#define v_DSP_DUMMY_SWAP(x)                    (((x)&1)<<16)
+#define v_DSP_OUT_ZERO(x)                      (((x)&1)<<17)
+#define v_DSP_BLANK_EN(x)                      (((x)&1)<<18)
+#define v_DSP_BLACK_EN(x)                      (((x)&1)<<19)
+#define v_DSP_CCIR656_AVG(x)                   (((x)&1)<<20)
+#define v_DSP_YUV_CLIP(x)                      (((x)&1)<<21)
+#define v_DSP_X_MIR_EN(x)                      (((x)&1)<<22)
+#define v_DSP_Y_MIR_EN(x)                      (((x)&1)<<23)
+#define m_DSP_OUT_MODE                         (0x0f<<0)
+#define m_DSP_DCLK_DDR                         (1<<8)
+#define m_DSP_DDR_PHASE                                (1<<9)
+#define m_DSP_INTERLACE                                (1<<10)
+#define m_DSP_FIELD_POL                                (1<<11)
+#define m_DSP_BG_SWAP                          (1<<12)
+#define m_DSP_RB_SWAP                          (1<<13)
+#define m_DSP_RG_SWAP                          (1<<14)
+#define m_DSP_DELTA_SWAP                       (1<<15)
+#define m_DSP_DUMMY_SWAP                       (1<<16)
+#define m_DSP_OUT_ZERO                         (1<<17)
+#define m_DSP_BLANK_EN                         (1<<18)
+#define m_DSP_BLACK_EN                         (1<<19)
+#define m_DSP_CCIR656_AVG                      (1<<20)
+#define m_DSP_YUV_CLIP                         (1<<21)
+#define m_DSP_X_MIR_EN                         (1<<22)
+#define m_DSP_Y_MIR_EN                         (1<<23)
+
+#define DSP_CTRL1                      (0x0014)
+#define v_DSP_LUT_EN(x)                                (((x)&1)<<0)
+#define v_PRE_DITHER_DOWN_EN(x)                        (((x)&1)<<1)
+#define v_DITHER_DOWN_EN(x)                    (((x)&1)<<2)
+#define v_DITHER_DOWN_MODE(x)                  (((x)&1)<<3)
+#define v_DITHER_DOWN_SEL(x)                   (((x)&1)<<4)
+#define v_DITHER_UP_EN(x)                      (((x)&1)<<6)
+#define v_DSP_LAYER0_SEL(x)                    (((x)&3)<<8)
+#define v_DSP_LAYER1_SEL(x)                    (((x)&3)<<10)
+#define v_DSP_LAYER2_SEL(x)                    (((x)&3)<<12)
+#define v_DSP_LAYER3_SEL(x)                    (((x)&3)<<14)
+
+#define v_RGB_LVDS_HSYNC_POL(x)                        (((x)&1)<<16)
+#define v_RGB_LVDS_VSYNC_POL(x)                        (((x)&1)<<17)
+#define v_RGB_LVDS_DEN_POL(x)                  (((x)&1)<<18)
+#define v_RGB_LVDS_DCLK_POL(x)                 (((x)&1)<<19)
+
+#define v_HDMI_HSYNC_POL(x)                    (((x)&1)<<20)
+#define v_HDMI_VSYNC_POL(x)                    (((x)&1)<<21)
+#define v_HDMI_DEN_POL(x)                      (((x)&1)<<22)
+#define v_HDMI_DCLK_POL(x)                     (((x)&1)<<23)
+
+#define v_EDP_HSYNC_POL(x)                     (((x)&1)<<24)
+#define v_EDP_VSYNC_POL(x)                     (((x)&1)<<25)
+#define v_EDP_DEN_POL(x)                       (((x)&1)<<26)
+#define v_EDP_DCLK_POL(x)                      (((x)&1)<<27)
+
+#define v_MIPI_HSYNC_POL(x)                    (((x)&1)<<28)
+#define v_MIPI_VSYNC_POL(x)                    (((x)&1)<<29)
+#define v_MIPI_DEN_POL(x)                      (((x)&1)<<30)
+#define v_MIPI_DCLK_POL(x)                     (((x)&1)<<31)
+
+#define m_DSP_LUT_EN                           (1<<0)
+#define m_PRE_DITHER_DOWN_EN                   (1<<1)
+#define m_DITHER_DOWN_EN                       (1<<2)
+#define m_DITHER_DOWN_MODE                     (1<<3)
+#define m_DITHER_DOWN_SEL                      (1<<4)
+#define m_DITHER_UP_EN                         (1<<6)
+#define m_DSP_LAYER0_SEL                       (3<<8)
+#define m_DSP_LAYER1_SEL                       (3<<10)
+#define m_DSP_LAYER2_SEL                       (3<<12)
+#define m_DSP_LAYER3_SEL                       (3<<14)
+
+#define m_RGB_LVDS_HSYNC_POL                   (1<<16)
+#define m_RGB_LVDS_VSYNC_POL                   (1<<17)
+#define m_RGB_LVDS_DEN_POL                     (1<<18)
+#define m_RGB_LVDS_DCLK_POL                    (1<<19)
+
+#define m_HDMI_HSYNC_POL                       (1<<20)
+#define m_HDMI_VSYNC_POL                       (1<<21)
+#define m_HDMI_DEN_POL                         (1<<22)
+#define m_HDMI_DCLK_POL                                (1<<23)
+
+#define m_EDP_HSYNC_POL                                (1<<24)
+#define m_EDP_VSYNC_POL                                (1<<25)
+#define m_EDP_DEN_POL                          (1<<26)
+#define m_EDP_DCLK_POL                         (1<<27)
+
+#define m_MIPI_HSYNC_POL                       (1<<28)
+#define m_MIPI_VSYNC_POL                       (1<<29)
+#define m_MIPI_DEN_POL                         (1<<30)
+#define m_MIPI_DCLK_POL                                (1<<31)
+
+#define DSP_BG                         (0x0018)
+#define v_DSP_BG_BLUE(x)                       (((x)&0xff) << 0)
+#define v_DSP_BG_GREEN(x)                      (((x)&0xff) << 8)
+#define v_DSP_BG_RED(x)                                (((x)&0xff) << 16)
+#define m_DSP_BG_BLUE                          (0xff << 0)
+#define m_DSP_BG_GREEN                         (0xff << 8)
+#define m_DSP_BG_RED                           (0xff << 16)
+
+#define MCU_CTRL                       (0x001c)
+#define v_MCU_PIX_TOTAL(x)                     (((x)&0x3f)<<0)
+#define v_MCU_CS_PST(x)                                (((x)&0xf)<<6)
+#define v_MCU_CS_PEND(x)                       (((x)&0x3f)<<10)
+#define v_MCU_RW_PST(x)                                (((x)&0xf)<<16)
+#define v_MCU_RW_PEND(x)                       (((x)&0x3f)<<20)
+#define v_MCU_CLK_SEL(x)                       (((x)&1)<<26)
+#define v_MCU_HOLD_MODE(x)                     (((x)&1)<<27)
+#define v_MCU_FRAME_ST(x)                      (((x)&1)<<28)
+#define v_MCU_RS(x)                            (((x)&1)<<29)
+#define v_MCU_BYPASS(x)                                (((x)&1)<<30)
+#define v_MCU_TYPE(x)                          (((x)&1)<<31)
+#define m_MCU_PIX_TOTAL                                (0x3f<<0)
+#define m_MCU_CS_PST                           (0xf<<6)
+#define m_MCU_CS_PEND                          (0x3f<<10)
+#define m_MCU_RW_PST                           (0xf<<16)
+#define m_MCU_RW_PEND                          (0x3f<<20)
+#define m_MCU_CLK_SEL                          (1<<26)
+#define m_MCU_HOLD_MODE                                (1<<27)
+#define m_MCU_FRAME_ST                         (1<<28)
+#define m_MCU_RS                               (1<<29)
+#define m_MCU_BYPASS                           (1<<30)
+#define m_MCU_TYPE                             ((u32)1<<31)
+
+#define LINE_FLAG                      (0x0020)
+#define m_DSP_LINE_FLAG0_NUM                   (0x1fff<<0)
+#define m_DSP_LINE_FLAG1_NUM                   (0x1fff<<16)
+#define v_DSP_LINE_FLAG0_NUM(x)                        (((x)&0x1fff)<<0)
+#define v_DSP_LINE_FLAG1_NUM(x)                        (((x)&0x1fff)<<16)
+
+#define INTR_EN                                (0x0024)
+#define v_FS_INTR_EN(x)                                ((((x)&1)<<0) | ((1<<(0+16))))
+#define v_FS_NEW_INTR_EN(x)                    ((((x)&1)<<1) | ((1<<(1+16))))
+#define v_ADDR_SAME_INTR_EN(x)                 ((((x)&1)<<2) | ((1<<(2+16))))
+#define v_LINE_FLAG0_INTR_EN(x)                        ((((x)&1)<<3) | ((1<<(3+16))))
+#define v_LINE_FLAG1_INTR_EN(x)                        ((((x)&1)<<4) | ((1<<(4+16))))
+#define v_BUS_ERROR_INTR_EN(x)                 ((((x)&1)<<5) | ((1<<(5+16))))
+#define v_WIN0_EMPTY_INTR_EN(x)                        ((((x)&1)<<6) | ((1<<(6+16))))
+#define v_WIN1_EMPTY_INTR_EN(x)                        ((((x)&1)<<7) | ((1<<(7+16))))
+#define v_WIN2_EMPTY_INTR_EN(x)                        ((((x)&1)<<8) | ((1<<(8+16))))
+#define v_WIN3_EMPTY_INTR_EN(x)                        ((((x)&1)<<9) | ((1<<(9+16))))
+#define v_HWC_EMPTY_INTR_EN(x)         ((((x)&1)<<10) | ((1<<(10+16))))
+#define v_POST_BUF_EMPTY_INTR_EN(x)    ((((x)&1)<<11) | ((1<<(11+16))))
+#define v_PWM_GEN_INTR_EN(x)           ((((x)&1)<<12) | ((1<<(12+16))))
+#define v_DSP_HOLD_VALID_INTR_EN(x)    ((((x)&1)<<13) | ((1<<(13+16))))
+
+#define m_FS_INTR_EN                           ((1<<0) | ((1<<(0+16))))
+#define m_FS_NEW_INTR_EN                       ((1<<1) | ((1<<(1+16))))
+#define m_ADDR_SAME_INTR_EN                    ((1<<2) | ((1<<(2+16))))
+#define m_LINE_FLAG0_INTR_EN                   ((1<<3) | ((1<<(3+16))))
+#define m_LINE_FLAG1_INTR_EN                   ((1<<4) | ((1<<(4+16))))
+#define m_BUS_ERROR_INTR_EN                    ((1<<5) | ((1<<(5+16))))
+#define m_WIN0_EMPTY_INTR_EN                   ((1<<6) | ((1<<(6+16))))
+#define m_WIN1_EMPTY_INTR_EN                   ((1<<7) | ((1<<(7+16))))
+#define m_WIN2_EMPTY_INTR_EN                   ((1<<8) | ((1<<(8+16))))
+#define m_WIN3_EMPTY_INTR_EN                   ((1<<9) | ((1<<(9+16))))
+#define m_HWC_EMPTY_INTR_EN                    ((1<<10) | ((1<<(10+16))))
+#define m_POST_BUF_EMPTY_INTR_EN               ((1<<11) | ((1<<(11+16))))
+#define m_PWM_GEN_INTR_EN                      ((1<<12) | ((1<<(12+16))))
+#define m_DSP_HOLD_VALID_INTR_EN               ((1<<13) | ((1<<(13+16))))
+
/*
 * INTR_CLEAR (0x0028): write 1 to a bit in [13:0] to clear that interrupt;
 * bits [29:16] are the matching per-bit write-enable bits.
 */
#define INTR_CLEAR			(0x0028)
#define v_FS_INTR_CLR(x)		((((x) & 1) << 0) | (1 << (0 + 16)))
#define v_FS_NEW_INTR_CLR(x)		((((x) & 1) << 1) | (1 << (1 + 16)))
#define v_ADDR_SAME_INTR_CLR(x)		((((x) & 1) << 2) | (1 << (2 + 16)))
#define v_LINE_FLAG0_INTR_CLR(x)	((((x) & 1) << 3) | (1 << (3 + 16)))
#define v_LINE_FLAG1_INTR_CLR(x)	((((x) & 1) << 4) | (1 << (4 + 16)))
#define v_BUS_ERROR_INTR_CLR(x)		((((x) & 1) << 5) | (1 << (5 + 16)))
#define v_WIN0_EMPTY_INTR_CLR(x)	((((x) & 1) << 6) | (1 << (6 + 16)))
#define v_WIN1_EMPTY_INTR_CLR(x)	((((x) & 1) << 7) | (1 << (7 + 16)))
#define v_WIN2_EMPTY_INTR_CLR(x)	((((x) & 1) << 8) | (1 << (8 + 16)))
#define v_WIN3_EMPTY_INTR_CLR(x)	((((x) & 1) << 9) | (1 << (9 + 16)))
#define v_HWC_EMPTY_INTR_CLR(x)		((((x) & 1) << 10) | (1 << (10 + 16)))
#define v_POST_BUF_EMPTY_INTR_CLR(x)	((((x) & 1) << 11) | (1 << (11 + 16)))
#define v_PWM_GEN_INTR_CLR(x)		((((x) & 1) << 12) | (1 << (12 + 16)))
#define v_DSP_HOLD_VALID_INTR_CLR(x)	((((x) & 1) << 13) | (1 << (13 + 16)))

#define m_FS_INTR_CLR			((1 << 0) | (1 << (0 + 16)))
#define m_FS_NEW_INTR_CLR		((1 << 1) | (1 << (1 + 16)))
#define m_ADDR_SAME_INTR_CLR		((1 << 2) | (1 << (2 + 16)))
#define m_LINE_FLAG0_INTR_CLR		((1 << 3) | (1 << (3 + 16)))
#define m_LINE_FLAG1_INTR_CLR		((1 << 4) | (1 << (4 + 16)))
#define m_BUS_ERROR_INTR_CLR		((1 << 5) | (1 << (5 + 16)))
/* FIX: write-enable bit was (5+16) — copy-paste from BUS_ERROR; the clear
 * bit is 6, so the write-enable bit must be (6+16), as in the v_ macro. */
#define m_WIN0_EMPTY_INTR_CLR		((1 << 6) | (1 << (6 + 16)))
#define m_WIN1_EMPTY_INTR_CLR		((1 << 7) | (1 << (7 + 16)))
#define m_WIN2_EMPTY_INTR_CLR		((1 << 8) | (1 << (8 + 16)))
#define m_WIN3_EMPTY_INTR_CLR		((1 << 9) | (1 << (9 + 16)))
#define m_HWC_EMPTY_INTR_CLR		((1 << 10) | (1 << (10 + 16)))
#define m_POST_BUF_EMPTY_INTR_CLR	((1 << 11) | (1 << (11 + 16)))
#define m_PWM_GEN_INTR_CLR		((1 << 12) | (1 << (12 + 16)))
#define m_DSP_HOLD_VALID_INTR_CLR	((1 << 13) | (1 << (13 + 16)))
+
/*
 * INTR_STATUS (0x002c): masked status in bits [13:0],
 * raw (unmasked) status in bits [29:16].
 */
#define INTR_STATUS			(0x002c)
#define m_FS_INTR_STS			(1 << 0)
#define m_FS_NEW_INTR_STS		(1 << 1)
#define m_ADDR_SAME_INTR_STS		(1 << 2)
#define m_LINE_FLAG0_INTR_STS		(1 << 3)
#define m_LINE_FLAG1_INTR_STS		(1 << 4)
#define m_BUS_ERROR_INTR_STS		(1 << 5)
#define m_WIN0_EMPTY_INTR_STS		(1 << 6)
#define m_WIN1_EMPTY_INTR_STS		(1 << 7)
#define m_WIN2_EMPTY_INTR_STS		(1 << 8)
#define m_WIN3_EMPTY_INTR_STS		(1 << 9)
#define m_HWC_EMPTY_INTR_STS		(1 << 10)
#define m_POST_BUF_EMPTY_INTR_STS	(1 << 11)
#define m_PWM_GEN_INTR_STS		(1 << 12)
#define m_DSP_HOLD_VALID_INTR_STS	(1 << 13)

#define m_FS_INTR_RAWSTS		(1 << (0 + 16))
#define m_FS_NEW_INTR_RAWSTS		(1 << (1 + 16))
#define m_ADDR_SAME_INTR_RAWSTS		(1 << (2 + 16))
#define m_LINE_FLAG0_INTR_RAWSTS	(1 << (3 + 16))
#define m_LINE_FLAG1_INTR_RAWSTS	(1 << (4 + 16))
#define m_BUS_ERROR_INTR_RAWSTS		(1 << (5 + 16))
#define m_WIN0_EMPTY_INTR_RAWSTS	(1 << (6 + 16))
#define m_WIN1_EMPTY_INTR_RAWSTS	(1 << (7 + 16))
#define m_WIN2_EMPTY_INTR_RAWSTS	(1 << (8 + 16))
#define m_WIN3_EMPTY_INTR_RAWSTS	(1 << (9 + 16))
#define m_HWC_EMPTY_INTR_RAWSTS		(1 << (10 + 16))
#define m_POST_BUF_EMPTY_INTR_RAWSTS	(1 << (11 + 16))
#define m_PWM_GEN_INTR_RAWSTS		(1 << (12 + 16))
#define m_DSP_HOLD_VALID_INTR_RAWSTS	(1 << (13 + 16))
+
/* WIN0 registers */
#define WIN0_CTRL0			(0x0030)
#define v_WIN0_EN(x)			(((x) & 1) << 0)
#define v_WIN0_DATA_FMT(x)		(((x) & 7) << 1)
#define v_WIN0_FMT_10(x)		(((x) & 1) << 4)
#define v_WIN0_LB_MODE(x)		(((x) & 7) << 5)
#define v_WIN0_INTERLACE_READ(x)	(((x) & 1) << 8)
#define v_WIN0_NO_OUTSTANDING(x)	(((x) & 1) << 9)
#define v_WIN0_CSC_MODE(x)		(((x) & 3) << 10)
#define v_WIN0_RB_SWAP(x)		(((x) & 1) << 12)
#define v_WIN0_ALPHA_SWAP(x)		(((x) & 1) << 13)
#define v_WIN0_MID_SWAP(x)		(((x) & 1) << 14)
#define v_WIN0_UV_SWAP(x)		(((x) & 1) << 15)
#define v_WIN0_HW_PRE_MUL_EN(x)		(((x) & 1) << 16)
#define v_WIN0_YRGB_DEFLICK(x)		(((x) & 1) << 18)
#define v_WIN0_CBR_DEFLICK(x)		(((x) & 1) << 19)
#define v_WIN0_YUV_CLIP(x)		(((x) & 1) << 20)
#define v_WIN0_X_MIRROR(x)		(((x) & 1) << 21)
#define v_WIN0_Y_MIRROR(x)		(((x) & 1) << 22)
#define v_WIN0_AXI_MAX_OUTSTANDING_EN(x)	(((x) & 1) << 24)
#define v_WIN0_AXI_OUTSTANDING_MAX_NUM(x)	(((x) & 0x1f) << 25)
/* unsigned: shifting 0x3 into bits [31:30] of a signed int is UB */
#define v_WIN0_DMA_BURST_LENGTH(x)	(((x) & 0x3U) << 30)

#define m_WIN0_EN			(1 << 0)
#define m_WIN0_DATA_FMT			(7 << 1)
#define m_WIN0_FMT_10			(1 << 4)
#define m_WIN0_LB_MODE			(7 << 5)
#define m_WIN0_INTERLACE_READ		(1 << 8)
#define m_WIN0_NO_OUTSTANDING		(1 << 9)
#define m_WIN0_CSC_MODE			(3 << 10)
#define m_WIN0_RB_SWAP			(1 << 12)
#define m_WIN0_ALPHA_SWAP		(1 << 13)
#define m_WIN0_MID_SWAP			(1 << 14)
#define m_WIN0_UV_SWAP			(1 << 15)
#define m_WIN0_HW_PRE_MUL_EN		(1 << 16)
#define m_WIN0_YRGB_DEFLICK		(1 << 18)
#define m_WIN0_CBR_DEFLICK		(1 << 19)
#define m_WIN0_YUV_CLIP			(1 << 20)
#define m_WIN0_X_MIRROR			(1 << 21)
#define m_WIN0_Y_MIRROR			(1 << 22)
#define m_WIN0_AXI_MAX_OUTSTANDING_EN	(1 << 24)
#define m_WIN0_AXI_OUTSTANDING_MAX_NUM	(0x1f << 25)
#define m_WIN0_DMA_BURST_LENGTH		(0x3U << 30)
+
/* WIN0_CTRL1 (0x0034): AXI gather and Y/CbCr scaler mode controls. */
#define WIN0_CTRL1			(0x0034)
#define v_WIN0_YRGB_AXI_GATHER_EN(x)	(((x) & 1) << 0)
#define v_WIN0_CBR_AXI_GATHER_EN(x)	(((x) & 1) << 1)
#define v_WIN0_BIC_COE_SEL(x)		(((x) & 3) << 2)
#define v_WIN0_VSD_YRGB_GT4(x)		(((x) & 1) << 4)
#define v_WIN0_VSD_YRGB_GT2(x)		(((x) & 1) << 5)
#define v_WIN0_VSD_CBR_GT4(x)		(((x) & 1) << 6)
#define v_WIN0_VSD_CBR_GT2(x)		(((x) & 1) << 7)
#define v_WIN0_YRGB_AXI_GATHER_NUM(x)	(((x) & 0xf) << 8)
#define v_WIN0_CBR_AXI_GATHER_NUM(x)	(((x) & 7) << 12)
#define v_WIN0_LINE_LOAD_MODE(x)	(((x) & 1) << 15)
#define v_WIN0_YRGB_HOR_SCL_MODE(x)	(((x) & 3) << 16)
#define v_WIN0_YRGB_VER_SCL_MODE(x)	(((x) & 3) << 18)
#define v_WIN0_YRGB_HSD_MODE(x)		(((x) & 3) << 20)
#define v_WIN0_YRGB_VSU_MODE(x)		(((x) & 1) << 22)
#define v_WIN0_YRGB_VSD_MODE(x)		(((x) & 1) << 23)
#define v_WIN0_CBR_HOR_SCL_MODE(x)	(((x) & 3) << 24)
#define v_WIN0_CBR_VER_SCL_MODE(x)	(((x) & 3) << 26)
#define v_WIN0_CBR_HSD_MODE(x)		(((x) & 3) << 28)
#define v_WIN0_CBR_VSU_MODE(x)		(((x) & 1) << 30)
/* unsigned: a signed 1 shifted into bit 31 is UB */
#define v_WIN0_CBR_VSD_MODE(x)		(((x) & 1U) << 31)

#define m_WIN0_YRGB_AXI_GATHER_EN	(1 << 0)
#define m_WIN0_CBR_AXI_GATHER_EN	(1 << 1)
#define m_WIN0_BIC_COE_SEL		(3 << 2)
#define m_WIN0_VSD_YRGB_GT4		(1 << 4)
#define m_WIN0_VSD_YRGB_GT2		(1 << 5)
#define m_WIN0_VSD_CBR_GT4		(1 << 6)
#define m_WIN0_VSD_CBR_GT2		(1 << 7)
#define m_WIN0_YRGB_AXI_GATHER_NUM	(0xf << 8)
#define m_WIN0_CBR_AXI_GATHER_NUM	(7 << 12)
#define m_WIN0_LINE_LOAD_MODE		(1 << 15)
#define m_WIN0_YRGB_HOR_SCL_MODE	(3 << 16)
#define m_WIN0_YRGB_VER_SCL_MODE	(3 << 18)
#define m_WIN0_YRGB_HSD_MODE		(3 << 20)
#define m_WIN0_YRGB_VSU_MODE		(1 << 22)
#define m_WIN0_YRGB_VSD_MODE		(1 << 23)
#define m_WIN0_CBR_HOR_SCL_MODE		(3 << 24)
#define m_WIN0_CBR_VER_SCL_MODE		(3 << 26)
#define m_WIN0_CBR_HSD_MODE		(3 << 28)
#define m_WIN0_CBR_VSU_MODE		(1U << 30)
#define m_WIN0_CBR_VSD_MODE		(1U << 31)
+
/*
 * WIN0 color key, stride, geometry and scale-factor registers.
 * Fixes vs. original: macro arguments fully parenthesized in the (x)-1
 * width/height helpers (PRE01-C), and unsigned constants wherever a
 * shift can reach bit 31 or overflow a signed int.
 */
#define WIN0_COLOR_KEY			(0x0038)
#define v_WIN0_COLOR_KEY(x)		(((x) & 0x3fffffff) << 0)
#define v_WIN0_COLOR_KEY_EN(x)		(((x) & 1U) << 31)
#define m_WIN0_COLOR_KEY		(0x3fffffff << 0)
#define m_WIN0_COLOR_KEY_EN		(1U << 31)

#define WIN0_VIR			(0x003c)
#define v_WIN0_VIR_STRIDE(x)		(((x) & 0xffff) << 0)
#define v_WIN0_VIR_STRIDE_UV(x)		(((x) & 0xffffU) << 16)
#define m_WIN0_VIR_STRIDE		(0xffff << 0)
#define m_WIN0_VIR_STRIDE_UV		(0xffffU << 16)

#define WIN0_YRGB_MST			(0x0040)
#define WIN0_CBR_MST			(0x0044)
#define WIN0_ACT_INFO			(0x0048)
/* register holds size minus one */
#define v_WIN0_ACT_WIDTH(x)		((((x) - 1) & 0x1fff) << 0)
#define v_WIN0_ACT_HEIGHT(x)		((((x) - 1) & 0x1fff) << 16)
#define m_WIN0_ACT_WIDTH		(0x1fff << 0)
#define m_WIN0_ACT_HEIGHT		(0x1fff << 16)

#define WIN0_DSP_INFO			(0x004c)
#define v_WIN0_DSP_WIDTH(x)		((((x) - 1) & 0xfff) << 0)
#define v_WIN0_DSP_HEIGHT(x)		((((x) - 1) & 0xfff) << 16)
#define m_WIN0_DSP_WIDTH		(0xfff << 0)
#define m_WIN0_DSP_HEIGHT		(0xfff << 16)

#define WIN0_DSP_ST			(0x0050)
#define v_WIN0_DSP_XST(x)		(((x) & 0x1fff) << 0)
#define v_WIN0_DSP_YST(x)		(((x) & 0x1fff) << 16)
#define m_WIN0_DSP_XST			(0x1fff << 0)
#define m_WIN0_DSP_YST			(0x1fff << 16)

#define WIN0_SCL_FACTOR_YRGB		(0x0054)
#define v_WIN0_HS_FACTOR_YRGB(x)	(((x) & 0xffff) << 0)
#define v_WIN0_VS_FACTOR_YRGB(x)	(((x) & 0xffffU) << 16)
#define m_WIN0_HS_FACTOR_YRGB		(0xffff << 0)
#define m_WIN0_VS_FACTOR_YRGB		(0xffffU << 16)

#define WIN0_SCL_FACTOR_CBR		(0x0058)
#define v_WIN0_HS_FACTOR_CBR(x)		(((x) & 0xffff) << 0)
#define v_WIN0_VS_FACTOR_CBR(x)		(((x) & 0xffffU) << 16)
#define m_WIN0_HS_FACTOR_CBR		(0xffff << 0)
#define m_WIN0_VS_FACTOR_CBR		(0xffffU << 16)

#define WIN0_SCL_OFFSET			(0x005c)
#define v_WIN0_HS_OFFSET_YRGB(x)	(((x) & 0xff) << 0)
#define v_WIN0_HS_OFFSET_CBR(x)		(((x) & 0xff) << 8)
#define v_WIN0_VS_OFFSET_YRGB(x)	(((x) & 0xff) << 16)
#define v_WIN0_VS_OFFSET_CBR(x)		(((x) & 0xffU) << 24)

#define m_WIN0_HS_OFFSET_YRGB		(0xff << 0)
#define m_WIN0_HS_OFFSET_CBR		(0xff << 8)
#define m_WIN0_VS_OFFSET_YRGB		(0xff << 16)
#define m_WIN0_VS_OFFSET_CBR		(0xffU << 24)
+
/* WIN0 source/destination alpha blending, fading, and read-ID controls. */
#define WIN0_SRC_ALPHA_CTRL		(0x0060)
#define v_WIN0_SRC_ALPHA_EN(x)		(((x) & 1) << 0)
#define v_WIN0_SRC_COLOR_M0(x)		(((x) & 1) << 1)
#define v_WIN0_SRC_ALPHA_M0(x)		(((x) & 1) << 2)
#define v_WIN0_SRC_BLEND_M0(x)		(((x) & 3) << 3)
#define v_WIN0_SRC_ALPHA_CAL_M0(x)	(((x) & 1) << 5)
#define v_WIN0_SRC_FACTOR_M0(x)		(((x) & 7) << 6)
#define v_WIN0_SRC_GLOBAL_ALPHA(x)	(((x) & 0xff) << 16)
/* unsigned: 0xff shifted to bits [31:24] overflows a signed int (UB) */
#define v_WIN0_FADING_VALUE(x)		(((x) & 0xffU) << 24)

#define m_WIN0_SRC_ALPHA_EN		(1 << 0)
#define m_WIN0_SRC_COLOR_M0		(1 << 1)
#define m_WIN0_SRC_ALPHA_M0		(1 << 2)
#define m_WIN0_SRC_BLEND_M0		(3 << 3)
#define m_WIN0_SRC_ALPHA_CAL_M0		(1 << 5)
#define m_WIN0_SRC_FACTOR_M0		(7 << 6)
#define m_WIN0_SRC_GLOBAL_ALPHA		(0xff << 16)
#define m_WIN0_FADING_VALUE		(0xffU << 24)

#define WIN0_DST_ALPHA_CTRL		(0x0064)
#define v_WIN0_DST_FACTOR_M0(x)		(((x) & 7) << 6)
#define m_WIN0_DST_FACTOR_M0		(7 << 6)

#define WIN0_FADING_CTRL		(0x0068)
#define v_WIN0_FADING_OFFSET_R(x)	(((x) & 0xff) << 0)
#define v_WIN0_FADING_OFFSET_G(x)	(((x) & 0xff) << 8)
#define v_WIN0_FADING_OFFSET_B(x)	(((x) & 0xff) << 16)
#define v_WIN0_FADING_EN(x)		(((x) & 1) << 24)

#define m_WIN0_FADING_OFFSET_R		(0xff << 0)
#define m_WIN0_FADING_OFFSET_G		(0xff << 8)
#define m_WIN0_FADING_OFFSET_B		(0xff << 16)
#define m_WIN0_FADING_EN		(1 << 24)

#define WIN0_CTRL2			(0x006c)
#define v_WIN_RID_WIN0_YRGB(x)		(((x) & 0xf) << 0)
#define v_WIN_RID_WIN0_CBR(x)		(((x) & 0xf) << 4)
#define m_WIN_RID_WIN0_YRGB		(0xf << 0)
#define m_WIN_RID_WIN0_CBR		(0xf << 4)
/* WIN1 registers (layout mirrors WIN0) */
#define WIN1_CTRL0			(0x0070)
#define v_WIN1_EN(x)			(((x) & 1) << 0)
#define v_WIN1_DATA_FMT(x)		(((x) & 7) << 1)
#define v_WIN1_FMT_10(x)		(((x) & 1) << 4)
#define v_WIN1_LB_MODE(x)		(((x) & 7) << 5)
#define v_WIN1_INTERLACE_READ(x)	(((x) & 1) << 8)
#define v_WIN1_NO_OUTSTANDING(x)	(((x) & 1) << 9)
#define v_WIN1_CSC_MODE(x)		(((x) & 3) << 10)
#define v_WIN1_RB_SWAP(x)		(((x) & 1) << 12)
#define v_WIN1_ALPHA_SWAP(x)		(((x) & 1) << 13)
#define v_WIN1_MID_SWAP(x)		(((x) & 1) << 14)
#define v_WIN1_UV_SWAP(x)		(((x) & 1) << 15)
#define v_WIN1_HW_PRE_MUL_EN(x)		(((x) & 1) << 16)
#define v_WIN1_YRGB_DEFLICK(x)		(((x) & 1) << 18)
#define v_WIN1_CBR_DEFLICK(x)		(((x) & 1) << 19)
#define v_WIN1_YUV_CLIP(x)		(((x) & 1) << 20)
#define v_WIN1_X_MIRROR(x)		(((x) & 1) << 21)
#define v_WIN1_Y_MIRROR(x)		(((x) & 1) << 22)
#define v_WIN1_AXI_MAX_OUTSTANDING_EN(x)	(((x) & 1) << 24)
#define v_WIN1_AXI_OUTSTANDING_MAX_NUM(x)	(((x) & 0x1f) << 25)
/* unsigned: shifting 0x3 into bits [31:30] of a signed int is UB */
#define v_WIN1_DMA_BURST_LENGTH(x)	(((x) & 0x3U) << 30)
#define m_WIN1_EN			(1 << 0)
#define m_WIN1_DATA_FMT			(7 << 1)
#define m_WIN1_FMT_10			(1 << 4)
#define m_WIN1_LB_MODE			(7 << 5)
#define m_WIN1_INTERLACE_READ		(1 << 8)
#define m_WIN1_NO_OUTSTANDING		(1 << 9)
#define m_WIN1_CSC_MODE			(3 << 10)
#define m_WIN1_RB_SWAP			(1 << 12)
#define m_WIN1_ALPHA_SWAP		(1 << 13)
#define m_WIN1_MID_SWAP			(1 << 14)
#define m_WIN1_UV_SWAP			(1 << 15)
#define m_WIN1_HW_PRE_MUL_EN		(1 << 16)
#define m_WIN1_YRGB_DEFLICK		(1 << 18)
#define m_WIN1_CBR_DEFLICK		(1 << 19)
#define m_WIN1_YUV_CLIP			(1 << 20)
#define m_WIN1_X_MIRROR			(1 << 21)
#define m_WIN1_Y_MIRROR			(1 << 22)
#define m_WIN1_AXI_MAX_OUTSTANDING_EN	(1 << 24)
#define m_WIN1_AXI_OUTSTANDING_MAX_NUM	(0x1f << 25)
#define m_WIN1_DMA_BURST_LENGTH		(0x3U << 30)
+
/* WIN1_CTRL1 (0x0074): AXI gather and Y/CbCr scaler mode controls. */
#define WIN1_CTRL1			(0x0074)
#define v_WIN1_YRGB_AXI_GATHER_EN(x)	(((x) & 1) << 0)
#define v_WIN1_CBR_AXI_GATHER_EN(x)	(((x) & 1) << 1)
#define v_WIN1_BIC_COE_SEL(x)		(((x) & 3) << 2)
#define v_WIN1_VSD_YRGB_GT4(x)		(((x) & 1) << 4)
#define v_WIN1_VSD_YRGB_GT2(x)		(((x) & 1) << 5)
#define v_WIN1_VSD_CBR_GT4(x)		(((x) & 1) << 6)
#define v_WIN1_VSD_CBR_GT2(x)		(((x) & 1) << 7)
#define v_WIN1_YRGB_AXI_GATHER_NUM(x)	(((x) & 0xf) << 8)
#define v_WIN1_CBR_AXI_GATHER_NUM(x)	(((x) & 7) << 12)
#define v_WIN1_LINE_LOAD_MODE(x)	(((x) & 1) << 15)
#define v_WIN1_YRGB_HOR_SCL_MODE(x)	(((x) & 3) << 16)
#define v_WIN1_YRGB_VER_SCL_MODE(x)	(((x) & 3) << 18)
#define v_WIN1_YRGB_HSD_MODE(x)		(((x) & 3) << 20)
#define v_WIN1_YRGB_VSU_MODE(x)		(((x) & 1) << 22)
#define v_WIN1_YRGB_VSD_MODE(x)		(((x) & 1) << 23)
#define v_WIN1_CBR_HOR_SCL_MODE(x)	(((x) & 3) << 24)
#define v_WIN1_CBR_VER_SCL_MODE(x)	(((x) & 3) << 26)
#define v_WIN1_CBR_HSD_MODE(x)		(((x) & 3) << 28)
#define v_WIN1_CBR_VSU_MODE(x)		(((x) & 1) << 30)
/* unsigned: a signed 1 shifted into bit 31 is UB */
#define v_WIN1_CBR_VSD_MODE(x)		(((x) & 1U) << 31)

#define m_WIN1_YRGB_AXI_GATHER_EN	(1 << 0)
#define m_WIN1_CBR_AXI_GATHER_EN	(1 << 1)
#define m_WIN1_BIC_COE_SEL		(3 << 2)
#define m_WIN1_VSD_YRGB_GT4		(1 << 4)
#define m_WIN1_VSD_YRGB_GT2		(1 << 5)
#define m_WIN1_VSD_CBR_GT4		(1 << 6)
#define m_WIN1_VSD_CBR_GT2		(1 << 7)
#define m_WIN1_YRGB_AXI_GATHER_NUM	(0xf << 8)
#define m_WIN1_CBR_AXI_GATHER_NUM	(7 << 12)
#define m_WIN1_LINE_LOAD_MODE		(1 << 15)
#define m_WIN1_YRGB_HOR_SCL_MODE	(3 << 16)
#define m_WIN1_YRGB_VER_SCL_MODE	(3 << 18)
#define m_WIN1_YRGB_HSD_MODE		(3 << 20)
#define m_WIN1_YRGB_VSU_MODE		(1 << 22)
#define m_WIN1_YRGB_VSD_MODE		(1 << 23)
#define m_WIN1_CBR_HOR_SCL_MODE		(3 << 24)
#define m_WIN1_CBR_VER_SCL_MODE		(3 << 26)
#define m_WIN1_CBR_HSD_MODE		(3 << 28)
/* unsigned for consistency with the WIN0 counterpart */
#define m_WIN1_CBR_VSU_MODE		(1U << 30)
#define m_WIN1_CBR_VSD_MODE		(1U << 31)
+
/*
 * WIN1 color key, stride, geometry and scale-factor registers.
 * Fixes vs. original: macro arguments fully parenthesized in the (x)-1
 * width/height helpers (PRE01-C), and unsigned constants wherever a
 * shift can reach bit 31 or overflow a signed int.
 */
#define WIN1_COLOR_KEY			(0x0078)
#define v_WIN1_COLOR_KEY(x)		(((x) & 0x3fffffff) << 0)
#define v_WIN1_COLOR_KEY_EN(x)		(((x) & 1U) << 31)
#define m_WIN1_COLOR_KEY		(0x3fffffff << 0)
#define m_WIN1_COLOR_KEY_EN		(1U << 31)

#define WIN1_VIR			(0x007c)
#define v_WIN1_VIR_STRIDE(x)		(((x) & 0xffff) << 0)
#define v_WIN1_VIR_STRIDE_UV(x)		(((x) & 0xffffU) << 16)
#define m_WIN1_VIR_STRIDE		(0xffff << 0)
#define m_WIN1_VIR_STRIDE_UV		(0xffffU << 16)

#define WIN1_YRGB_MST			(0x0080)
#define WIN1_CBR_MST			(0x0084)
#define WIN1_ACT_INFO			(0x0088)
/* register holds size minus one */
#define v_WIN1_ACT_WIDTH(x)		((((x) - 1) & 0x1fff) << 0)
#define v_WIN1_ACT_HEIGHT(x)		((((x) - 1) & 0x1fff) << 16)
#define m_WIN1_ACT_WIDTH		(0x1fff << 0)
#define m_WIN1_ACT_HEIGHT		(0x1fff << 16)

#define WIN1_DSP_INFO			(0x008c)
#define v_WIN1_DSP_WIDTH(x)		((((x) - 1) & 0xfff) << 0)
#define v_WIN1_DSP_HEIGHT(x)		((((x) - 1) & 0xfff) << 16)
#define m_WIN1_DSP_WIDTH		(0xfff << 0)
#define m_WIN1_DSP_HEIGHT		(0xfff << 16)

#define WIN1_DSP_ST			(0x0090)
#define v_WIN1_DSP_XST(x)		(((x) & 0x1fff) << 0)
#define v_WIN1_DSP_YST(x)		(((x) & 0x1fff) << 16)
#define m_WIN1_DSP_XST			(0x1fff << 0)
#define m_WIN1_DSP_YST			(0x1fff << 16)

#define WIN1_SCL_FACTOR_YRGB		(0x0094)
#define v_WIN1_HS_FACTOR_YRGB(x)	(((x) & 0xffff) << 0)
#define v_WIN1_VS_FACTOR_YRGB(x)	(((x) & 0xffffU) << 16)
#define m_WIN1_HS_FACTOR_YRGB		(0xffff << 0)
#define m_WIN1_VS_FACTOR_YRGB		(0xffffU << 16)

#define WIN1_SCL_FACTOR_CBR		(0x0098)
#define v_WIN1_HS_FACTOR_CBR(x)		(((x) & 0xffff) << 0)
#define v_WIN1_VS_FACTOR_CBR(x)		(((x) & 0xffffU) << 16)
#define m_WIN1_HS_FACTOR_CBR		(0xffff << 0)
#define m_WIN1_VS_FACTOR_CBR		(0xffffU << 16)

#define WIN1_SCL_OFFSET			(0x009c)
#define v_WIN1_HS_OFFSET_YRGB(x)	(((x) & 0xff) << 0)
#define v_WIN1_HS_OFFSET_CBR(x)		(((x) & 0xff) << 8)
#define v_WIN1_VS_OFFSET_YRGB(x)	(((x) & 0xff) << 16)
#define v_WIN1_VS_OFFSET_CBR(x)		(((x) & 0xffU) << 24)

#define m_WIN1_HS_OFFSET_YRGB		(0xff << 0)
#define m_WIN1_HS_OFFSET_CBR		(0xff << 8)
#define m_WIN1_VS_OFFSET_YRGB		(0xff << 16)
#define m_WIN1_VS_OFFSET_CBR		(0xffU << 24)
+
/* WIN1 source/destination alpha blending, fading, and read-ID controls. */
#define WIN1_SRC_ALPHA_CTRL		(0x00a0)
#define v_WIN1_SRC_ALPHA_EN(x)		(((x) & 1) << 0)
#define v_WIN1_SRC_COLOR_M0(x)		(((x) & 1) << 1)
#define v_WIN1_SRC_ALPHA_M0(x)		(((x) & 1) << 2)
#define v_WIN1_SRC_BLEND_M0(x)		(((x) & 3) << 3)
#define v_WIN1_SRC_ALPHA_CAL_M0(x)	(((x) & 1) << 5)
#define v_WIN1_SRC_FACTOR_M0(x)		(((x) & 7) << 6)
#define v_WIN1_SRC_GLOBAL_ALPHA(x)	(((x) & 0xff) << 16)
/* unsigned: 0xff shifted to bits [31:24] overflows a signed int (UB) */
#define v_WIN1_FADING_VALUE(x)		(((x) & 0xffU) << 24)

#define m_WIN1_SRC_ALPHA_EN		(1 << 0)
#define m_WIN1_SRC_COLOR_M0		(1 << 1)
#define m_WIN1_SRC_ALPHA_M0		(1 << 2)
#define m_WIN1_SRC_BLEND_M0		(3 << 3)
#define m_WIN1_SRC_ALPHA_CAL_M0		(1 << 5)
#define m_WIN1_SRC_FACTOR_M0		(7 << 6)
#define m_WIN1_SRC_GLOBAL_ALPHA		(0xff << 16)
#define m_WIN1_FADING_VALUE		(0xffU << 24)

#define WIN1_DST_ALPHA_CTRL		(0x00a4)
#define v_WIN1_DST_FACTOR_M0(x)		(((x) & 7) << 6)
#define m_WIN1_DST_FACTOR_M0		(7 << 6)

#define WIN1_FADING_CTRL		(0x00a8)
#define v_WIN1_FADING_OFFSET_R(x)	(((x) & 0xff) << 0)
#define v_WIN1_FADING_OFFSET_G(x)	(((x) & 0xff) << 8)
#define v_WIN1_FADING_OFFSET_B(x)	(((x) & 0xff) << 16)
#define v_WIN1_FADING_EN(x)		(((x) & 1) << 24)

#define m_WIN1_FADING_OFFSET_R		(0xff << 0)
#define m_WIN1_FADING_OFFSET_G		(0xff << 8)
#define m_WIN1_FADING_OFFSET_B		(0xff << 16)
#define m_WIN1_FADING_EN		(1 << 24)

/* 0x00ac: written as four hex digits for consistency with sibling offsets */
#define WIN1_CTRL2			(0x00ac)
#define v_WIN_RID_WIN1_YRGB(x)		(((x) & 0xf) << 0)
#define v_WIN_RID_WIN1_CBR(x)		(((x) & 0xf) << 4)
#define m_WIN_RID_WIN1_YRGB		(0xf << 0)
#define m_WIN_RID_WIN1_CBR		(0xf << 4)
+/*win2 register*/
/* WIN2_CTRL0 (0x00b0): four sub-areas (MST0..3), each with its own
 * enable, format and byte-swap controls. */
#define WIN2_CTRL0			(0x00b0)
#define v_WIN2_EN(x)			(((x) & 1) << 0)
#define v_WIN2_INTERLACE_READ(x)	(((x) & 1) << 1)
#define v_WIN2_CSC_MODE(x)		(((x) & 1) << 2)
#define v_WIN2_MST0_EN(x)		(((x) & 1) << 4)
#define v_WIN2_DATA_FMT0(x)		(((x) & 3) << 5)
#define v_WIN2_MST1_EN(x)		(((x) & 1) << 8)
#define v_WIN2_DATA_FMT1(x)		(((x) & 3) << 9)
#define v_WIN2_MST2_EN(x)		(((x) & 1) << 12)
#define v_WIN2_DATA_FMT2(x)		(((x) & 3) << 13)
#define v_WIN2_MST3_EN(x)		(((x) & 1) << 16)
#define v_WIN2_DATA_FMT3(x)		(((x) & 3) << 17)
#define v_WIN2_RB_SWAP0(x)		(((x) & 1) << 20)
#define v_WIN2_ALPHA_SWAP0(x)		(((x) & 1) << 21)
#define v_WIN2_ENDIAN_SWAP0(x)		(((x) & 1) << 22)
#define v_WIN2_RB_SWAP1(x)		(((x) & 1) << 23)
#define v_WIN2_ALPHA_SWAP1(x)		(((x) & 1) << 24)
#define v_WIN2_ENDIAN_SWAP1(x)		(((x) & 1) << 25)
#define v_WIN2_RB_SWAP2(x)		(((x) & 1) << 26)
#define v_WIN2_ALPHA_SWAP2(x)		(((x) & 1) << 27)
#define v_WIN2_ENDIAN_SWAP2(x)		(((x) & 1) << 28)
#define v_WIN2_RB_SWAP3(x)		(((x) & 1) << 29)
#define v_WIN2_ALPHA_SWAP3(x)		(((x) & 1) << 30)
/* unsigned: a signed 1 shifted into bit 31 is UB */
#define v_WIN2_ENDIAN_SWAP3(x)		(((x) & 1U) << 31)

#define m_WIN2_EN			(1 << 0)
#define m_WIN2_INTERLACE_READ		(1 << 1)
#define m_WIN2_CSC_MODE			(1 << 2)
#define m_WIN2_MST0_EN			(1 << 4)
#define m_WIN2_DATA_FMT0		(3 << 5)
#define m_WIN2_MST1_EN			(1 << 8)
#define m_WIN2_DATA_FMT1		(3 << 9)
#define m_WIN2_MST2_EN			(1 << 12)
#define m_WIN2_DATA_FMT2		(3 << 13)
#define m_WIN2_MST3_EN			(1 << 16)
#define m_WIN2_DATA_FMT3		(3 << 17)
#define m_WIN2_RB_SWAP0			(1 << 20)
#define m_WIN2_ALPHA_SWAP0		(1 << 21)
#define m_WIN2_ENDIAN_SWAP0		(1 << 22)
#define m_WIN2_RB_SWAP1			(1 << 23)
#define m_WIN2_ALPHA_SWAP1		(1 << 24)
#define m_WIN2_ENDIAN_SWAP1		(1 << 25)
#define m_WIN2_RB_SWAP2			(1 << 26)
#define m_WIN2_ALPHA_SWAP2		(1 << 27)
#define m_WIN2_ENDIAN_SWAP2		(1 << 28)
#define m_WIN2_RB_SWAP3			(1 << 29)
#define m_WIN2_ALPHA_SWAP3		(1 << 30)
#define m_WIN2_ENDIAN_SWAP3		(1U << 31)
+
/* WIN2_CTRL1 (0x00b4): AXI gather/outstanding, LUT and read-ID controls. */
#define WIN2_CTRL1			(0x00b4)
#define v_WIN2_AXI_GATHER_EN(x)		(((x) & 1) << 0)
#define v_WIN2_AXI_MAX_OUTSTANDING_EN(x)	(((x) & 1) << 1)
#define v_WIN2_DMA_BURST_LENGTH(x)	(((x) & 0x3) << 2)
#define v_WIN2_AXI_GATHER_NUM(x)	(((x) & 0xf) << 4)
#define v_WIN2_AXI_OUTSTANDING_MAX_NUM(x)	(((x) & 0x1f) << 8)
#define v_WIN2_RGB2YUV_EN(x)		(((x) & 1) << 13)
#define v_WIN2_NO_OUTSTANDING(x)	(((x) & 1) << 14)
#define v_WIN2_Y_MIR(x)			(((x) & 1) << 15)
#define v_WIN2_LUT_EN(x)		(((x) & 1) << 16)
#define v_WIN_RID_WIN2(x)		(((x) & 0xf) << 20)

#define m_WIN2_AXI_GATHER_EN		(1 << 0)
#define m_WIN2_AXI_MAX_OUTSTANDING_EN	(1 << 1)
#define m_WIN2_DMA_BURST_LENGTH		(0x3 << 2)
#define m_WIN2_AXI_GATHER_NUM		(0xf << 4)
#define m_WIN2_AXI_OUTSTANDING_MAX_NUM	(0x1f << 8)
#define m_WIN2_RGB2YUV_EN		(1 << 13)
#define m_WIN2_NO_OUTSTANDING		(1 << 14)
#define m_WIN2_Y_MIR			(1 << 15)
#define m_WIN2_LUT_EN			(1 << 16)
#define m_WIN_RID_WIN2			(0xf << 20)

/* unsigned: 0xffff shifted to bits [31:16] overflows a signed int (UB) */
#define WIN2_VIR0_1			(0x00b8)
#define v_WIN2_VIR_STRIDE0(x)		(((x) & 0xffff) << 0)
#define v_WIN2_VIR_STRIDE1(x)		(((x) & 0xffffU) << 16)
#define m_WIN2_VIR_STRIDE0		(0xffffU << 0)
#define m_WIN2_VIR_STRIDE1		(0xffffU << 16)

#define WIN2_VIR2_3			(0x00bc)
#define v_WIN2_VIR_STRIDE2(x)		(((x) & 0xffff) << 0)
#define v_WIN2_VIR_STRIDE3(x)		(((x) & 0xffffU) << 16)
#define m_WIN2_VIR_STRIDE2		(0xffffU << 0)
#define m_WIN2_VIR_STRIDE3		(0xffffU << 16)
+
/* WIN2 sub-areas 0/1: start address, display geometry, color key, alpha. */
#define WIN2_MST0			(0x00c0)
#define WIN2_DSP_INFO0			(0x00c4)
/* register holds size minus one; argument fully parenthesized (PRE01-C) */
#define v_WIN2_DSP_WIDTH0(x)		((((x) - 1) & 0xfff) << 0)
#define v_WIN2_DSP_HEIGHT0(x)		((((x) - 1) & 0xfff) << 16)
#define m_WIN2_DSP_WIDTH0		(0xfff << 0)
#define m_WIN2_DSP_HEIGHT0		(0xfff << 16)

#define WIN2_DSP_ST0			(0x00c8)
#define v_WIN2_DSP_XST0(x)		(((x) & 0x1fff) << 0)
#define v_WIN2_DSP_YST0(x)		(((x) & 0x1fff) << 16)
#define m_WIN2_DSP_XST0			(0x1fff << 0)
#define m_WIN2_DSP_YST0			(0x1fff << 16)

#define WIN2_COLOR_KEY			(0x00cc)
#define v_WIN2_COLOR_KEY(x)		(((x) & 0xffffff) << 0)
#define v_WIN2_KEY_EN(x)		(((x) & 1) << 24)
#define m_WIN2_COLOR_KEY		(0xffffff << 0)
#define m_WIN2_KEY_EN			(1U << 24)


#define WIN2_MST1			(0x00d0)
#define WIN2_DSP_INFO1			(0x00d4)
#define v_WIN2_DSP_WIDTH1(x)		((((x) - 1) & 0xfff) << 0)
#define v_WIN2_DSP_HEIGHT1(x)		((((x) - 1) & 0xfff) << 16)

#define m_WIN2_DSP_WIDTH1		(0xfff << 0)
#define m_WIN2_DSP_HEIGHT1		(0xfff << 16)

#define WIN2_DSP_ST1			(0x00d8)
#define v_WIN2_DSP_XST1(x)		(((x) & 0x1fff) << 0)
#define v_WIN2_DSP_YST1(x)		(((x) & 0x1fff) << 16)
#define m_WIN2_DSP_XST1			(0x1fff << 0)
#define m_WIN2_DSP_YST1			(0x1fff << 16)

#define WIN2_SRC_ALPHA_CTRL		(0x00dc)
#define v_WIN2_SRC_ALPHA_EN(x)		(((x) & 1) << 0)
#define v_WIN2_SRC_COLOR_M0(x)		(((x) & 1) << 1)
#define v_WIN2_SRC_ALPHA_M0(x)		(((x) & 1) << 2)
#define v_WIN2_SRC_BLEND_M0(x)		(((x) & 3) << 3)
#define v_WIN2_SRC_ALPHA_CAL_M0(x)	(((x) & 1) << 5)
#define v_WIN2_SRC_FACTOR_M0(x)		(((x) & 7) << 6)
#define v_WIN2_SRC_GLOBAL_ALPHA(x)	(((x) & 0xff) << 16)
/* unsigned: 0xff shifted to bits [31:24] overflows a signed int (UB) */
#define v_WIN2_FADING_VALUE(x)		(((x) & 0xffU) << 24)
#define m_WIN2_SRC_ALPHA_EN		(1 << 0)
#define m_WIN2_SRC_COLOR_M0		(1 << 1)
#define m_WIN2_SRC_ALPHA_M0		(1 << 2)
#define m_WIN2_SRC_BLEND_M0		(3 << 3)
#define m_WIN2_SRC_ALPHA_CAL_M0		(1 << 5)
#define m_WIN2_SRC_FACTOR_M0		(7 << 6)
#define m_WIN2_SRC_GLOBAL_ALPHA		(0xff << 16)
#define m_WIN2_FADING_VALUE		(0xffU << 24)
+
/* WIN2 sub-areas 2/3: start address and display geometry; fading control. */
#define WIN2_MST2			(0x00e0)
#define WIN2_DSP_INFO2			(0x00e4)
/* register holds size minus one; argument fully parenthesized (PRE01-C) */
#define v_WIN2_DSP_WIDTH2(x)		((((x) - 1) & 0xfff) << 0)
#define v_WIN2_DSP_HEIGHT2(x)		((((x) - 1) & 0xfff) << 16)
#define m_WIN2_DSP_WIDTH2		(0xfff << 0)
#define m_WIN2_DSP_HEIGHT2		(0xfff << 16)

#define WIN2_DSP_ST2			(0x00e8)
#define v_WIN2_DSP_XST2(x)		(((x) & 0x1fff) << 0)
#define v_WIN2_DSP_YST2(x)		(((x) & 0x1fff) << 16)
#define m_WIN2_DSP_XST2			(0x1fff << 0)
#define m_WIN2_DSP_YST2			(0x1fff << 16)

#define WIN2_DST_ALPHA_CTRL		(0x00ec)
#define v_WIN2_DST_FACTOR_M0(x)		(((x) & 7) << 6)
#define m_WIN2_DST_FACTOR_M0		(7 << 6)

#define WIN2_MST3			(0x00f0)
#define WIN2_DSP_INFO3			(0x00f4)
#define v_WIN2_DSP_WIDTH3(x)		((((x) - 1) & 0xfff) << 0)
#define v_WIN2_DSP_HEIGHT3(x)		((((x) - 1) & 0xfff) << 16)
#define m_WIN2_DSP_WIDTH3		(0xfff << 0)
#define m_WIN2_DSP_HEIGHT3		(0xfff << 16)

#define WIN2_DSP_ST3			(0x00f8)
#define v_WIN2_DSP_XST3(x)		(((x) & 0x1fff) << 0)
#define v_WIN2_DSP_YST3(x)		(((x) & 0x1fff) << 16)
#define m_WIN2_DSP_XST3			(0x1fff << 0)
#define m_WIN2_DSP_YST3			(0x1fff << 16)

#define WIN2_FADING_CTRL		(0x00fc)
#define v_WIN2_FADING_OFFSET_R(x)	(((x) & 0xff) << 0)
#define v_WIN2_FADING_OFFSET_G(x)	(((x) & 0xff) << 8)
#define v_WIN2_FADING_OFFSET_B(x)	(((x) & 0xff) << 16)
#define v_WIN2_FADING_EN(x)		(((x) & 1) << 24)

#define m_WIN2_FADING_OFFSET_R		(0xff << 0)
#define m_WIN2_FADING_OFFSET_G		(0xff << 8)
#define m_WIN2_FADING_OFFSET_B		(0xff << 16)
#define m_WIN2_FADING_EN		(1 << 24)
+
+/* win3 register: WIN3_CTRL0 (0x0100) enables the window and its four areas;
+ * v_*() pack field values, m_* are the matching masks. */
+#define WIN3_CTRL0                     (0x0100)
+#define v_WIN3_EN(x)                           (((x)&1)<<0)
+#define v_WIN3_INTERLACE_READ(x)               (((x)&1)<<1)
+#define v_WIN3_CSC_MODE(x)                     (((x)&1)<<2)
+#define v_WIN3_MST0_EN(x)                      (((x)&1)<<4)
+#define v_WIN3_DATA_FMT0(x)                    (((x)&3)<<5)
+#define v_WIN3_MST1_EN(x)                      (((x)&1)<<8)
+#define v_WIN3_DATA_FMT1(x)                    (((x)&3)<<9)
+#define v_WIN3_MST2_EN(x)                      (((x)&1)<<12)
+#define v_WIN3_DATA_FMT2(x)                    (((x)&3)<<13)
+#define v_WIN3_MST3_EN(x)                      (((x)&1)<<16)
+#define v_WIN3_DATA_FMT3(x)                    (((x)&3)<<17)
+#define v_WIN3_RB_SWAP0(x)                     (((x)&1)<<20)
+#define v_WIN3_ALPHA_SWAP0(x)                  (((x)&1)<<21)
+#define v_WIN3_ENDIAN_SWAP0(x)                 (((x)&1)<<22)
+#define v_WIN3_RB_SWAP1(x)                     (((x)&1)<<23)
+#define v_WIN3_ALPHA_SWAP1(x)                  (((x)&1)<<24)
+#define v_WIN3_ENDIAN_SWAP1(x)                 (((x)&1)<<25)
+#define v_WIN3_RB_SWAP2(x)                     (((x)&1)<<26)
+#define v_WIN3_ALPHA_SWAP2(x)                  (((x)&1)<<27)
+#define v_WIN3_ENDIAN_SWAP2(x)                 (((x)&1)<<28)
+#define v_WIN3_RB_SWAP3(x)                     (((x)&1)<<29)
+#define v_WIN3_ALPHA_SWAP3(x)                  (((x)&1)<<30)
+/* bit 31: shift a u32, not a signed int — (1<<31) on 32-bit int is undefined
+ * behavior; matches the ((u32)...<<N) idiom used elsewhere in this file. */
+#define v_WIN3_ENDIAN_SWAP3(x)                 ((u32)((x)&1)<<31)
+
+#define m_WIN3_EN                              (1<<0)
+#define m_WIN3_INTERLACE_READ                  (1<<1)
+#define m_WIN3_CSC_MODE                                (1<<2)
+#define m_WIN3_MST0_EN                         (1<<4)
+#define m_WIN3_DATA_FMT0                       (3<<5)
+#define m_WIN3_MST1_EN                         (1<<8)
+#define m_WIN3_DATA_FMT1                       (3<<9)
+#define m_WIN3_MST2_EN                         (1<<12)
+#define m_WIN3_DATA_FMT2                       (3<<13)
+#define m_WIN3_MST3_EN                         (1<<16)
+#define m_WIN3_DATA_FMT3                       (3<<17)
+#define m_WIN3_RB_SWAP0                                (1<<20)
+#define m_WIN3_ALPHA_SWAP0                     (1<<21)
+#define m_WIN3_ENDIAN_SWAP0                    (1<<22)
+#define m_WIN3_RB_SWAP1                                (1<<23)
+#define m_WIN3_ALPHA_SWAP1                     (1<<24)
+#define m_WIN3_ENDIAN_SWAP1                    (1<<25)
+#define m_WIN3_RB_SWAP2                                (1<<26)
+#define m_WIN3_ALPHA_SWAP2                     (1<<27)
+#define m_WIN3_ENDIAN_SWAP2                    (1<<28)
+#define m_WIN3_RB_SWAP3                                (1<<29)
+#define m_WIN3_ALPHA_SWAP3                     (1<<30)
+#define m_WIN3_ENDIAN_SWAP3                    ((u32)1<<31)
+/* WIN3_CTRL1 (0x0104): AXI/DMA parameters for win3 (gather, burst length,
+ * outstanding limits, LUT enable); WIN3_VIR0_1/VIR2_3 hold the virtual
+ * strides of the window's four areas. */
+#define WIN3_CTRL1                     (0x0104)
+#define v_WIN3_AXI_GATHER_EN(x)                        (((x)&1)<<0)
+#define v_WIN3_AXI_MAX_OUTSTANDING_EN(x)       (((x)&1)<<1)
+#define v_WIN3_DMA_BURST_LENGTH(x)             (((x)&0x3)<<2)
+#define v_WIN3_AXI_GATHER_NUM(x)               (((x)&0xf)<<4)
+#define v_WIN3_AXI_OUTSTANDING_MAX_NUM(x)      (((x)&0x1f)<<8)
+#define v_WIN3_NO_OUTSTANDING(x)               (((x)&1)<<14)
+#define v_WIN3_Y_MIR(x)                                (((x)&1)<<15)
+#define v_WIN3_LUT_EN(x)                       (((x)&1)<<16)
+#define v_WIN_RID_WIN3(x)                      (((x)&0xf)<<20)
+
+#define m_WIN3_AXI_GATHER_EN                   (1<<0)
+#define m_WIN3_AXI_MAX_OUTSTANDING_EN          (1<<1)
+#define m_WIN3_DMA_BURST_LENGTH                        (0x3<<2)
+#define m_WIN3_AXI_GATHER_NUM                  (0xf<<4)
+#define m_WIN3_AXI_OUTSTANDING_MAX_NUM         (0x1f<<8)
+#define m_WIN3_NO_OUTSTANDING                  (1<<14)
+#define m_WIN3_Y_MIR                           (1<<15)
+#define m_WIN3_LUT_EN                          (1<<16)
+#define m_WIN_RID_WIN3                         (0xf<<20)
+
+/* per-area virtual stride, 16 bits each (areas 0/1 and 2/3 share a register) */
+#define WIN3_VIR0_1                    (0x0108)
+#define v_WIN3_VIR_STRIDE0(x)                  (((x)&0xffff)<<0)
+#define v_WIN3_VIR_STRIDE1(x)                  (((x)&0xffff)<<16)
+#define m_WIN3_VIR_STRIDE0                     (0xffff<<0)
+#define m_WIN3_VIR_STRIDE1                     (0xffff<<16)
+
+#define WIN3_VIR2_3                    (0x010c)
+#define v_WIN3_VIR_STRIDE2(x)                  (((x)&0xffff)<<0)
+#define v_WIN3_VIR_STRIDE3(x)                  (((x)&0xffff)<<16)
+#define m_WIN3_VIR_STRIDE2                     (0xffff<<0)
+#define m_WIN3_VIR_STRIDE3                     (0xffff<<16)
+#define WIN3_MST0                      (0x0110)
+#define WIN3_DSP_INFO0                 (0x0114)
+/* display size fields store size-1; parenthesize the argument before "-1" so
+ * expression arguments group correctly (e.g. (a|b) -> ((a|b)-1)). */
+#define v_WIN3_DSP_WIDTH0(x)                   ((((x)-1)&0xfff)<<0)
+#define v_WIN3_DSP_HEIGHT0(x)                  ((((x)-1)&0xfff)<<16)
+#define m_WIN3_DSP_WIDTH0                      (0xfff<<0)
+#define m_WIN3_DSP_HEIGHT0                     (0xfff<<16)
+
+#define WIN3_DSP_ST0                   (0x0118)
+#define v_WIN3_DSP_XST0(x)                     (((x)&0x1fff)<<0)
+#define v_WIN3_DSP_YST0(x)                     (((x)&0x1fff)<<16)
+#define m_WIN3_DSP_XST0                                (0x1fff<<0)
+#define m_WIN3_DSP_YST0                                (0x1fff<<16)
+
+#define WIN3_COLOR_KEY                 (0x011c)
+#define v_WIN3_COLOR_KEY(x)                    (((x)&0xffffff)<<0)
+#define v_WIN3_KEY_EN(x)                       (((x)&1)<<24)
+#define m_WIN3_COLOR_KEY                       (0xffffff<<0)
+#define m_WIN3_KEY_EN                          ((u32)1<<24)
+
+#define WIN3_MST1                      (0x0120)
+#define WIN3_DSP_INFO1                 (0x0124)
+#define v_WIN3_DSP_WIDTH1(x)                   ((((x)-1)&0xfff)<<0)
+#define v_WIN3_DSP_HEIGHT1(x)                  ((((x)-1)&0xfff)<<16)
+#define m_WIN3_DSP_WIDTH1                      (0xfff<<0)
+#define m_WIN3_DSP_HEIGHT1                     (0xfff<<16)
+
+#define WIN3_DSP_ST1                   (0x0128)
+#define v_WIN3_DSP_XST1(x)                     (((x)&0x1fff)<<0)
+#define v_WIN3_DSP_YST1(x)                     (((x)&0x1fff)<<16)
+#define m_WIN3_DSP_XST1                                (0x1fff<<0)
+#define m_WIN3_DSP_YST1                                (0x1fff<<16)
+/* WIN3_SRC_ALPHA_CTRL (0x012c): source-alpha blending configuration for win3
+ * (enable, blend/calc modes, global alpha, fading value). */
+#define WIN3_SRC_ALPHA_CTRL            (0x012c)
+#define v_WIN3_SRC_ALPHA_EN(x)                 (((x)&1)<<0)
+#define v_WIN3_SRC_COLOR_M0(x)                 (((x)&1)<<1)
+#define v_WIN3_SRC_ALPHA_M0(x)                 (((x)&1)<<2)
+#define v_WIN3_SRC_BLEND_M0(x)                 (((x)&3)<<3)
+#define v_WIN3_SRC_ALPHA_CAL_M0(x)             (((x)&1)<<5)
+#define v_WIN3_SRC_FACTOR_M0(x)                        (((x)&7)<<6)
+#define v_WIN3_SRC_GLOBAL_ALPHA(x)             (((x)&0xff)<<16)
+#define v_WIN3_FADING_VALUE(x)                 (((x)&0xff)<<24)
+
+#define m_WIN3_SRC_ALPHA_EN                    (1<<0)
+#define m_WIN3_SRC_COLOR_M0                    (1<<1)
+#define m_WIN3_SRC_ALPHA_M0                    (1<<2)
+#define m_WIN3_SRC_BLEND_M0                    (3<<3)
+#define m_WIN3_SRC_ALPHA_CAL_M0                        (1<<5)
+#define m_WIN3_SRC_FACTOR_M0                   (7<<6)
+#define m_WIN3_SRC_GLOBAL_ALPHA                        (0xff<<16)
+#define m_WIN3_FADING_VALUE                    (0xff<<24)
+#define WIN3_MST2                      (0x0130)
+#define WIN3_DSP_INFO2                 (0x0134)
+/* display size fields store size-1; parenthesize the argument before "-1" so
+ * expression arguments group correctly (e.g. (a|b) -> ((a|b)-1)). */
+#define v_WIN3_DSP_WIDTH2(x)                   ((((x)-1)&0xfff)<<0)
+#define v_WIN3_DSP_HEIGHT2(x)                  ((((x)-1)&0xfff)<<16)
+#define m_WIN3_DSP_WIDTH2                      (0xfff<<0)
+#define m_WIN3_DSP_HEIGHT2                     (0xfff<<16)
+
+#define WIN3_DSP_ST2                   (0x0138)
+#define v_WIN3_DSP_XST2(x)                     (((x)&0x1fff)<<0)
+#define v_WIN3_DSP_YST2(x)                     (((x)&0x1fff)<<16)
+#define m_WIN3_DSP_XST2                                (0x1fff<<0)
+#define m_WIN3_DSP_YST2                                (0x1fff<<16)
+
+#define WIN3_DST_ALPHA_CTRL            (0x013c)
+#define v_WIN3_DST_FACTOR_M0(x)                        (((x)&7)<<6)
+#define m_WIN3_DST_FACTOR_M0                   (7<<6)
+
+
+#define WIN3_MST3                      (0x0140)
+#define WIN3_DSP_INFO3                 (0x0144)
+#define v_WIN3_DSP_WIDTH3(x)                   ((((x)-1)&0xfff)<<0)
+#define v_WIN3_DSP_HEIGHT3(x)                  ((((x)-1)&0xfff)<<16)
+#define m_WIN3_DSP_WIDTH3              (0xfff<<0)
+#define m_WIN3_DSP_HEIGHT3             (0xfff<<16)
+
+#define WIN3_DSP_ST3                   (0x0148)
+#define v_WIN3_DSP_XST3(x)                     (((x)&0x1fff)<<0)
+#define v_WIN3_DSP_YST3(x)                     (((x)&0x1fff)<<16)
+#define m_WIN3_DSP_XST3                        (0x1fff<<0)
+#define m_WIN3_DSP_YST3                        (0x1fff<<16)
+
+#define WIN3_FADING_CTRL               (0x014c)
+#define v_WIN3_FADING_OFFSET_R(x)              (((x)&0xff)<<0)
+#define v_WIN3_FADING_OFFSET_G(x)              (((x)&0xff)<<8)
+#define v_WIN3_FADING_OFFSET_B(x)              (((x)&0xff)<<16)
+#define v_WIN3_FADING_EN(x)                    (((x)&1)<<24)
+
+#define m_WIN3_FADING_OFFSET_R                 (0xff<<0)
+#define m_WIN3_FADING_OFFSET_G                 (0xff<<8)
+#define m_WIN3_FADING_OFFSET_B                 (0xff<<16)
+#define m_WIN3_FADING_EN                       (1<<24)
+/* hwc register: HWC (presumably the hardware cursor plane), offsets
+ * 0x0150-0x0168 — control, memory start, display position, alpha and fading.
+ * v_XXX(x) packs a field value; m_XXX is the corresponding mask. */
+#define HWC_CTRL0                      (0x0150)
+#define v_HWC_EN(x)                            (((x)&1)<<0)
+#define v_HWC_DATA_FMT(x)                      (((x)&7)<<1)
+#define v_HWC_MODE(x)                          (((x)&1)<<4)
+#define v_HWC_SIZE(x)                          (((x)&3)<<5)
+#define v_HWC_INTERLACE_READ(x)                        (((x)&1)<<8)
+#define v_HWC_CSC_MODE(x)                      (((x)&1)<<10)
+#define v_HWC_RB_SWAP(x)                       (((x)&1)<<12)
+#define v_HWC_ALPHA_SWAP(x)                    (((x)&1)<<13)
+#define v_HWC_ENDIAN_SWAP(x)                   (((x)&1)<<14)
+
+#define m_HWC_EN                               (1<<0)
+#define m_HWC_DATA_FMT                         (7<<1)
+#define m_HWC_MODE                             (1<<4)
+#define m_HWC_SIZE                             (3<<5)
+#define m_HWC_INTERLACE_READ                   (1<<8)
+#define m_HWC_CSC_MODE                         (1<<10)
+#define m_HWC_RB_SWAP                          (1<<12)
+#define m_HWC_ALPHA_SWAP                       (1<<13)
+#define m_HWC_ENDIAN_SWAP                      (1<<14)
+
+
+/* HWC_CTRL1 (0x0154): AXI/DMA parameters for the cursor fetch path */
+#define HWC_CTRL1                      (0x0154)
+#define v_HWC_AXI_GATHER_EN(x)                 (((x)&1)<<0)
+#define v_HWC_AXI_MAX_OUTSTANDING_EN(x)                (((x)&1)<<1)
+#define v_HWC_DMA_BURST_LENGTH(x)              (((x)&0x3)<<2)
+#define v_HWC_AXI_GATHER_NUM(x)                        (((x)&0x7)<<4)
+#define v_HWC_AXI_OUTSTANDING_MAX_NUM(x)       (((x)&0x1f)<<8)
+#define v_HWC_RGB2YUV_EN(x)                    (((x)&1)<<13)
+#define v_HWC_NO_OUTSTANDING(x)                        (((x)&1)<<14)
+#define v_HWC_Y_MIR(x)                         (((x)&1)<<15)
+#define v_HWC_LUT_EN(x)                                (((x)&1)<<16)
+#define v_WIN_RID_HWC(x)                       (((x)&0xf)<<20)
+
+#define m_HWC_AXI_GATHER_EN                    (1<<0)
+#define m_HWC_AXI_MAX_OUTSTANDING_EN           (1<<1)
+#define m_HWC_DMA_BURST_LENGTH                 (0x3<<2)
+#define m_HWC_AXI_GATHER_NUM                   (0x7<<4)
+#define m_HWC_AXI_OUTSTANDING_MAX_NUM          (0x1f<<8)
+#define m_HWC_RGB2YUV_EN                       (1<<13)
+#define m_HWC_NO_OUTSTANDING                   (1<<14)
+#define m_HWC_Y_MIR                            (1<<15)
+#define m_HWC_LUT_EN                           (1<<16)
+#define m_WIN_RID_HWC                          (0xf<<20)
+
+#define HWC_MST                                (0x0158)
+#define HWC_DSP_ST                     (0x015c)
+#define v_HWC_DSP_XST(x)                       (((x)&0x1fff)<<0)
+#define v_HWC_DSP_YST(x)                       (((x)&0x1fff)<<16)
+#define m_HWC_DSP_XST                          (0x1fff<<0)
+#define m_HWC_DSP_YST                          (0x1fff<<16)
+
+#define HWC_SRC_ALPHA_CTRL             (0x0160)
+#define v_HWC_SRC_ALPHA_EN(x)                  (((x)&1)<<0)
+#define v_HWC_SRC_COLOR_M0(x)                  (((x)&1)<<1)
+#define v_HWC_SRC_ALPHA_M0(x)                  (((x)&1)<<2)
+#define v_HWC_SRC_BLEND_M0(x)                  (((x)&3)<<3)
+#define v_HWC_SRC_ALPHA_CAL_M0(x)              (((x)&1)<<5)
+#define v_HWC_SRC_FACTOR_M0(x)                 (((x)&7)<<6)
+#define v_HWC_SRC_GLOBAL_ALPHA(x)              (((x)&0xff)<<16)
+#define v_HWC_FADING_VALUE(x)                  (((x)&0xff)<<24)
+
+#define m_HWC_SRC_ALPHA_EN                     (1<<0)
+#define m_HWC_SRC_COLOR_M0                     (1<<1)
+#define m_HWC_SRC_ALPHA_M0                     (1<<2)
+#define m_HWC_SRC_BLEND_M0                     (3<<3)
+#define m_HWC_SRC_ALPHA_CAL_M0                 (1<<5)
+#define m_HWC_SRC_FACTOR_M0                    (7<<6)
+#define m_HWC_SRC_GLOBAL_ALPHA                 (0xff<<16)
+#define m_HWC_FADING_VALUE                     (0xff<<24)
+
+#define HWC_DST_ALPHA_CTRL             (0x0164)
+#define v_HWC_DST_FACTOR_M0(x)                 (((x)&7)<<6)
+#define m_HWC_DST_FACTOR_M0                    (7<<6)
+
+
+#define HWC_FADING_CTRL                        (0x0168)
+#define v_HWC_FADING_OFFSET_R(x)               (((x)&0xff)<<0)
+#define v_HWC_FADING_OFFSET_G(x)               (((x)&0xff)<<8)
+#define v_HWC_FADING_OFFSET_B(x)               (((x)&0xff)<<16)
+#define v_HWC_FADING_EN(x)                     (((x)&1)<<24)
+
+#define m_HWC_FADING_OFFSET_R                  (0xff<<0)
+#define m_HWC_FADING_OFFSET_G                  (0xff<<8)
+#define m_HWC_FADING_OFFSET_B                  (0xff<<16)
+#define m_HWC_FADING_EN                                (1<<24)
+/* post process register: post-scaler active-area and scale factors
+ * (0x0170-0x0184), followed by display timing (h/v total, sync, active
+ * start/end; *_F1 variants are for the second field of interlaced output). */
+#define POST_DSP_HACT_INFO             (0x0170)
+#define v_DSP_HACT_END_POST(x)                 (((x)&0x1fff)<<0)
+#define v_DSP_HACT_ST_POST(x)                  (((x)&0x1fff)<<16)
+#define m_DSP_HACT_END_POST                    (0x1fff<<0)
+#define m_DSP_HACT_ST_POST                     (0x1fff<<16)
+
+#define POST_DSP_VACT_INFO             (0x0174)
+#define v_DSP_VACT_END_POST(x)                 (((x)&0x1fff)<<0)
+#define v_DSP_VACT_ST_POST(x)                  (((x)&0x1fff)<<16)
+#define m_DSP_VACT_END_POST                    (0x1fff<<0)
+#define m_DSP_VACT_ST_POST                     (0x1fff<<16)
+
+#define POST_SCL_FACTOR_YRGB           (0x0178)
+#define v_POST_HS_FACTOR_YRGB(x)               (((x)&0xffff)<<0)
+#define v_POST_VS_FACTOR_YRGB(x)               (((x)&0xffff)<<16)
+#define m_POST_HS_FACTOR_YRGB                  (0xffff<<0)
+#define m_POST_VS_FACTOR_YRGB                  (0xffff<<16)
+
+#define POST_SCL_CTRL                  (0x0180)
+#define v_POST_HOR_SD_EN(x)                    (((x)&1)<<0)
+#define v_POST_VER_SD_EN(x)                    (((x)&1)<<1)
+
+#define m_POST_HOR_SD_EN                       (0x1<<0)
+#define m_POST_VER_SD_EN                       (0x1<<1)
+
+#define POST_DSP_VACT_INFO_F1          (0x0184)
+#define v_DSP_VACT_END_POST_F1(x)              (((x)&0x1fff)<<0)
+#define v_DSP_VACT_ST_POST_F1(x)               (((x)&0x1fff)<<16)
+
+#define m_DSP_VACT_END_POST_F1                 (0x1fff<<0)
+#define m_DSP_VACT_ST_POST_F1                  (0x1fff<<16)
+
+#define DSP_HTOTAL_HS_END              (0x0188)
+#define v_DSP_HS_PW(x)                         (((x)&0x1fff)<<0)
+#define v_DSP_HTOTAL(x)                                (((x)&0x1fff)<<16)
+#define m_DSP_HS_PW                            (0x1fff<<0)
+#define m_DSP_HTOTAL                           (0x1fff<<16)
+
+#define DSP_HACT_ST_END                        (0x018c)
+#define v_DSP_HACT_END(x)                      (((x)&0x1fff)<<0)
+#define v_DSP_HACT_ST(x)                       (((x)&0x1fff)<<16)
+#define m_DSP_HACT_END                         (0x1fff<<0)
+#define m_DSP_HACT_ST                          (0x1fff<<16)
+
+#define DSP_VTOTAL_VS_END              (0x0190)
+#define v_DSP_VS_PW(x)                         (((x)&0x1fff)<<0)
+#define v_DSP_VTOTAL(x)                                (((x)&0x1fff)<<16)
+#define m_DSP_VS_PW                            (0x1fff<<0)
+#define m_DSP_VTOTAL                           (0x1fff<<16)
+
+#define DSP_VACT_ST_END                        (0x0194)
+#define v_DSP_VACT_END(x)                      (((x)&0x1fff)<<0)
+#define v_DSP_VACT_ST(x)                       (((x)&0x1fff)<<16)
+#define m_DSP_VACT_END                         (0x1fff<<0)
+#define m_DSP_VACT_ST                          (0x1fff<<16)
+
+#define DSP_VS_ST_END_F1               (0x0198)
+#define v_DSP_VS_END_F1(x)                     (((x)&0x1fff)<<0)
+#define v_DSP_VS_ST_F1(x)                      (((x)&0x1fff)<<16)
+#define m_DSP_VS_END_F1                                (0x1fff<<0)
+#define m_DSP_VS_ST_F1                         (0x1fff<<16)
+
+#define DSP_VACT_ST_END_F1             (0x019c)
+#define v_DSP_VACT_END_F1(x)                   (((x)&0x1fff)<<0)
+#define v_DSP_VAC_ST_F1(x)                     (((x)&0x1fff)<<16)
+#define m_DSP_VACT_END_F1                      (0x1fff<<0)
+#define m_DSP_VAC_ST_F1                                (0x1fff<<16)
+
+/* pwm register: backlight PWM control (0x01a0-0x01ac) — enable, mode,
+ * polarity, clock select, prescale/scale and period/duty registers. */
+#define PWM_CTRL                       (0x01a0)
+#define v_PWM_EN(x)                            (((x)&1)<<0)
+#define v_PWM_MODE(x)                          (((x)&3)<<1)
+
+#define v_DUTY_POL(x)                          (((x)&1)<<3)
+#define v_INACTIVE_POL(x)                      (((x)&1)<<4)
+#define v_OUTPUT_MODE(x)                       (((x)&1)<<5)
+#define v_BL_EN(x)                             (((x)&1)<<8)
+#define v_CLK_SEL(x)                           (((x)&1)<<9)
+#define v_PRESCALE(x)                          (((x)&7)<<12)
+#define v_SCALE(x)                             (((x)&0xff)<<16)
+#define v_RPT(x)                               (((x)&0xff)<<24)
+
+#define m_PWM_EN                               (1<<0)
+#define m_PWM_MODE                             (3<<1)
+
+#define m_DUTY_POL                             (1<<3)
+#define m_INACTIVE_POL                         (1<<4)
+#define m_OUTPUT_MODE                          (1<<5)
+#define m_BL_EN                                        (1<<8)
+#define m_CLK_SEL                              (1<<9)
+#define m_PRESCALE                             (7<<12)
+#define m_SCALE                                        (0xff<<16)
+#define m_RPT                                  ((u32)0xff<<24)
+
+#define PWM_PERIOD_HPR                         (0x01a4)
+#define PWM_DUTY_LPR                           (0x01a8)
+#define PWM_CNT                                        (0x01ac)
+/* BCSH register: brightness/contrast/saturation/hue post-processing
+ * (0x01b0-0x01bc), including Y2R/R2Y color-space conversion control. */
+#define BCSH_COLOR_BAR                 (0x01b0)
+#define v_BCSH_EN(x)                           (((x)&1)<<0)
+#define v_BCSH_COLOR_BAR_Y(x)                  (((x)&0xff)<<8)
+#define v_BCSH_COLOR_BAR_U(x)                  (((x)&0xff)<<16)
+#define v_BCSH_COLOR_BAR_V(x)                  (((x)&0xff)<<24)
+#define m_BCSH_EN                              (1<<0)
+#define m_BCSH_COLOR_BAR_Y                     (0xff<<8)
+#define m_BCSH_COLOR_BAR_U                     (0xff<<16)
+#define m_BCSH_COLOR_BAR_V                     (0xff<<24)
+
+#define BCSH_BCS                       (0x01b4)
+#define v_BCSH_BRIGHTNESS(x)                   (((x)&0xff)<<0)
+#define v_BCSH_CONTRAST(x)                     (((x)&0x1ff)<<8)
+#define v_BCSH_SAT_CON(x)                      (((x)&0x3ff)<<20)
+#define v_BCSH_OUT_MODE(x)                     (((x)&0x3)<<30)
+#define m_BCSH_BRIGHTNESS                      (0xff<<0)
+#define m_BCSH_CONTRAST                                (0x1ff<<8)
+#define m_BCSH_SAT_CON                         (0x3ff<<20)
+#define m_BCSH_OUT_MODE                                ((u32)0x3<<30)
+
+/* signed sine/cosine of the hue rotation angle, 9 bits each */
+#define BCSH_H                         (0x01b8)
+#define v_BCSH_SIN_HUE(x)                      (((x)&0x1ff)<<0)
+#define v_BCSH_COS_HUE(x)                      (((x)&0x1ff)<<16)
+
+#define m_BCSH_SIN_HUE                         (0x1ff<<0)
+#define m_BCSH_COS_HUE                         (0x1ff<<16)
+
+#define BCSH_CTRL                      (0x01bc)
+#define   m_BCSH_Y2R_EN                                (0x1<<0)
+#define   m_BCSH_Y2R_CSC_MODE                  (0x3<<2)
+#define   m_BCSH_R2Y_EN                                (0x1<<4)
+#define   m_BCSH_R2Y_CSC_MODE                  (0x1<<6)
+#define   v_BCSH_Y2R_EN(x)                     (((x)&0x1)<<0)
+#define   v_BCSH_Y2R_CSC_MODE(x)               (((x)&0x3)<<2)
+#define   v_BCSH_R2Y_EN(x)                     (((x)&0x1)<<4)
+#define   v_BCSH_R2Y_CSC_MODE(x)               (((x)&0x1)<<6)
+/* CABC (content-adaptive backlight control) registers, 0x01c0-0x01e4:
+ * enable/pixel counts, stage up/down stepping, and 3x7 gauss-line
+ * coefficient tables (one byte per tap). */
+#define CABC_CTRL0                     (0x01c0)
+#define v_CABC_EN(x)                           (((x)&1)<<0)
+#define v_CABC_CALC_PIXEL_NUM(x)               (((x)&0xffffff)<<1)
+#define m_CABC_EN                              (1<<0)
+#define m_CABC_CALC_PIXEL_NUM                  (0xffffff<<1)
+
+
+#define CABC_CTRL1                     (0x01c4)
+#define v_CABC_LUT_EN(x)                       (((x)&1)<<0)
+#define v_CABC_TOTAL_PIXEL_NUM(x)              (((x)&0xffffff)<<1)
+#define m_CABC_LUT_EN                          (1<<0)
+#define m_CABC_TOTAL_PIXEL_NUM                 (0xffffff<<1)
+
+#define CABC_CTRL2                     (0x01c8)
+#define v_CABC_STAGE_UP_REC(x)                 (((x)&0xff)<<0)
+#define m_CABC_STAGE_UP_REC                    (0xff<<0)
+#define v_CABC_STAGE_UP(x)                     (((x)&0x1ff)<<8)
+#define m_CABC_STAGE_UP                                (0x1ff<<8)
+#define v_CABC_GLOBAL_SU_LIMIT_EN(x)           (((x)&0x1)<<23)
+#define m_CABC_GLOBAL_SU_LIMIT_EN              (0x1<<23)
+#define v_CABC_GLOBAL_SU_REC(x)                        (((x)&0xff)<<24)
+#define m_CABC_GLOBAL_SU_REC                   (0xff<<24)
+
+#define CABC_CTRL3                     (0x01cc)
+#define v_CABC_STAGE_DOWN(x)                   (((x)&0xff)<<0)
+#define m_CABC_STAGE_DOWN                      (0xff<<0)
+#define v_CABC_STAGE_DOWN_REC(x)               (((x)&0x1ff)<<8)
+#define m_CABC_STAGE_DOWN_REC                  (0x1ff<<8)
+#define v_CABC_GLOBAL_SU(x)                    (((x)&0x1ff)<<23)
+#define m_CABC_GLOBAL_SU                       (0x1ff<<23)
+#define CABC_GAUSS_LINE0_0             (0x01d0)
+#define v_CABC_T_LINE0_0(x)                    (((x)&0xff)<<0)
+#define v_CABC_T_LINE0_1(x)                    (((x)&0xff)<<8)
+#define v_CABC_T_LINE0_2(x)                    (((x)&0xff)<<16)
+#define v_CABC_T_LINE0_3(x)                    (((x)&0xff)<<24)
+#define m_CABC_T_LINE0_0                       (0xff<<0)
+#define m_CABC_T_LINE0_1                       (0xff<<8)
+#define m_CABC_T_LINE0_2                       (0xff<<16)
+#define m_CABC_T_LINE0_3                       ((u32)0xff<<24)
+
+#define CABC_GAUSS_LINE0_1             (0x01d4)
+#define v_CABC_T_LINE0_4(x)                    (((x)&0xff)<<0)
+#define v_CABC_T_LINE0_5(x)                    (((x)&0xff)<<8)
+#define v_CABC_T_LINE0_6(x)                    (((x)&0xff)<<16)
+#define m_CABC_T_LINE0_4                       (0xff<<0)
+#define m_CABC_T_LINE0_5                       (0xff<<8)
+#define m_CABC_T_LINE0_6                       (0xff<<16)
+
+
+#define CABC_GAUSS_LINE1_0             (0x01d8)
+#define v_CABC_T_LINE1_0(x)                    (((x)&0xff)<<0)
+#define v_CABC_T_LINE1_1(x)                    (((x)&0xff)<<8)
+#define v_CABC_T_LINE1_2(x)                    (((x)&0xff)<<16)
+#define v_CABC_T_LINE1_3(x)                    (((x)&0xff)<<24)
+#define m_CABC_T_LINE1_0                       (0xff<<0)
+#define m_CABC_T_LINE1_1                       (0xff<<8)
+#define m_CABC_T_LINE1_2                       (0xff<<16)
+#define m_CABC_T_LINE1_3                       ((u32)0xff<<24)
+
+
+#define CABC_GAUSS_LINE1_1             (0x01dc)
+#define v_CABC_T_LINE1_4(x)                    (((x)&0xff)<<0)
+#define v_CABC_T_LINE1_5(x)                    (((x)&0xff)<<8)
+#define v_CABC_T_LINE1_6(x)                    (((x)&0xff)<<16)
+#define m_CABC_T_LINE1_4                       (0xff<<0)
+#define m_CABC_T_LINE1_5                       (0xff<<8)
+#define m_CABC_T_LINE1_6                       (0xff<<16)
+
+
+#define CABC_GAUSS_LINE2_0             (0x01e0)
+#define v_CABC_T_LINE2_0(x)                    (((x)&0xff)<<0)
+#define v_CABC_T_LINE2_1(x)                    (((x)&0xff)<<8)
+#define v_CABC_T_LINE2_2(x)                    (((x)&0xff)<<16)
+#define v_CABC_T_LINE2_3(x)                    (((x)&0xff)<<24)
+#define m_CABC_T_LINE2_0                       (0xff<<0)
+#define m_CABC_T_LINE2_1                       (0xff<<8)
+#define m_CABC_T_LINE2_2                       (0xff<<16)
+#define m_CABC_T_LINE2_3                       ((u32)0xff<<24)
+
+
+#define CABC_GAUSS_LINE2_1             (0x01e4)
+#define v_CABC_T_LINE2_4(x)                    (((x)&0xff)<<0)
+#define v_CABC_T_LINE2_5(x)                    (((x)&0xff)<<8)
+#define v_CABC_T_LINE2_6(x)                    (((x)&0xff)<<16)
+#define m_CABC_T_LINE2_4                       (0xff<<0)
+#define m_CABC_T_LINE2_5                       (0xff<<8)
+#define m_CABC_T_LINE2_6                       (0xff<<16)
+/* FRC register: frame-rate-control dither lookup tables (0x01e8-0x01fc),
+ * four 16-bit frame patterns per register pair. */
+#define FRC_LOWER01_0                  (0x01e8)
+#define v_FRC_LOWER01_FRM0(x)                  (((x)&0xffff)<<0)
+#define v_FRC_LOWER01_FRM1(x)                  (((x)&0xffff)<<16)
+#define m_FRC_LOWER01_FRM0                     (0xffff<<0)
+#define m_FRC_LOWER01_FRM1                     ((u32)0xffff<<16)
+
+#define FRC_LOWER01_1                  (0x01ec)
+#define v_FRC_LOWER01_FRM2(x)                  (((x)&0xffff)<<0)
+#define v_FRC_LOWER01_FRM3(x)                  (((x)&0xffff)<<16)
+#define m_FRC_LOWER01_FRM2                     (0xffff<<0)
+#define m_FRC_LOWER01_FRM3                     ((u32)0xffff<<16)
+
+
+#define FRC_LOWER10_0                  (0x01f0)
+#define v_FRC_LOWER10_FRM0(x)                  (((x)&0xffff)<<0)
+#define v_FRC_LOWER10_FRM1(x)                  (((x)&0xffff)<<16)
+#define m_FRC_LOWER10_FRM0                     (0xffff<<0)
+#define m_FRC_LOWER10_FRM1                     ((u32)0xffff<<16)
+
+
+#define FRC_LOWER10_1                  (0x01f4)
+#define v_FRC_LOWER10_FRM2(x)                  (((x)&0xffff)<<0)
+#define v_FRC_LOWER10_FRM3(x)                  (((x)&0xffff)<<16)
+#define m_FRC_LOWER10_FRM2                     (0xffff<<0)
+#define m_FRC_LOWER10_FRM3                     ((u32)0xffff<<16)
+
+
+#define FRC_LOWER11_0                  (0x01f8)
+#define v_FRC_LOWER11_FRM0(x)                  (((x)&0xffff)<<0)
+#define v_FRC_LOWER11_FRM1(x)                  (((x)&0xffff)<<16)
+#define m_FRC_LOWER11_FRM0                     (0xffff<<0)
+#define m_FRC_LOWER11_FRM1                     ((u32)0xffff<<16)
+
+
+#define FRC_LOWER11_1                  (0x01fc)
+#define v_FRC_LOWER11_FRM2(x)                  (((x)&0xffff)<<0)
+#define v_FRC_LOWER11_FRM3(x)                  (((x)&0xffff)<<16)
+#define m_FRC_LOWER11_FRM2                     (0xffff<<0)
+#define m_FRC_LOWER11_FRM3                     ((u32)0xffff<<16)
+/* IFBDC registers (0x0200-0x0234): frame-buffer-decompression control,
+ * tile counts, base address, macroblock geometry and debug readback. */
+#define IFBDC_CTRL                     (0x0200)
+#define v_IFBDC_CTRL_FBDC_EN(x)                (((x)&0x1)<<0)
+#define v_IFBDC_CTRL_FBDC_COR_EN(x)            (((x)&0x1)<<1)
+#define v_IFBDC_CTRL_FBDC_WIN_SEL(x)           (((x)&0x3)<<2)
+#define v_IFBDC_CTRL_FBDC_ROTATION_MODE(x)     (((x)&0x7)<<4)
+#define v_IFBDC_CTRL_FBDC_FMT(x)               (((x)&0x7f)<<7)
+#define v_IFBDC_AXI_MAX_OUTSTANDING_EN(x)      (((x)&0x1)<<14)
+#define v_IFBDC_AXI_OUTSTANDING_MAX_NUM(x)     (((x)&0x1f)<<15)
+#define v_IFBDC_CTRL_WIDTH_RATIO(x)            (((x)&0x1)<<20)
+#define v_IFBDC_FRAME_RST_EN(x)                        (((x)&0x1)<<21)
+#define v_IFBDC_ICTRL_NOTIFY(x)                        (((x)&0x1)<<22)
+#define v_IFBDC_INVALIDATE_PENDING_O(x)                (((x)&0x1)<<23)
+#define v_IFBDC_RID(x)                         (((x)&0xf)<<24)
+#define v_IFBDC_RSTN(x)                                (((x)&0x1)<<28)
+
+#define m_IFBDC_CTRL_FBDC_EN                   (0x1<<0)
+#define m_IFBDC_CTRL_FBDC_COR_EN               (0x1<<1)
+#define m_IFBDC_CTRL_FBDC_WIN_SEL              (0x3<<2)
+#define m_IFBDC_CTRL_FBDC_ROTATION_MODE                (0x7<<4)
+#define m_IFBDC_CTRL_FBDC_FMT                  (0x7f<<7)
+#define m_IFBDC_AXI_MAX_OUTSTANDING_EN         (0x1<<14)
+#define m_IFBDC_AXI_OUTSTANDING_MAX_NUM                (0x1f<<15)
+#define m_IFBDC_CTRL_WIDTH_RATIO               (0x1<<20)
+#define m_IFBDC_FRAME_RST_EN                   (0x1<<21)
+#define m_IFBDC_ICTRL_NOTIFY                   (0x1<<22)
+#define m_IFBDC_INVALIDATE_PENDING_O           (0x1<<23)
+#define m_IFBDC_RID                            (0xf<<24)
+#define m_IFBDC_RSTN                           (0x1<<28)
+
+/* count/size fields below store value-1; the macro argument is parenthesized
+ * before the "-1" so expression arguments group correctly. */
+#define IFBDC_TILES_NUM                        (0x0204)
+#define v_IFBDC_TILES_NUM(x)                   ((((x)-1)&0x7fffff)<<0)
+#define m_IFBDC_TILES_NUM                      (0x7fffff<<0)
+
+#define IFBDC_FRAME_RST_CYCLE          (0x0208)
+#define v_IFBDC_FRAME_RST_CYCLE(x)             (((x)&0x3ff)<<0)
+#define v_DMA_IFBDC_FRAME_RST_CYCLE(x)         (((x)&0x3ff)<<16)
+#define m_IFBDC_FRAME_RST_CYCLE                        ((0x3ff)<<0)
+#define m_DMA_IFBDC_FRAME_RST_CYCLE            ((0x3ff)<<16)
+
+
+
+#define IFBDC_BASE_ADDR                        (0x20c)
+#define v_IFBDC_BASE_ADDR(x)                   (((x)&0xffffffff)<<0)
+#define m_IFBDC_BASE_ADDR                      ((0xffffffff)<<0)
+
+#define IFBDC_MB_SIZE                  (0x210)
+#define  v_IFBDC_MB_SIZE_WIDTH(x)              ((((x)-1)&0x7f)<<0)
+#define  v_IFBDC_MB_SIZE_HEIGHT(x)             ((((x)-1)&0x1ff)<<16)
+#define  m_IFBDC_MB_SIZE_WIDTH                 ((0x7f)<<0)
+#define  m_IFBDC_MB_SIZE_HEIGHT                        ((0x1ff)<<16)
+
+
+#define IFBDC_CMP_INDEX_INIT           (0x0214)
+#define v_IFBDC_CMP_INDEX_INIT(x)               (((x)&0x7fffff) << 0)
+#define m_IFBDC_CMP_INDEX_INIT                  (0x7fffff<<0)
+
+#define IFBDC_MB_VIR_WIDTH             (0x220)
+#define  v_IFBDC_MB_VIR_WIDTH(x)               (((x)&0xff)<<0)
+#define  m_IFBDC_MB_VIR_WIDTH                  ((0xff)<<0)
+
+#define IFBDC_DEBUG0                   (0x230)
+#define v_DBG_IFBDC_MB_Y_WCNT(x)               (((x)&0x1ff)<<0)
+#define v_DBG_IFBDC_IDLE(x)                    (((x)&0x1)<<12)
+#define v_DBG_IFBDC_LB_RCNT(x)                 (((x)&0x7FF)<<16)
+#define v_DBG_IFBDC_INVALIDATE_PENDING_I(x)    (((x)&0x1)<<28)
+
+#define m_DBG_IFBDC_MB_Y_WCNT                  (0x1ff<<0)
+#define m_DBG_IFBDC_IDLE                       (0x1<<12)
+#define m_DBG_IFBDC_LB_RCNT                    (0x7FF<<16)
+#define m_DBG_IFBDC_INVALIDATE_PENDING_I       (0x1<<28)
+
+#define IFBDC_DEBUG1                   (0x234)
+#define V_DBG_FBDC_CMP_TILE_INDEX(x)           (((x)&0x7fffff)<<0)
+#define m_DBG_FBDC_CMP_TILE_INDEX              (0x7fffff<<0)
+/* AXI read-latency measurement registers (0x250-0x25c): control, max-latency
+ * and threshold/sample counters for channel 0. */
+#define LATENCY_CTRL0                  (0x250)
+#define  v_RD_LATENCY_EN(x)                    (((x)&0x1)<<0)
+#define  v_HAND_LATENCY_CLR(x)                 (((x)&0x1)<<1)
+#define  v_RD_LATENCY_MODE(x)                  (((x)&0x1)<<2)
+#define  v_RD_LATENCY_ID0(x)                   (((x)&0xf)<<4)
+#define  v_RD_LATENCY_THR(x)                   (((x)&0xfff)<<8)
+#define  v_RD_LATENCY_ST_NUM(x)                        (((x)&0x1f)<<20)
+#define  m_RD_LATENCY_EN                       (0x1<<0)
+#define  m_HAND_LATENCY_CLR                    (0x1<<1)
+#define  m_RD_LATENCY_MODE                     (0x1<<2)
+#define  m_RD_LATENCY_ID0                      (0xf<<4)
+#define  m_RD_LATENCY_THR                      (0xfff<<8)
+#define  m_RD_LATENCY_ST_NUM                   (0x1f<<20)
+
+#define RD_MAX_LATENCY_NUM0            (0x254)
+#define v_RD_MAX_LATENCY_NUM_CH0(x)            (((x)&0xFFF)<<0)
+#define v_RD_LATENCY_OVERFLOW_CH0(x)           (((x)&0x1)<<16)
+#define m_RD_MAX_LATENCY_NUM_CH0               (0xFFF<<0)
+#define m_RD_LATENCY_OVERFLOW_CH0              (0x1<<16)
+
+#define RD_LATENCY_THR_NUM0            (0x258)
+#define v_RD_LATENCY_THR_NUM_CH0(x)            (((x)&0xFFFFFF)<<0)
+#define m_RD_LATENCY_THR_NUM_CH0               (0xFFFFFF<<0)
+
+#define RD_LATENCY_SWAP_NUM0           (0x25c)
+#define v_RD_LATENCY_SAMP_NUM_CH0(x)           (((x)&0xFFFFFF)<<0)
+#define m_RD_LATENCY_SAMP_NUM_CH0              (0xFFFFFF<<0)
+#define WIN0_DSP_BG                    (0x260)
+#define v_WIN0_DSP_BG_BLUE(x)                  (((x)&0xff)<<0)
+#define v_WIN0_DSP_BG_GREEN(x)                 (((x)&0xff)<<8)
+#define v_WIN0_DSP_BG_RED(x)                   (((x)&0xff)<<16)
+#define v_WIN0_DSP_BG_EN(x)                    (((x)&1)<<31)
+#define m_WIN0_DSP_BG_BLUE                     (0xff<<0)
+#define m_WIN0_DSP_BG_GREEN                    (0xff<<8)
+#define m_WIN0_DSP_BG_RED                      (0xff<<16)
+#define m_WIN0_DSP_BG_EN                       (0x1<<31)
+
+#define WIN1_DSP_BG                    (0x264)
+#define v_WIN1_DSP_BG_BLUE(x)                  (((x)&0xff)<<0)
+#define v_WIN1_DSP_BG_GREEN(x)                 (((x)&0xff)<<8)
+#define v_WIN1_DSP_BG_RED(x)                   (((x)&0xff)<<16)
+#define v_WIN1_DSP_BG_EN(x)                    (((x)&1)<<31)
+#define m_WIN1_DSP_BG_BLUE                     (0xff<<0)
+#define m_WIN1_DSP_BG_GREEN                    (0xff<<8)
+#define m_WIN1_DSP_BG_RED                      (0xff<<16)
+#define m_WIN1_DSP_BG_EN                       (0x1<<31)
+
+#define WIN2_DSP_BG                    (0x268)
+#define v_WIN2_DSP_BG_BLUE(x)                  (((x)&0xff)<<0)
+#define v_WIN2_DSP_BG_GREEN(x)                 (((x)&0xff)<<8)
+#define v_WIN2_DSP_BG_RED(x)                   (((x)&0xff)<<16)
+#define v_WIN2_DSP_BG_EN(x)                    (((x)&1)<<31)
+#define m_WIN2_DSP_BG_BLUE                     (0xff<<0)
+#define m_WIN2_DSP_BG_GREEN                    (0xff<<8)
+#define m_WIN2_DSP_BG_RED                      (0xff<<16)
+#define m_WIN2_DSP_BG_EN                       (0x1<<31)
+
+#define WIN3_DSP_BG                    (0x26c)
+#define v_WIN3_DSP_BG_BLUE(x)                  (((x)&0xff)<<0)
+#define v_WIN3_DSP_BG_GREEN(x)                 (((x)&0xff)<<8)
+#define v_WIN3_DSP_BG_RED(x)                   (((x)&0xff)<<16)
+#define v_WIN3_DSP_BG_EN(x)                    (((x)&1)<<31)
+#define m_WIN3_DSP_BG_BLUE                     (0xff<<0)
+#define m_WIN3_DSP_BG_GREEN                    (0xff<<8)
+#define m_WIN3_DSP_BG_RED                      (0xff<<16)
+#define m_WIN3_DSP_BG_EN                       (0x1<<31)
+
+#define SCAN_LINE_NUM                  (0x270)
+#define CABC_DEBUG0                    (0x274)
+#define CABC_DEBUG1                    (0x278)
+#define CABC_DEBUG2                    (0x27c)
+#define DBG_REG_000                    (0x280)
+#define DBG_REG_001                    (0x284)
+#define DBG_REG_002                    (0x288)
+#define DBG_REG_003                    (0x28c)
+#define DBG_REG_004                    (0x290)
+#define DBG_REG_005                    (0x294)
+#define DBG_REG_006                    (0x298)
+#define DBG_REG_007                    (0x29c)
+#define DBG_REG_008                    (0x2a0)
+#define DBG_REG_016                    (0x2c0)
+#define DBG_REG_017                    (0x2c4)
+#define DBG_REG_018                    (0x2c8)
+#define DBG_REG_019                    (0x2cc)
+#define DBG_REG_020                    (0x2d0)
+#define DBG_REG_021                    (0x2d4)
+#define DBG_REG_022                    (0x2d8)
+#define DBG_REG_023                    (0x2dc)
+#define DBG_REG_028                    (0x2f0)
+
+#define MMU_DTE_ADDR                   (0x0300)
+#define v_MMU_DTE_ADDR(x)                      (((x)&0xffffffff)<<0)
+#define m_MMU_DTE_ADDR                         (0xffffffff<<0)
+
+#define MMU_STATUS                     (0x0304)
+#define v_PAGING_ENABLED(x)                    (((x)&1)<<0)
+#define v_PAGE_FAULT_ACTIVE(x)                 (((x)&1)<<1)
+#define v_STAIL_ACTIVE(x)                      (((x)&1)<<2)
+#define v_MMU_IDLE(x)                          (((x)&1)<<3)
+#define v_REPLAY_BUFFER_EMPTY(x)               (((x)&1)<<4)
+#define v_PAGE_FAULT_IS_WRITE(x)               (((x)&1)<<5)
+#define v_PAGE_FAULT_BUS_ID(x)                 (((x)&0x1f)<<6)
+#define m_PAGING_ENABLED                       (1<<0)
+#define m_PAGE_FAULT_ACTIVE                    (1<<1)
+#define m_STAIL_ACTIVE                         (1<<2)
+#define m_MMU_IDLE                             (1<<3)
+#define m_REPLAY_BUFFER_EMPTY                  (1<<4)
+#define m_PAGE_FAULT_IS_WRITE                  (1<<5)
+#define m_PAGE_FAULT_BUS_ID                    (0x1f<<6)
+
+#define MMU_COMMAND                    (0x0308)
+#define v_MMU_CMD(x)                           (((x)&0x3)<<0)
+#define m_MMU_CMD                      (0x3<<0)
+
+#define MMU_PAGE_FAULT_ADDR            (0x030c)
+#define v_PAGE_FAULT_ADDR(x)                   (((x)&0xffffffff)<<0)
+#define m_PAGE_FAULT_ADDR                      (0xffffffff<<0)
+
+#define MMU_ZAP_ONE_LINE               (0x0310)
+#define v_MMU_ZAP_ONE_LINE(x)                  (((x)&0xffffffff)<<0)
+#define m_MMU_ZAP_ONE_LINE                     (0xffffffff<<0)
+
+#define MMU_INT_RAWSTAT                        (0x0314)
+#define v_PAGE_FAULT_RAWSTAT(x)                        (((x)&1)<<0)
+#define v_READ_BUS_ERROR_RAWSTAT(x)            (((x)&1)<<1)
+#define m_PAGE_FAULT_RAWSTAT                   (1<<0)
+#define m_READ_BUS_ERROR_RAWSTAT               (1<<1)
+
+#define MMU_INT_CLEAR                  (0x0318)
+#define v_PAGE_FAULT_CLEAR(x)                  (((x)&1)<<0)
+#define v_READ_BUS_ERROR_CLEAR(x)              (((x)&1)<<1)
+#define m_PAGE_FAULT_CLEAR                     (1<<0)
+#define m_READ_BUS_ERROR_CLEAR                 (1<<1)
+
+#define MMU_INT_MASK                   (0x031c)
+#define v_PAGE_FAULT_MASK(x)                   (((x)&1)<<0)
+#define v_READ_BUS_ERROR_MASK(x)               (((x)&1)<<1)
+#define m_PAGE_FAULT_MASK                      (1<<0)
+#define m_READ_BUS_ERROR_MASK                  (1<<1)
+
+#define MMU_INT_STATUS                 (0x0320)
+#define v_PAGE_FAULT_STATUS(x)                 (((x)&1)<<0)
+#define v_READ_BUS_ERROR_STATUS(x)             (((x)&1)<<1)
+#define m_PAGE_FAULT_STATUS                    (1<<0)
+#define m_READ_BUS_ERROR_STATUS                        (1<<1)
+
+#define MMU_AUTO_GATING                        (0x0324)
+#define v_MMU_AUTO_GATING(x)                   (((x)&1)<<0)
+#define m_MMU_AUTO_GATING                      (1<<0)
+
+#define WIN2_LUT_ADDR                  (0x0400)
+#define WIN3_LUT_ADDR                  (0x0800)
+#define HWC_LUT_ADDR                   (0x0c00)
+#define GAMMA_LUT_ADDR                 (0x1000)
+#define CABC_GAMMA_LUT_ADDR            (0x1800)
+#define MCU_BYPASS_WPORT               (0x2200)
+#define MCU_BYPASS_RPORT               (0x2300)
+
+
+
+enum lb_mode {
+       LB_YUV_3840X5 = 0x0,
+       LB_YUV_2560X8 = 0x1,
+       LB_RGB_3840X2 = 0x2,
+       LB_RGB_2560X4 = 0x3,
+       LB_RGB_1920X5 = 0x4,
+       LB_RGB_1280X8 = 0x5
+};
+
+enum sacle_up_mode {
+       SCALE_UP_BIL = 0x0,
+       SCALE_UP_BIC = 0x1
+};
+
+enum scale_down_mode {
+       SCALE_DOWN_BIL = 0x0,
+       SCALE_DOWN_AVG = 0x1
+};
+
+/*ALPHA BLENDING MODE*/
+enum alpha_mode {               /*  Fs       Fd */
+       AB_USER_DEFINE     = 0x0,
+       AB_CLEAR           = 0x1,/*  0          0*/
+       AB_SRC             = 0x2,/*  1          0*/
+       AB_DST             = 0x3,/*  0          1  */
+       AB_SRC_OVER        = 0x4,/*  1              1-As''*/
+       AB_DST_OVER        = 0x5,/*  1-Ad''   1*/
+       AB_SRC_IN          = 0x6,
+       AB_DST_IN          = 0x7,
+       AB_SRC_OUT         = 0x8,
+       AB_DST_OUT         = 0x9,
+       AB_SRC_ATOP        = 0xa,
+       AB_DST_ATOP        = 0xb,
+       XOR                = 0xc,
+       AB_SRC_OVER_GLOBAL = 0xd
+}; /*alpha_blending_mode*/
+
+enum src_alpha_mode {
+       AA_STRAIGHT        = 0x0,
+       AA_INVERSE         = 0x1
+};/*src_alpha_mode*/
+
+enum global_alpha_mode {
+       AA_GLOBAL         = 0x0,
+       AA_PER_PIX        = 0x1,
+       AA_PER_PIX_GLOBAL = 0x2
+};/*src_global_alpha_mode*/
+
+enum src_alpha_sel {
+       AA_SAT          = 0x0,
+       AA_NO_SAT       = 0x1
+};/*src_alpha_sel*/
+
+enum src_color_mode {
+       AA_SRC_PRE_MUL         = 0x0,
+       AA_SRC_NO_PRE_MUL      = 0x1
+};/*src_color_mode*/
+
+enum factor_mode {
+       AA_ZERO                 = 0x0,
+       AA_ONE                  = 0x1,
+       AA_SRC                  = 0x2,
+       AA_SRC_INVERSE          = 0x3,
+       AA_SRC_GLOBAL           = 0x4
+};/*src_factor_mode  &&  dst_factor_mode*/
+
+enum _vop_r2y_csc_mode {
+       VOP_R2Y_CSC_BT601 = 0,
+       VOP_R2Y_CSC_BT709
+};
+
+enum _vop_y2r_csc_mode {
+       VOP_Y2R_CSC_MPEG = 0,
+       VOP_Y2R_CSC_JPEG,
+       VOP_Y2R_CSC_HD,
+       VOP_Y2R_CSC_BYPASS
+};
+enum _vop_format {
+       VOP_FORMAT_ARGB888 = 0,
+       VOP_FORMAT_RGB888,
+       VOP_FORMAT_RGB565,
+       VOP_FORMAT_YCBCR420 = 4,
+       VOP_FORMAT_YCBCR422,
+       VOP_FORMAT_YCBCR444
+};
+
+enum _vop_overlay_mode {
+       VOP_RGB_DOMAIN,
+       VOP_YUV_DOMAIN
+};
+
+struct lcdc_device {
+       int id;
+       struct rk_lcdc_driver driver;
+       struct device *dev;
+       struct rk_screen *screen;
+
+       void __iomem *regs;
+       void *regsbak;          /*back up reg*/
+       u32 reg_phy_base;       /* physical basic address of lcdc register*/
+       u32 len;                /* physical map length of lcdc register*/
+       /*one time only one process allowed to config the register*/
+       spinlock_t reg_lock;
+
+       int __iomem *dsp_lut_addr_base;
+
+
+       int prop;               /*used for primary or extended display device*/
+       bool pre_init;
+       bool pwr18;             /*if lcdc use 1.8v power supply*/
+       /*if aclk or hclk is closed ,acess to register is not allowed*/
+       bool clk_on;
+       /*active layer counter,when  atv_layer_cnt = 0,disable lcdc*/
+       u8 atv_layer_cnt;
+
+
+       unsigned int            irq;
+
+       struct clk              *pd;            /*lcdc power domain*/
+       struct clk              *hclk;          /*lcdc AHP clk*/
+       struct clk              *dclk;          /*lcdc dclk*/
+       struct clk              *aclk;          /*lcdc share memory frequency*/
+       u32 pixclock;
+
+       u32 standby;                            /*1:standby,0:wrok*/
+       u32 iommu_status;
+       struct backlight_device *backlight;
+       struct clk              *pll_sclk;
+};
+
+struct alpha_config {
+       enum src_alpha_mode src_alpha_mode;       /*win0_src_alpha_m0*/
+       u32 src_global_alpha_val; /*win0_src_global_alpha*/
+       enum global_alpha_mode src_global_alpha_mode;/*win0_src_blend_m0*/
+       enum src_alpha_sel src_alpha_cal_m0;     /*win0_src_alpha_cal_m0*/
+       enum src_color_mode src_color_mode;      /*win0_src_color_m0*/
+       enum factor_mode src_factor_mode;        /*win0_src_factor_m0*/
+       enum factor_mode dst_factor_mode;      /*win0_dst_factor_m0*/
+};
+
+struct lcdc_cabc_mode {
+       u32 pixel_num;                  /* pixel precent number */
+       u16 stage_up;                   /* up stride */
+       u16 stage_down;         /* down stride */
+       u16 global_su;
+};
+
+static inline void lcdc_writel(struct lcdc_device *lcdc_dev, u32 offset, u32 v)
+{
+       u32 *_pv = (u32 *)lcdc_dev->regsbak;
+
+       _pv += (offset >> 2);
+       *_pv = v;
+       writel_relaxed(v, lcdc_dev->regs + offset);
+}
+
+static inline u32 lcdc_readl(struct lcdc_device *lcdc_dev, u32 offset)
+{
+       u32 v;
+       u32 *_pv = (u32 *)lcdc_dev->regsbak;
+
+       _pv += (offset >> 2);
+       v = readl_relaxed(lcdc_dev->regs + offset);
+       *_pv = v;
+       return v;
+}
+
+static inline u32 lcdc_read_bit(struct lcdc_device *lcdc_dev,
+                               u32 offset, u32 msk)
+{
+       u32 v;
+       u32 *_pv = (u32 *)lcdc_dev->regsbak;
+       u32 _v = readl_relaxed(lcdc_dev->regs + offset);
+
+       _pv += (offset >> 2);
+       *_pv = _v;
+       _v &= msk;
+       v = (_v ? 1 : 0);
+       return v;
+}
+
+static inline void  lcdc_set_bit(struct lcdc_device *lcdc_dev,
+                                u32 offset, u32 msk)
+{
+       u32 *_pv = (u32 *)lcdc_dev->regsbak;
+
+       _pv += (offset >> 2);
+       (*_pv) |= msk;
+       writel_relaxed(*_pv, lcdc_dev->regs + offset);
+}
+
+static inline void lcdc_clr_bit(struct lcdc_device *lcdc_dev,
+                               u32 offset, u32 msk)
+{
+       u32 *_pv = (u32 *)lcdc_dev->regsbak;
+
+       _pv += (offset >> 2);
+       (*_pv) &= (~msk);
+       writel_relaxed(*_pv, lcdc_dev->regs + offset);
+}
+
+static inline void  lcdc_msk_reg(struct lcdc_device *lcdc_dev,
+                                u32 offset, u32 msk, u32 v)
+{
+       u32 *_pv = (u32 *)lcdc_dev->regsbak;
+
+       _pv += (offset >> 2);
+       (*_pv) &= (~msk);
+       (*_pv) |= v;
+       writel_relaxed(*_pv, lcdc_dev->regs + offset);
+}
+
+static inline void lcdc_cfg_done(struct lcdc_device *lcdc_dev)
+{
+       writel_relaxed(0x01, lcdc_dev->regs + REG_CFG_DONE);
+       dsb(sy);
+}
+
+#define CUBIC_PRECISE  0
+#define CUBIC_SPLINE   1
+#define CUBIC_CATROM   2
+#define CUBIC_MITCHELL 3
+
+#define CUBIC_MODE_SELETION      CUBIC_PRECISE
+
+/*************************************************************/
+#define SCALE_FACTOR_BILI_DN_FIXPOINT_SHIFT   12   /* 4.12*/
+#define SCALE_FACTOR_BILI_DN_FIXPOINT(x)      \
+       ((INT32)((x) * (1 << SCALE_FACTOR_BILI_DN_FIXPOINT_SHIFT)))
+
+#define SCALE_FACTOR_BILI_UP_FIXPOINT_SHIFT   16   /* 0.16*/
+
+#define SCALE_FACTOR_AVRG_FIXPOINT_SHIFT   16   /*0.16*/
+#define SCALE_FACTOR_AVRG_FIXPOINT(x)      \
+       ((INT32)((x) * (1 << SCALE_FACTOR_AVRG_FIXPOINT_SHIFT)))
+
+#define SCALE_FACTOR_BIC_FIXPOINT_SHIFT    16   /* 0.16*/
+#define SCALE_FACTOR_BIC_FIXPOINT(x)       \
+       ((INT32)((x) * (1 << SCALE_FACTOR_BIC_FIXPOINT_SHIFT)))
+
+#define SCALE_FACTOR_DEFAULT_FIXPOINT_SHIFT    12  /*NONE SCALE,vsd_bil*/
+#define SCALE_FACTOR_VSDBIL_FIXPOINT_SHIFT     12  /*VER SCALE DOWN BIL*/
+
+/*********************************************************/
+
+/*#define GET_SCALE_FACTOR_BILI(src, dst)  \
+       ((((src) - 1) << SCALE_FACTOR_BILI_FIXPOINT_SHIFT) / ((dst) - 1))*/
+/*#define GET_SCALE_FACTOR_BIC(src, dst)   \
+       ((((src) - 1) << SCALE_FACTOR_BIC_FIXPOINT_SHIFT) / ((dst) - 1))*/
+/*modified by hpz*/
+#define GET_SCALE_FACTOR_BILI_DN(src, dst)  \
+       ((((src) * 2 - 3) << (SCALE_FACTOR_BILI_DN_FIXPOINT_SHIFT - 1)) \
+       / ((dst) - 1))
+#define GET_SCALE_FACTOR_BILI_UP(src, dst)  \
+       ((((src) * 2 - 3) << (SCALE_FACTOR_BILI_UP_FIXPOINT_SHIFT - 1)) \
+       / ((dst) - 1))
+#define GET_SCALE_FACTOR_BIC(src, dst)      \
+       ((((src) * 2 - 3) << (SCALE_FACTOR_BIC_FIXPOINT_SHIFT - 1)) \
+       / ((dst) - 1))
+
+/*********************************************************/
+/*NOTE: hardware in order to save resource , srch first to get interlace line
+(srch+vscalednmult-1)/vscalednmult; and do scale*/
+#define GET_SCALE_DN_ACT_HEIGHT(srch, vscalednmult) \
+       (((srch) + (vscalednmult) - 1) / (vscalednmult))
+
+/*#define VSKIP_MORE_PRECISE*/
+
+#ifdef VSKIP_MORE_PRECISE
+#define MIN_SCALE_FACTOR_AFTER_VSKIP        1.5f
+#define GET_SCALE_FACTOR_BILI_DN_VSKIP(srch, dsth, vscalednmult) \
+       (GET_SCALE_FACTOR_BILI_DN(GET_SCALE_DN_ACT_HEIGHT((srch),\
+       (vscalednmult)), (dsth)))
+#else
+#define MIN_SCALE_FACTOR_AFTER_VSKIP        1
+#if 0/*rk3288*/
+#define GET_SCALE_FACTOR_BILI_DN_VSKIP(srch, dsth, vscalednmult) \
+       ((GET_SCALE_DN_ACT_HEIGHT((srch), (vscalednmult)) == (dsth))\
+               ? (GET_SCALE_FACTOR_BILI_DN((srch),\
+               (dsth))/(vscalednmult))\
+               : GET_SCALE_FACTOR_BILI_DN(GET_SCALE_DN_ACT_HEIGHT((srch),\
+               (vscalednmult)), (dsth)))
+#else/*rk3368*/
+#define GET_SCALE_FACTOR_BILI_DN_VSKIP(srch, dsth, vscalednmult) \
+       ((GET_SCALE_DN_ACT_HEIGHT((srch) , (vscalednmult)) == (dsth)) \
+       ? (GET_SCALE_FACTOR_BILI_DN((srch) , (dsth)) / (vscalednmult)) \
+       : (GET_SCALE_DN_ACT_HEIGHT((srch) , (vscalednmult)) == ((dsth) * 2)) \
+       ?  GET_SCALE_FACTOR_BILI_DN(GET_SCALE_DN_ACT_HEIGHT(((srch) - 1),\
+       (vscalednmult)) , (dsth)) : \
+       GET_SCALE_FACTOR_BILI_DN(GET_SCALE_DN_ACT_HEIGHT((srch),\
+       (vscalednmult)) , (dsth)))
+#endif
+
+
+#endif
+/*****************************************************************/
+
+
+/*scalefactor must >= dst/src, or pixels at end of line may be unused*/
+/*scalefactor must < dst/(src-1), or dst buffer may overflow*/
+/*avrg old code: ((((dst) << SCALE_FACTOR_AVRG_FIXPOINT_SHIFT))\
+       /((src) - 1)) hxx_chgsrc*/
+/*modified by hpz:*/
+#define GET_SCALE_FACTOR_AVRG(src, dst)  ((((dst) << \
+       (SCALE_FACTOR_AVRG_FIXPOINT_SHIFT + 1))) / (2 * (src) - 1))
+
+/*************************************************************************/
+/*Scale Coordinate Accumulate, x.16*/
+#define SCALE_COOR_ACC_FIXPOINT_SHIFT     16
+#define SCALE_COOR_ACC_FIXPOINT_ONE (1 << SCALE_COOR_ACC_FIXPOINT_SHIFT)
+#define SCALE_COOR_ACC_FIXPOINT(x) \
+       ((INT32)((x)*(1 << SCALE_COOR_ACC_FIXPOINT_SHIFT)))
+#define SCALE_COOR_ACC_FIXPOINT_REVERT(x) \
+       ((((x) >> (SCALE_COOR_ACC_FIXPOINT_SHIFT - 1)) + 1) >> 1)
+
+#define SCALE_GET_COOR_ACC_FIXPOINT(scalefactor, factorfixpointshift)  \
+       ((scalefactor) << \
+       (SCALE_COOR_ACC_FIXPOINT_SHIFT - (factorfixpointshift)))
+
+
+/************************************************************************/
+/*CoarsePart of Scale Coordinate Accumulate, used for pixel mult-add factor, 0.8*/
+#define SCALE_FILTER_FACTOR_FIXPOINT_SHIFT     8
+#define SCALE_FILTER_FACTOR_FIXPOINT_ONE       \
+       (1 << SCALE_FILTER_FACTOR_FIXPOINT_SHIFT)
+#define SCALE_FILTER_FACTOR_FIXPOINT(x)        \
+       ((INT32)((x) * (1 << SCALE_FILTER_FACTOR_FIXPOINT_SHIFT)))
+#define SCALE_FILTER_FACTOR_FIXPOINT_REVERT(x) \
+       ((((x) >> (SCALE_FILTER_FACTOR_FIXPOINT_SHIFT-1)) + 1) >> 1)
+
+#define SCALE_GET_FILTER_FACTOR_FIXPOINT(cooraccumulate, \
+       cooraccfixpointshift) \
+       (((cooraccumulate) >> \
+       ((cooraccfixpointshift) - SCALE_FILTER_FACTOR_FIXPOINT_SHIFT)) & \
+       (SCALE_FILTER_FACTOR_FIXPOINT_ONE - 1))
+
+#define SCALE_OFFSET_FIXPOINT_SHIFT            8
+#define SCALE_OFFSET_FIXPOINT(x)              \
+       ((INT32)((x) * (1 << SCALE_OFFSET_FIXPOINT_SHIFT)))
+
+#endif
index b773db1c2c8a583eafd0c34610067960498f23b1..f9320f51cde7d59b72befc10a3574fe907f77089 100755 (executable)
@@ -17,8 +17,10 @@ config RK32_LVDS
        depends on RK_TRSM
 
 config RK31XX_LVDS
-        bool "RK312x/RK3190 lvds transmitter support"
-        depends on RK_TRSM
+       bool "RK312x/RK3190/3368 lvds transmitter support"
+       depends on RK_TRSM
+       help
+               If use LVDS or RGB output interface,selete this module.
 
 config RK610_LVDS
        bool "RK610(Jetta) lvds transmitter support"
index de3100b44dbdf372e3104e8618cae91665c2c647..12ca9fc9cc7ce32051e2e348e61077966c479ef1 100755 (executable)
@@ -52,10 +52,12 @@ static int rk31xx_lvds_clk_init(struct rk_lvds_device *lvds)
                return PTR_ERR(lvds->ctrl_pclk);
        }
 
-       lvds->ctrl_hclk = devm_clk_get(lvds->dev, "hclk_vio_h2p");
-       if (IS_ERR(lvds->ctrl_hclk)) {
-               dev_err(lvds->dev, "get ctrl hclk failed\n");
-               return PTR_ERR(lvds->ctrl_hclk);
+       if (lvds->data->soc_type == LVDS_SOC_RK312X) {
+               lvds->ctrl_hclk = devm_clk_get(lvds->dev, "hclk_vio_h2p");
+               if (IS_ERR(lvds->ctrl_hclk)) {
+                       dev_err(lvds->dev, "get ctrl hclk failed\n");
+                       return PTR_ERR(lvds->ctrl_hclk);
+               }
        }
 
        return 0;       
@@ -66,7 +68,8 @@ static int rk31xx_lvds_clk_enable(struct rk_lvds_device *lvds)
        if (!lvds->clk_on) {
                clk_prepare_enable(lvds->pclk);
                clk_prepare_enable(lvds->ctrl_pclk);
-               clk_prepare_enable(lvds->ctrl_hclk);
+               if (lvds->data->soc_type == LVDS_SOC_RK312X)
+                       clk_prepare_enable(lvds->ctrl_hclk);
                lvds->clk_on = true;
        }
 
@@ -77,7 +80,8 @@ static int rk31xx_lvds_clk_disable(struct rk_lvds_device *lvds)
 {
        if (lvds->clk_on) {
                clk_disable_unprepare(lvds->pclk);
-               clk_disable_unprepare(lvds->ctrl_hclk);
+               if (lvds->data->soc_type == LVDS_SOC_RK312X)
+                       clk_disable_unprepare(lvds->ctrl_hclk);
                clk_disable_unprepare(lvds->ctrl_pclk);
                lvds->clk_on = false;
        }
@@ -145,6 +149,7 @@ static int rk31xx_lvds_disable(void)
        rk31xx_lvds_clk_disable(lvds);
 
 #if !defined(CONFIG_RK_FPGA)
+#ifdef CONFIG_PINCTRL
         if (lvds->screen.type == SCREEN_RGB) {
                 if (lvds->dev->pins) {
                         pinctrl_select_state(lvds->dev->pins->p,
@@ -154,6 +159,7 @@ static int rk31xx_lvds_disable(void)
                                              lvds->pins->sleep_state);
                 }
         }
+#endif
 #endif
         lvds->sys_state = false;
        return 0;
@@ -168,14 +174,32 @@ static void rk31xx_output_lvds(struct rk_lvds_device *lvds,
         /* if LVDS transmitter source from VOP, vop_dclk need get invert
          * set iomux in dts pinctrl
          */
-       val = 0;
-       val |= v_LVDSMODE_EN(1) | v_MIPIPHY_TTL_EN(0);  /* enable lvds mode */
-       val |= v_LVDS_DATA_SEL(LVDS_DATA_FROM_LCDC);    /* config data source */
-       val |= v_LVDS_OUTPUT_FORMAT(screen->lvds_format); /* config lvds_format */
-       val |= v_LVDS_MSBSEL(LVDS_MSB_D7);      /* LSB receive mode */
-        val |= v_MIPIPHY_LANE0_EN(1) | v_MIPIDPI_FORCEX_EN(1);
-       grf_writel(val, RK312X_GRF_LVDS_CON0);
-
+       if (lvds->data->soc_type == LVDS_SOC_RK3368) {
+               /* enable lvds mode */
+               val |= v_RK3368_LVDSMODE_EN(1) | v_RK3368_MIPIPHY_TTL_EN(0);
+               /* config data source */
+               /*val |= v_LVDS_DATA_SEL(LVDS_DATA_FROM_LCDC); */
+               /* config lvds_format */
+               val |= v_RK3368_LVDS_OUTPUT_FORMAT(screen->lvds_format);
+               /* LSB receive mode */
+               val |= v_RK3368_LVDS_MSBSEL(LVDS_MSB_D7);
+               val |= v_RK3368_MIPIPHY_LANE0_EN(1) |
+                      v_RK3368_MIPIDPI_FORCEX_EN(1);
+               /*rk3368  RK3368_GRF_SOC_CON7 = 0X0041C*/
+               grf_writel(val, 0x0041C);
+       } else {
+               /* enable lvds mode */
+               val |= v_LVDSMODE_EN(1) | v_MIPIPHY_TTL_EN(0);
+               /* config data source */
+               val |= v_LVDS_DATA_SEL(LVDS_DATA_FROM_LCDC);
+               /* config lvds_format */
+               val |= v_LVDS_OUTPUT_FORMAT(screen->lvds_format);
+               /* LSB receive mode */
+               val |= v_LVDS_MSBSEL(LVDS_MSB_D7);
+               val |= v_MIPIPHY_LANE0_EN(1) | v_MIPIDPI_FORCEX_EN(1);
+               /*rk312x  RK312X_GRF_LVDS_CON0 = 0X00150*/
+               grf_writel(val, 0X00150);
+       }
        /* digital internal disable */
        lvds_msk_reg(lvds, MIPIPHY_REGE1, m_DIG_INTER_EN, v_DIG_INTER_EN(0));
 
@@ -223,8 +247,10 @@ static void rk31xx_output_lvttl(struct rk_lvds_device *lvds,
         grf_writel(0x77771111, 0x00e8); /* RK312X_GRF_GPIO2C_IOMUX2 */
         grf_writel(0x700c1004, RK312X_GRF_GPIO2D_IOMUX);
 #else
+#ifdef CONFIG_PINCTRL
         if (lvds->pins && !IS_ERR(lvds->pins->default_state))
                 pinctrl_select_state(lvds->pins->p, lvds->pins->default_state);
+#endif
 #endif
 
        val |= v_LVDSMODE_EN(0) | v_MIPIPHY_TTL_EN(1);  /* enable lvds mode */
@@ -287,12 +313,34 @@ static struct rk_fb_trsm_ops trsm_lvds_ops = {
        .dsp_pwr_on = rk31xx_lvds_pwr_on,
        .dsp_pwr_off = rk31xx_lvds_pwr_off,
 };
+#if defined(CONFIG_OF)
+static struct rk_lvds_drvdata rk31xx_lvds_drvdata = {
+       .soc_type =  LVDS_SOC_RK312X,
+};
+
+static struct rk_lvds_drvdata rk3368_lvds_drvdata = {
+       .soc_type =  LVDS_SOC_RK3368,
+};
+
+
+static const struct of_device_id rk31xx_lvds_dt_ids[] = {
+       {.compatible = "rockchip,rk31xx-lvds",
+        .data = (void *)&rk31xx_lvds_drvdata,},
+       {.compatible = "rockchip,rk3368-lvds",
+        .data = (void *)&rk3368_lvds_drvdata,},
+       {}
+};
+
+/*MODULE_DEVICE_TABLE(of, rk31xx_lvds_dt_ids);*/
+
+#endif
 
 static int rk31xx_lvds_probe(struct platform_device *pdev)
 {
         struct rk_lvds_device *lvds;
        struct resource *res;
        struct device_node *np = pdev->dev.of_node;
+       const struct of_device_id *match;
         int ret = 0;
 
        if (!np) {
@@ -306,6 +354,10 @@ static int rk31xx_lvds_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        lvds->dev = &pdev->dev;
+       match = of_match_node(rk31xx_lvds_dt_ids, np);
+       lvds->data = (struct rk_lvds_drvdata *)match->data;
+       dev_info(lvds->dev, "%s,type=%d\n",
+                __func__, lvds->data->soc_type);
 
        rk_fb_get_prmry_screen(&lvds->screen);
         if ((lvds->screen.type != SCREEN_RGB) && 
@@ -318,6 +370,7 @@ static int rk31xx_lvds_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, lvds);
        dev_set_name(lvds->dev, "rk31xx-lvds");
 
+#ifdef CONFIG_PINCTRL
         if (lvds->dev->pins == NULL && lvds->screen.type == SCREEN_RGB) {
                 lvds->pins = devm_kzalloc(lvds->dev, sizeof(*(lvds->pins)),
                                           GFP_KERNEL);
@@ -344,6 +397,7 @@ static int rk31xx_lvds_probe(struct platform_device *pdev)
                 }
         }
 
+#endif
         /* lvds regs on MIPIPHY_REG */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mipi_lvds_phy");
        lvds->regbase = devm_ioremap_resource(&pdev->dev, res);
@@ -390,12 +444,6 @@ static void rk31xx_lvds_shutdown(struct platform_device *pdev)
        return;
 }
 
-#if defined(CONFIG_OF)
-static const struct of_device_id rk31xx_lvds_dt_ids[] = {
-       {.compatible = "rockchip,rk31xx-lvds",},
-        {}
-};
-#endif
 
 static struct platform_driver rk31xx_lvds_driver = {
        .driver         = {
index 0c271e476d81532da236a0b24c631e9d9d87760c..326749f9448f1d0bd1eb2539e1d2897b4f039437 100755 (executable)
 #define v_MIPIPHY_LANE0_EN(x)   (BITS_MASK(x, 1, 8) | BITS_EN(1, 8))
 #define v_MIPIDPI_FORCEX_EN(x)  (BITS_MASK(x, 1, 9) | BITS_EN(1, 9))
 
+/* RK3368_GRF_SOC_CON7 */
+#define v_RK3368_LVDS_OUTPUT_FORMAT(x) (BITS_MASK(x, 3, 13) | BITS_EN(3, 13))
+#define v_RK3368_LVDS_MSBSEL(x)        (BITS_MASK(x, 1, 11) | BITS_EN(1, 11))
+#define v_RK3368_LVDSMODE_EN(x)        (BITS_MASK(x, 1, 12) | BITS_EN(1, 12))
+#define v_RK3368_MIPIPHY_TTL_EN(x)     (BITS_MASK(x, 1, 15) | BITS_EN(1, 15))
+#define v_RK3368_MIPIPHY_LANE0_EN(x)   (BITS_MASK(x, 1, 5) | BITS_EN(1, 5))
+#define v_RK3368_MIPIDPI_FORCEX_EN(x)  (BITS_MASK(x, 1, 6) | BITS_EN(1, 6))
 enum {
         LVDS_DATA_FROM_LCDC = 0,
         LVDS_DATA_FORM_EBC,
@@ -98,8 +105,18 @@ enum {
 #define v_LANE1_EN(x)           BITS_MASK(x, 1, 6)
 #define v_LANE0_EN(x)           BITS_MASK(x, 1, 7)
 
+enum {
+       LVDS_SOC_RK312X,
+       LVDS_SOC_RK3368
+};
+
+struct rk_lvds_drvdata  {
+       u8 soc_type;
+       u32 reversed;
+};
 
 struct rk_lvds_device {
+       struct rk_lvds_drvdata *data;
        struct device           *dev;
        void __iomem            *regbase;
        void __iomem            *ctrl_reg;
index a7ce73029f5989de1cf3c53fc318bcfebfc99c87..933241a6ab10069a02d23dfea01bc1e25e53b29e 100644 (file)
@@ -791,6 +791,7 @@ static int virtio_pci_restore(struct device *dev)
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        struct virtio_driver *drv;
+       unsigned status = 0;
        int ret;
 
        drv = container_of(vp_dev->vdev.dev.driver,
@@ -801,14 +802,40 @@ static int virtio_pci_restore(struct device *dev)
                return ret;
 
        pci_set_master(pci_dev);
+       /* We always start by resetting the device, in case a previous
+        * driver messed it up. */
+       vp_reset(&vp_dev->vdev);
+
+       /* Acknowledge that we've seen the device. */
+       status |= VIRTIO_CONFIG_S_ACKNOWLEDGE;
+       vp_set_status(&vp_dev->vdev, status);
+
+       /* Maybe driver failed before freeze.
+        * Restore the failed status, for debugging. */
+       status |= vp_dev->saved_status & VIRTIO_CONFIG_S_FAILED;
+       vp_set_status(&vp_dev->vdev, status);
+
+       if (!drv)
+               return 0;
+
+       /* We have a driver! */
+       status |= VIRTIO_CONFIG_S_DRIVER;
+       vp_set_status(&vp_dev->vdev, status);
+
        vp_finalize_features(&vp_dev->vdev);
 
-       if (drv && drv->restore)
+       if (drv->restore) {
                ret = drv->restore(&vp_dev->vdev);
+               if (ret) {
+                       status |= VIRTIO_CONFIG_S_FAILED;
+                       vp_set_status(&vp_dev->vdev, status);
+                       return ret;
+               }
+       }
 
        /* Finally, tell the device we're all set */
-       if (!ret)
-               vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
+       status |= VIRTIO_CONFIG_S_DRIVER_OK;
+       vp_set_status(&vp_dev->vdev, status);
 
        return ret;
 }
index b193bf324a4123685483a7754537938267813734..e4bcfec7787edd729816c3983006bc1d503c140a 100644 (file)
@@ -403,7 +403,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
        ret = 0;
 fail:
        while (ret < 0 && !list_empty(&tmplist)) {
-               sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);
+               sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
@@ -754,7 +754,7 @@ again:
                                found_next = 1;
                        if (ret != 0)
                                goto insert;
-                       slot = 0;
+                       slot = path->slots[0];
                }
                btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
                if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
index 8fcd2424e7f9b878c3b53837d176cd1c602b8d13..187911fbabce01a38e673f60fca28c8a64add7c7 100644 (file)
@@ -3545,7 +3545,8 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
         * without delay
         */
        if (!btrfs_is_free_space_inode(inode)
-           && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
+           && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
+           && !root->fs_info->log_root_recovering) {
                btrfs_update_root_times(trans, root);
 
                ret = btrfs_delayed_update_inode(trans, root, inode);
index b3896d5f233a9f280666f0b7f9fd572ccbcee025..0e7f7765b3bbe232938fd92763ec4eb5f117cfcc 100644 (file)
@@ -967,8 +967,11 @@ again:
                                        need_check = false;
                                        list_add_tail(&edge->list[UPPER],
                                                      &list);
-                               } else
+                               } else {
+                                       if (upper->checked)
+                                               need_check = true;
                                        INIT_LIST_HEAD(&edge->list[UPPER]);
+                               }
                        } else {
                                upper = rb_entry(rb_node, struct backref_node,
                                                 rb_node);
index 0544587d74f4be48ece72380ee0f4335ad636f97..1f214689fa5e61a1c650eaed4efba4e03934b4b3 100644 (file)
@@ -524,7 +524,6 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
                if (transid <= root->fs_info->last_trans_committed)
                        goto out;
 
-               ret = -EINVAL;
                /* find specified transaction */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry(t, &root->fs_info->trans_list, list) {
@@ -540,9 +539,16 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
                        }
                }
                spin_unlock(&root->fs_info->trans_lock);
-               /* The specified transaction doesn't exist */
-               if (!cur_trans)
+
+               /*
+                * The specified transaction doesn't exist, or we
+                * raced with btrfs_commit_transaction
+                */
+               if (!cur_trans) {
+                       if (transid > root->fs_info->last_trans_committed)
+                               ret = -EINVAL;
                        goto out;
+               }
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&root->fs_info->trans_lock);
index 75964d734444b695c4575eed093504df34529449..83fedaa53b55c0118f3858d4296de3f3acb7e293 100644 (file)
@@ -985,7 +985,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        end_block = init_page_buffers(page, bdev,
-                                               index << sizebits, size);
+                                               (sector_t)index << sizebits,
+                                               size);
                        goto done;
                }
                if (!try_to_free_buffers(page))
@@ -1006,7 +1007,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
-       end_block = init_page_buffers(page, bdev, index << sizebits, size);
+       end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
+                       size);
        spin_unlock(&inode->i_mapping->private_lock);
 done:
        ret = (block < end_block) ? 1 : -ENXIO;
@@ -2016,6 +2018,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
                        struct page *page, void *fsdata)
 {
        struct inode *inode = mapping->host;
+       loff_t old_size = inode->i_size;
        int i_size_changed = 0;
 
        copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
@@ -2035,6 +2038,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
        unlock_page(page);
        page_cache_release(page);
 
+       if (old_size < pos)
+               pagecache_isize_extended(inode, old_size, pos);
        /*
         * Don't mark the inode dirty under page lock. First, it unnecessarily
         * makes the holding time of page lock longer. Second, it forces lock
@@ -2252,6 +2257,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
                err = 0;
 
                balance_dirty_pages_ratelimited(mapping);
+
+               if (unlikely(fatal_signal_pending(current))) {
+                       err = -EINTR;
+                       goto out;
+               }
        }
 
        /* page covers the boundary, find the boundary offset */
index e2c2d96491fa8dc879c87fff56089a27b3dab10d..f74dfa89c4c46db569bcb4e157b4779701e3efd0 100644 (file)
 #define SERVER_NAME_LENGTH 40
 #define SERVER_NAME_LEN_WITH_NULL     (SERVER_NAME_LENGTH + 1)
 
-/* used to define string lengths for reversing unicode strings */
-/*         (256+1)*2 = 514                                     */
-/*           (max path length + 1 for null) * 2 for unicode    */
-#define MAX_NAME 514
-
 /* SMB echo "timeout" -- FIXME: tunable? */
 #define SMB_ECHO_INTERVAL (60 * HZ)
 
@@ -380,6 +375,8 @@ struct smb_version_operations {
                        const char *, u32 *);
        int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
                        int);
+       /* check if we need to issue closedir */
+       bool (*dir_needs_close)(struct cifsFileInfo *);
 };
 
 struct smb_version_values {
index 8b0c656f2ab26d79c10648fa38129bb38fc52265..5fcc10fa62bd768d4858d0d2c3a4dae42bfb8bd1 100644 (file)
@@ -735,7 +735,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
 
        cifs_dbg(FYI, "Freeing private data in close dir\n");
        spin_lock(&cifs_file_list_lock);
-       if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
+       if (server->ops->dir_needs_close(cfile)) {
                cfile->invalidHandle = true;
                spin_unlock(&cifs_file_list_lock);
                if (server->ops->close_dir)
@@ -2809,7 +2809,7 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
                total_read += result;
        }
 
-       return total_read > 0 ? total_read : result;
+       return total_read > 0 && result != -EAGAIN ? total_read : result;
 }
 
 static ssize_t
@@ -3232,7 +3232,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
                total_read += result;
        }
 
-       return total_read > 0 ? total_read : result;
+       return total_read > 0 && result != -EAGAIN ? total_read : result;
 }
 
 static int cifs_readpages(struct file *file, struct address_space *mapping,
index 9d463501348f794283585d03f65beab1b4dfb357..0dee93706c9828b49e947820bb9ae05ff85b6198 100644 (file)
@@ -1640,13 +1640,22 @@ cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
 unlink_target:
        /* Try unlinking the target dentry if it's not negative */
        if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
-               tmprc = cifs_unlink(target_dir, target_dentry);
+               if (S_ISDIR(target_dentry->d_inode->i_mode))
+                       tmprc = cifs_rmdir(target_dir, target_dentry);
+               else
+                       tmprc = cifs_unlink(target_dir, target_dentry);
                if (tmprc)
                        goto cifs_rename_exit;
                rc = cifs_do_rename(xid, source_dentry, from_name,
                                    target_dentry, to_name);
        }
 
+       /* force revalidate to go get info when needed */
+       CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
+
+       source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
+               target_dir->i_mtime = current_fs_time(source_dir->i_sb);
+
 cifs_rename_exit:
        kfree(info_buf_source);
        kfree(from_name);
index 036279c064ff423726d27f4a192ece0b3eb3f057..85ebdaa210150f60d5ae4eafb8b8d390b3fc63ac 100644 (file)
@@ -582,11 +582,11 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon,
                /* close and restart search */
                cifs_dbg(FYI, "search backing up - close and restart search\n");
                spin_lock(&cifs_file_list_lock);
-               if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
+               if (server->ops->dir_needs_close(cfile)) {
                        cfile->invalidHandle = true;
                        spin_unlock(&cifs_file_list_lock);
-                       if (server->ops->close)
-                               server->ops->close(xid, tcon, &cfile->fid);
+                       if (server->ops->close_dir)
+                               server->ops->close_dir(xid, tcon, &cfile->fid);
                } else
                        spin_unlock(&cifs_file_list_lock);
                if (cfile->srch_inf.ntwrk_buf_start) {
index 4885a40f3210ec8a643e861429e42e5ea29984eb..610c6c24d41d23b1dedce5bbd71f3af2b57cc497 100644 (file)
@@ -885,6 +885,12 @@ cifs_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
                           (__u8)type, wait, 0);
 }
 
+static bool
+cifs_dir_needs_close(struct cifsFileInfo *cfile)
+{
+       return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
+}
+
 struct smb_version_operations smb1_operations = {
        .send_cancel = send_nt_cancel,
        .compare_fids = cifs_compare_fids,
@@ -948,6 +954,7 @@ struct smb_version_operations smb1_operations = {
        .mand_lock = cifs_mand_lock,
        .mand_unlock_range = cifs_unlock_range,
        .push_mand_locks = cifs_push_mandatory_locks,
+       .dir_needs_close = cifs_dir_needs_close,
 #ifdef CONFIG_CIFS_XATTR
        .query_all_EAs = CIFSSMBQAllEAs,
        .set_EA = CIFSSMBSetEA,
index 5da1b55a22581c5dd38daec3164c2ac32eb82a71..d801f63cddd03d707ac2bf20c97fd61c40ee4043 100644 (file)
@@ -73,7 +73,7 @@ smb2_open_file(const unsigned int xid, struct cifs_tcon *tcon, const char *path,
                goto out;
        }
 
-       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                            GFP_KERNEL);
        if (smb2_data == NULL) {
                rc = -ENOMEM;
index fff6dfba6204ddd291dd67c4213dfdfed4440c0f..6d535797ec764791f978f9b839b4d130ab8611b0 100644 (file)
@@ -123,7 +123,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
 
        *adjust_tz = false;
 
-       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                            GFP_KERNEL);
        if (smb2_data == NULL)
                return -ENOMEM;
index 7c2f45c06fc219357fa15d5b46f8aa8ac5449aba..4768cf8be6e2b8a2a3b128727a6f118eea5b7fb7 100644 (file)
@@ -214,7 +214,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
        {STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"},
        {STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"},
        {STATUS_BUFFER_OVERFLOW, -EIO, "STATUS_BUFFER_OVERFLOW"},
-       {STATUS_NO_MORE_FILES, -EIO, "STATUS_NO_MORE_FILES"},
+       {STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"},
        {STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"},
        {STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"},
        {STATUS_NO_INHERITANCE, -EIO, "STATUS_NO_INHERITANCE"},
@@ -605,7 +605,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
        {STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"},
        {STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"},
        {STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"},
-       {STATUS_CANNOT_DELETE, -EIO, "STATUS_CANNOT_DELETE"},
+       {STATUS_CANNOT_DELETE, -EACCES, "STATUS_CANNOT_DELETE"},
        {STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"},
        {STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"},
        {STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"},
index e2756bb40b4db49838f11578eb3cb937e61c8d33..e12f258a5ffab761148816fa6505c80c29a3a842 100644 (file)
@@ -243,7 +243,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
        int rc;
        struct smb2_file_all_info *smb2_data;
 
-       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                            GFP_KERNEL);
        if (smb2_data == NULL)
                return -ENOMEM;
@@ -554,6 +554,12 @@ smb2_new_lease_key(struct cifs_fid *fid)
        get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
 }
 
+static bool
+smb2_dir_needs_close(struct cifsFileInfo *cfile)
+{
+       return !cfile->invalidHandle;
+}
+
 struct smb_version_operations smb21_operations = {
        .compare_fids = smb2_compare_fids,
        .setup_request = smb2_setup_request,
@@ -618,6 +624,7 @@ struct smb_version_operations smb21_operations = {
        .set_lease_key = smb2_set_lease_key,
        .new_lease_key = smb2_new_lease_key,
        .calc_signature = smb2_calc_signature,
+       .dir_needs_close = smb2_dir_needs_close,
 };
 
 
@@ -685,6 +692,7 @@ struct smb_version_operations smb30_operations = {
        .set_lease_key = smb2_set_lease_key,
        .new_lease_key = smb2_new_lease_key,
        .calc_signature = smb3_calc_signature,
+       .dir_needs_close = smb2_dir_needs_close,
 };
 
 struct smb_version_values smb20_values = {
index c7a6fd87bb6eb8e5512f483498da9ef42573e651..eb0de4c3ca764b1c98814d0822f25512a8f87d81 100644 (file)
@@ -809,7 +809,8 @@ tcon_exit:
 tcon_error_exit:
        if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
                cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
-               tcon->bad_network_name = true;
+               if (tcon)
+                       tcon->bad_network_name = true;
        }
        goto tcon_exit;
 }
@@ -1203,7 +1204,7 @@ SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
 {
        return query_info(xid, tcon, persistent_fid, volatile_fid,
                          FILE_ALL_INFORMATION,
-                         sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+                         sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                          sizeof(struct smb2_file_all_info), data);
 }
 
@@ -1799,6 +1800,10 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
 
        if (rc) {
+               if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
+                       srch_inf->endOfSearch = true;
+                       rc = 0;
+               }
                cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
                goto qdir_exit;
        }
@@ -1836,11 +1841,6 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        else
                cifs_dbg(VFS, "illegal search buffer type\n");
 
-       if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
-               srch_inf->endOfSearch = 1;
-       else
-               srch_inf->endOfSearch = 0;
-
        return rc;
 
 qdir_exit:
index dafafbafa7313e2e7ef46d05619482b2edf5645d..1d402ce5b72f63bcbc1c8d58439aa69a0decea05 100644 (file)
@@ -299,7 +299,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
        if (unlikely(nr < 0))
                return nr;
 
-       tsk->flags = PF_DUMPCORE;
+       tsk->flags |= PF_DUMPCORE;
        if (atomic_read(&mm->mm_users) == nr + 1)
                goto done;
        /*
index 9a59653d3449753923e3ddfd97e9d9eca2f36f04..25c0a1b5f6c0ecab691c8081f1f282dacb44175a 100644 (file)
@@ -96,8 +96,6 @@ static struct kmem_cache *dentry_cache __read_mostly;
  * This hash-function tries to avoid losing too many bits of hash
  * information, yet avoid using a prime hash-size or similar.
  */
-#define D_HASHBITS     d_hash_shift
-#define D_HASHMASK     d_hash_mask
 
 static unsigned int d_hash_mask __read_mostly;
 static unsigned int d_hash_shift __read_mostly;
@@ -108,8 +106,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
                                        unsigned int hash)
 {
        hash += (unsigned long) parent / L1_CACHE_BYTES;
-       hash = hash + (hash >> D_HASHBITS);
-       return dentry_hashtable + (hash & D_HASHMASK);
+       return dentry_hashtable + hash_32(hash, d_hash_shift);
 }
 
 /* Statistics gathering. */
index 5eab400e25903b71641d4a056254c79f3a776885..41baf8b5e0eb23070b377a06f52a17e637940463 100644 (file)
@@ -1051,7 +1051,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
        }
 
        rc = vfs_setxattr(lower_dentry, name, value, size, flags);
-       if (!rc)
+       if (!rc && dentry->d_inode)
                fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
 out:
        return rc;
index dd6aa61c85486b74ce70ae802966dc85814c8317..cb7f31c71c6ba60e3e38118494c536751b0a2a7d 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1220,7 +1220,7 @@ EXPORT_SYMBOL(install_exec_creds);
 /*
  * determine how safe it is to execute the proposed program
  * - the caller must hold ->cred_guard_mutex to protect against
- *   PTRACE_ATTACH
+ *   PTRACE_ATTACH or seccomp thread-sync
  */
 static int check_unsafe_exec(struct linux_binprm *bprm)
 {
@@ -1239,7 +1239,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
         * This isn't strictly necessary, but it makes it harder for LSMs to
         * mess up.
         */
-       if (current->no_new_privs)
+       if (task_no_new_privs(current))
                bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
 
        n_fs = 1;
@@ -1286,7 +1286,7 @@ int prepare_binprm(struct linux_binprm *bprm)
        bprm->cred->egid = current_egid();
 
        if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
-           !current->no_new_privs &&
+           !task_no_new_privs(current) &&
            kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
            kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
                /* Set-uid? */
index 0a87bb10998dc00070bc6d05d6f536c21de44e3a..99d84ce038b8d5ee57f46bb333be00c9e58bf9de 100644 (file)
@@ -632,6 +632,8 @@ static int ext2_get_blocks(struct inode *inode,
        int count = 0;
        ext2_fsblk_t first_block = 0;
 
+       BUG_ON(maxblocks == 0);
+
        depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
 
        if (depth == 0)
index 1c3312858fcf42703b7bb1c6be52f24c9b1a5b47..e98171a11cfe19ef92aef9c516bfd6c1fc42cad8 100644 (file)
@@ -35,6 +35,7 @@ __ext2_get_block(struct inode *inode, pgoff_t pgoff, int create,
        int rc;
 
        memset(&tmp, 0, sizeof(struct buffer_head));
+       tmp.b_size = 1 << inode->i_blkbits;
        rc = ext2_get_block(inode, pgoff, &tmp, create);
        *result = tmp.b_blocknr;
 
index 6356665a74bb006a096023399fe8dca5363f1435..882d4bdfd4283385378a94013973392155b9de6f 100644 (file)
@@ -1300,13 +1300,6 @@ set_qf_format:
                                        "not specified.");
                        return 0;
                }
-       } else {
-               if (sbi->s_jquota_fmt) {
-                       ext3_msg(sb, KERN_ERR, "error: journaled quota format "
-                                       "specified with no journaling "
-                                       "enabled.");
-                       return 0;
-               }
        }
 #endif
        return 1;
index 790b14c5f262d415c90aa387185457b2971bb804..6d1ffab091bc08568d47bf528d1c6bfc1f55faee 100644 (file)
@@ -2062,7 +2062,8 @@ extern int ext4_mb_add_groupinfo(struct super_block *sb,
                ext4_group_t i, struct ext4_group_desc *desc);
 extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
                                ext4_fsblk_t block, unsigned long count);
-extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
+extern int ext4_trim_fs(struct super_block *, struct fstrim_range *,
+                               unsigned long blkdev_flags);
 
 /* inode.c */
 struct buffer_head *ext4_getblk(handle_t *, struct inode *,
@@ -2088,6 +2089,7 @@ int do_journal_get_write_access(handle_t *handle,
 #define CONVERT_INLINE_DATA     2
 
 extern struct inode *ext4_iget(struct super_block *, unsigned long);
+extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
 extern int  ext4_write_inode(struct inode *, struct writeback_control *);
 extern int  ext4_setattr(struct dentry *, struct iattr *);
 extern int  ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
@@ -2260,8 +2262,8 @@ extern int ext4_register_li_request(struct super_block *sb,
 static inline int ext4_has_group_desc_csum(struct super_block *sb)
 {
        return EXT4_HAS_RO_COMPAT_FEATURE(sb,
-                                         EXT4_FEATURE_RO_COMPAT_GDT_CSUM |
-                                         EXT4_FEATURE_RO_COMPAT_METADATA_CSUM);
+                                         EXT4_FEATURE_RO_COMPAT_GDT_CSUM) ||
+              (EXT4_SB(sb)->s_chksum_driver != NULL);
 }
 
 static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
index 1ecd3a8c2444b123e0b2fb6b69ab33182a350164..4d4718cf25abd2c17bfc6e8d60735cb7f947b741 100644 (file)
@@ -793,6 +793,10 @@ got:
                struct buffer_head *block_bitmap_bh;
 
                block_bitmap_bh = ext4_read_block_bitmap(sb, group);
+               if (!block_bitmap_bh) {
+                       err = -EIO;
+                       goto out;
+               }
                BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
                err = ext4_journal_get_write_access(handle, block_bitmap_bh);
                if (err) {
index f9e11df768d595d1a01d1e60e72d630617d25656..e48bd5a1814b72dbf751b1d4390a0bae167e4cb6 100644 (file)
@@ -2647,6 +2647,20 @@ static int ext4_nonda_switch(struct super_block *sb)
        return 0;
 }
 
+/* We always reserve for an inode update; the superblock could be there too */
+static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
+{
+       if (likely(EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                               EXT4_FEATURE_RO_COMPAT_LARGE_FILE)))
+               return 1;
+
+       if (pos + len <= 0x7fffffffULL)
+               return 1;
+
+       /* We might need to update the superblock to set LARGE_FILE */
+       return 2;
+}
+
 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned flags,
                               struct page **pagep, void **fsdata)
@@ -2697,7 +2711,8 @@ retry_grab:
         * of file which has an already mapped buffer.
         */
 retry_journal:
-       handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
+       handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
+                               ext4_da_write_credits(inode, pos, len));
        if (IS_ERR(handle)) {
                page_cache_release(page);
                return PTR_ERR(handle);
@@ -4351,6 +4366,13 @@ bad_inode:
        return ERR_PTR(ret);
 }
 
+struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
+{
+       if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
+               return ERR_PTR(-EIO);
+       return ext4_iget(sb, ino);
+}
+
 static int ext4_inode_blocks_set(handle_t *handle,
                                struct ext4_inode *raw_inode,
                                struct ext4_inode_info *ei)
index 42624a995b00ad95f603b2f12f3b30e3518f97a3..8855db670425e83f37f903961988674a8141ffbe 100644 (file)
@@ -549,9 +549,17 @@ group_add_out:
        }
 
        case EXT4_IOC_SWAP_BOOT:
+       {
+               int err;
                if (!(filp->f_mode & FMODE_WRITE))
                        return -EBADF;
-               return swap_inode_boot_loader(sb, inode);
+               err = mnt_want_write_file(filp);
+               if (err)
+                       return err;
+               err = swap_inode_boot_loader(sb, inode);
+               mnt_drop_write_file(filp);
+               return err;
+       }
 
        case EXT4_IOC_RESIZE_FS: {
                ext4_fsblk_t n_blocks_count;
@@ -597,11 +605,13 @@ resizefs_out:
                return err;
        }
 
+       case FIDTRIM:
        case FITRIM:
        {
                struct request_queue *q = bdev_get_queue(sb->s_bdev);
                struct fstrim_range range;
                int ret = 0;
+               int flags  = cmd == FIDTRIM ? BLKDEV_DISCARD_SECURE : 0;
 
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
@@ -609,13 +619,15 @@ resizefs_out:
                if (!blk_queue_discard(q))
                        return -EOPNOTSUPP;
 
+               if ((flags & BLKDEV_DISCARD_SECURE) && !blk_queue_secdiscard(q))
+                       return -EOPNOTSUPP;
                if (copy_from_user(&range, (struct fstrim_range __user *)arg,
                    sizeof(range)))
                        return -EFAULT;
 
                range.minlen = max((unsigned int)range.minlen,
                                   q->limits.discard_granularity);
-               ret = ext4_trim_fs(sb, &range);
+               ret = ext4_trim_fs(sb, &range, flags);
                if (ret < 0)
                        return ret;
 
index 16bb6591561b38ce36826ce6bc17b690f7ac4d07..576155cb0e49dca3932536ce5ffc4a90ef0d0016 100644 (file)
@@ -1396,6 +1396,8 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
        int last = first + count - 1;
        struct super_block *sb = e4b->bd_sb;
 
+       if (WARN_ON(count == 0))
+               return;
        BUG_ON(last >= (sb->s_blocksize << 3));
        assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
        mb_check_buddy(e4b);
@@ -2705,7 +2707,8 @@ int ext4_mb_release(struct super_block *sb)
 }
 
 static inline int ext4_issue_discard(struct super_block *sb,
-               ext4_group_t block_group, ext4_grpblk_t cluster, int count)
+               ext4_group_t block_group, ext4_grpblk_t cluster, int count,
+               unsigned long flags)
 {
        ext4_fsblk_t discard_block;
 
@@ -2714,7 +2717,7 @@ static inline int ext4_issue_discard(struct super_block *sb,
        count = EXT4_C2B(EXT4_SB(sb), count);
        trace_ext4_discard_blocks(sb,
                        (unsigned long long) discard_block, count);
-       return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
+       return sb_issue_discard(sb, discard_block, count, GFP_NOFS, flags);
 }
 
 /*
@@ -2736,7 +2739,7 @@ static void ext4_free_data_callback(struct super_block *sb,
        if (test_opt(sb, DISCARD)) {
                err = ext4_issue_discard(sb, entry->efd_group,
                                         entry->efd_start_cluster,
-                                        entry->efd_count);
+                                        entry->efd_count, 0);
                if (err && err != -EOPNOTSUPP)
                        ext4_msg(sb, KERN_WARNING, "discard request in"
                                 " group:%d block:%d count:%d failed"
@@ -3177,8 +3180,30 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
 {
        struct ext4_prealloc_space *pa = ac->ac_pa;
+       struct ext4_buddy e4b;
+       int err;
 
-       if (pa && pa->pa_type == MB_INODE_PA)
+       if (pa == NULL) {
+               if (ac->ac_f_ex.fe_len == 0)
+                       return;
+               err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
+               if (err) {
+                       /*
+                        * This should never happen since we pin the
+                        * pages in the ext4_allocation_context so
+                        * ext4_mb_load_buddy() should never fail.
+                        */
+                       WARN(1, "mb_load_buddy failed (%d)", err);
+                       return;
+               }
+               ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
+               mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
+                              ac->ac_f_ex.fe_len);
+               ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
+               ext4_mb_unload_buddy(&e4b);
+               return;
+       }
+       if (pa->pa_type == MB_INODE_PA)
                pa->pa_free += ac->ac_b_ex.fe_len;
 }
 
@@ -4765,7 +4790,8 @@ do_more:
                 * them with group lock_held
                 */
                if (test_opt(sb, DISCARD)) {
-                       err = ext4_issue_discard(sb, block_group, bit, count);
+                       err = ext4_issue_discard(sb, block_group, bit, count,
+                                                0);
                        if (err && err != -EOPNOTSUPP)
                                ext4_msg(sb, KERN_WARNING, "discard request in"
                                         " group:%d block:%d count:%lu failed"
@@ -4960,13 +4986,15 @@ error_return:
  * @count:     number of blocks to TRIM
  * @group:     alloc. group we are working with
  * @e4b:       ext4 buddy for the group
+ * @blkdev_flags: flags for the block device
  *
  * Trim "count" blocks starting at "start" in the "group". To assure that no
  * one will allocate those blocks, mark it as used in buddy bitmap. This must
  * be called with under the group lock.
  */
 static int ext4_trim_extent(struct super_block *sb, int start, int count,
-                            ext4_group_t group, struct ext4_buddy *e4b)
+                           ext4_group_t group, struct ext4_buddy *e4b,
+                           unsigned long blkdev_flags)
 {
        struct ext4_free_extent ex;
        int ret = 0;
@@ -4985,7 +5013,7 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count,
         */
        mb_mark_used(e4b, &ex);
        ext4_unlock_group(sb, group);
-       ret = ext4_issue_discard(sb, group, start, count);
+       ret = ext4_issue_discard(sb, group, start, count, blkdev_flags);
        ext4_lock_group(sb, group);
        mb_free_blocks(NULL, e4b, start, ex.fe_len);
        return ret;
@@ -4998,6 +5026,7 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count,
  * @start:             first group block to examine
  * @max:               last group block to examine
  * @minblocks:         minimum extent block count
+ * @blkdev_flags:      flags for the block device
  *
  * ext4_trim_all_free walks through group's buddy bitmap searching for free
  * extents. When the free block is found, ext4_trim_extent is called to TRIM
@@ -5012,7 +5041,7 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count,
 static ext4_grpblk_t
 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
                   ext4_grpblk_t start, ext4_grpblk_t max,
-                  ext4_grpblk_t minblocks)
+                  ext4_grpblk_t minblocks, unsigned long blkdev_flags)
 {
        void *bitmap;
        ext4_grpblk_t next, count = 0, free_count = 0;
@@ -5045,7 +5074,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
 
                if ((next - start) >= minblocks) {
                        ret = ext4_trim_extent(sb, start,
-                                              next - start, group, &e4b);
+                                              next - start, group, &e4b,
+                                              blkdev_flags);
                        if (ret && ret != -EOPNOTSUPP)
                                break;
                        ret = 0;
@@ -5087,6 +5117,7 @@ out:
  * ext4_trim_fs() -- trim ioctl handle function
  * @sb:                        superblock for filesystem
  * @range:             fstrim_range structure
+ * @blkdev_flags:      flags for the block device
  *
  * start:      First Byte to trim
  * len:                number of Bytes to trim from start
@@ -5095,7 +5126,8 @@ out:
  * start to start+len. For each such a group ext4_trim_all_free function
  * is invoked to trim all free space.
  */
-int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range,
+                       unsigned long blkdev_flags)
 {
        struct ext4_group_info *grp;
        ext4_group_t group, first_group, last_group;
@@ -5151,7 +5183,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 
                if (grp->bb_free >= minlen) {
                        cnt = ext4_trim_all_free(sb, group, first_cluster,
-                                               end, minlen);
+                                               end, minlen, blkdev_flags);
                        if (cnt < 0) {
                                ret = cnt;
                                break;
index ab2f6dc44b3abf88b62902433f48a1aa78ba8561..f1312173fa90a5fdd685362138d06be7b16e8597 100644 (file)
@@ -1430,7 +1430,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
                                         dentry->d_name.name);
                        return ERR_PTR(-EIO);
                }
-               inode = ext4_iget(dir->i_sb, ino);
+               inode = ext4_iget_normal(dir->i_sb, ino);
                if (inode == ERR_PTR(-ESTALE)) {
                        EXT4_ERROR_INODE(dir,
                                         "deleted inode referenced: %u",
@@ -1461,7 +1461,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
                return ERR_PTR(-EIO);
        }
 
-       return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino));
+       return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino));
 }
 
 /*
index c503850a61a860ea22b7018d8d59ccb2418957fe..a69bd74ed390eba9479653ad072cdcde0633a246 100644 (file)
@@ -1066,7 +1066,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
                        break;
 
                if (meta_bg == 0)
-                       backup_block = group * bpg + blk_off;
+                       backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
                else
                        backup_block = (ext4_group_first_block_no(sb, group) +
                                        ext4_bg_has_super(sb, group));
index 1fc14f7a08b29e067bdd95c51b4bfb5d117a112e..21a0b43a7d3187507eec5c333b9d42d2df1986e9 100644 (file)
@@ -964,7 +964,7 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
         * Currently we don't know the generation for parent directory, so
         * a generation of 0 means "accept any"
         */
-       inode = ext4_iget(sb, ino);
+       inode = ext4_iget_normal(sb, ino);
        if (IS_ERR(inode))
                return ERR_CAST(inode);
        if (generation && inode->i_generation != generation) {
@@ -1632,13 +1632,6 @@ static int parse_options(char *options, struct super_block *sb,
                                        "not specified");
                        return 0;
                }
-       } else {
-               if (sbi->s_jquota_fmt) {
-                       ext4_msg(sb, KERN_ERR, "journaled quota format "
-                                       "specified with no journaling "
-                                       "enabled");
-                       return 0;
-               }
        }
 #endif
        if (test_opt(sb, DIOREAD_NOLOCK)) {
@@ -1957,6 +1950,10 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
        }
 
        /* old crc16 code */
+       if (!(sbi->s_es->s_feature_ro_compat &
+             cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)))
+               return 0;
+
        offset = offsetof(struct ext4_group_desc, bg_checksum);
 
        crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
index 298e9c8da3648cf04d3f61c3f4de386da7b10b5e..a20816e7eb3a9542f4a884ec309656f25cec14db 100644 (file)
@@ -189,14 +189,28 @@ ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
 }
 
 static int
-ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
+ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
+                      void *value_start)
 {
-       while (!IS_LAST_ENTRY(entry)) {
-               struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry);
+       struct ext4_xattr_entry *e = entry;
+
+       while (!IS_LAST_ENTRY(e)) {
+               struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
                if ((void *)next >= end)
                        return -EIO;
-               entry = next;
+               e = next;
        }
+
+       while (!IS_LAST_ENTRY(entry)) {
+               if (entry->e_value_size != 0 &&
+                   (value_start + le16_to_cpu(entry->e_value_offs) <
+                    (void *)e + sizeof(__u32) ||
+                    value_start + le16_to_cpu(entry->e_value_offs) +
+                   le32_to_cpu(entry->e_value_size) > end))
+                       return -EIO;
+               entry = EXT4_XATTR_NEXT(entry);
+       }
+
        return 0;
 }
 
@@ -213,7 +227,8 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
                return -EIO;
        if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
                return -EIO;
-       error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
+       error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
+                                      bh->b_data);
        if (!error)
                set_buffer_verified(bh);
        return error;
@@ -329,7 +344,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
        header = IHDR(inode, raw_inode);
        entry = IFIRST(header);
        end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-       error = ext4_xattr_check_names(entry, end);
+       error = ext4_xattr_check_names(entry, end, entry);
        if (error)
                goto cleanup;
        error = ext4_xattr_find_entry(&entry, name_index, name,
@@ -457,7 +472,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
        raw_inode = ext4_raw_inode(&iloc);
        header = IHDR(inode, raw_inode);
        end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-       error = ext4_xattr_check_names(IFIRST(header), end);
+       error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
        if (error)
                goto cleanup;
        error = ext4_xattr_list_entries(dentry, IFIRST(header),
@@ -972,7 +987,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
        is->s.here = is->s.first;
        is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
        if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
-               error = ext4_xattr_check_names(IFIRST(header), is->s.end);
+               error = ext4_xattr_check_names(IFIRST(header), is->s.end,
+                                              IFIRST(header));
                if (error)
                        return error;
                /* Find the named attribute. */
index b5718516825b9b20377cc855f3956dc9f709bd5f..39a986e1da9eff707e2e5c5fc543627d3f116d97 100644 (file)
@@ -461,6 +461,17 @@ static const match_table_t tokens = {
        {OPT_ERR,                       NULL}
 };
 
+static int fuse_match_uint(substring_t *s, unsigned int *res)
+{
+       int err = -ENOMEM;
+       char *buf = match_strdup(s);
+       if (buf) {
+               err = kstrtouint(buf, 10, res);
+               kfree(buf);
+       }
+       return err;
+}
+
 static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
 {
        char *p;
@@ -471,6 +482,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
        while ((p = strsep(&opt, ",")) != NULL) {
                int token;
                int value;
+               unsigned uv;
                substring_t args[MAX_OPT_ARGS];
                if (!*p)
                        continue;
@@ -494,18 +506,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
                        break;
 
                case OPT_USER_ID:
-                       if (match_int(&args[0], &value))
+                       if (fuse_match_uint(&args[0], &uv))
                                return 0;
-                       d->user_id = make_kuid(current_user_ns(), value);
+                       d->user_id = make_kuid(current_user_ns(), uv);
                        if (!uid_valid(d->user_id))
                                return 0;
                        d->user_id_present = 1;
                        break;
 
                case OPT_GROUP_ID:
-                       if (match_int(&args[0], &value))
+                       if (fuse_match_uint(&args[0], &uv))
                                return 0;
-                       d->group_id = make_kgid(current_user_ns(), value);
+                       d->group_id = make_kgid(current_user_ns(), uv);
                        if (!gid_valid(d->group_id))
                                return 0;
                        d->group_id_present = 1;
index e50170ca7c33f446acc16e29a0d0097828919c30..31666c92b46af29919f42ea3e1093caed7127d71 100644 (file)
@@ -157,14 +157,16 @@ out:
 
 int ioprio_best(unsigned short aprio, unsigned short bprio)
 {
-       unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
-       unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
+       unsigned short aclass;
+       unsigned short bclass;
 
-       if (aclass == IOPRIO_CLASS_NONE)
-               aclass = IOPRIO_CLASS_BE;
-       if (bclass == IOPRIO_CLASS_NONE)
-               bclass = IOPRIO_CLASS_BE;
+       if (!ioprio_valid(aprio))
+               aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+       if (!ioprio_valid(bprio))
+               bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
 
+       aclass = IOPRIO_PRIO_CLASS(aprio);
+       bclass = IOPRIO_PRIO_CLASS(bprio);
        if (aclass == bclass)
                return min(aprio, bprio);
        if (aclass > bclass)
index d3705490ff9ca378de6ba1f63d7d1515634d6d98..10489bbd40fc5479d4a5ea366e2d849d1e74475a 100644 (file)
@@ -69,7 +69,7 @@ static void isofs_put_super(struct super_block *sb)
        return;
 }
 
-static int isofs_read_inode(struct inode *);
+static int isofs_read_inode(struct inode *, int relocated);
 static int isofs_statfs (struct dentry *, struct kstatfs *);
 
 static struct kmem_cache *isofs_inode_cachep;
@@ -1274,7 +1274,7 @@ out_toomany:
        goto out;
 }
 
-static int isofs_read_inode(struct inode *inode)
+static int isofs_read_inode(struct inode *inode, int relocated)
 {
        struct super_block *sb = inode->i_sb;
        struct isofs_sb_info *sbi = ISOFS_SB(sb);
@@ -1419,7 +1419,7 @@ static int isofs_read_inode(struct inode *inode)
         */
 
        if (!high_sierra) {
-               parse_rock_ridge_inode(de, inode);
+               parse_rock_ridge_inode(de, inode, relocated);
                /* if we want uid/gid set, override the rock ridge setting */
                if (sbi->s_uid_set)
                        inode->i_uid = sbi->s_uid;
@@ -1498,9 +1498,10 @@ static int isofs_iget5_set(struct inode *ino, void *data)
  * offset that point to the underlying meta-data for the inode.  The
  * code below is otherwise similar to the iget() code in
  * include/linux/fs.h */
-struct inode *isofs_iget(struct super_block *sb,
-                        unsigned long block,
-                        unsigned long offset)
+struct inode *__isofs_iget(struct super_block *sb,
+                          unsigned long block,
+                          unsigned long offset,
+                          int relocated)
 {
        unsigned long hashval;
        struct inode *inode;
@@ -1522,7 +1523,7 @@ struct inode *isofs_iget(struct super_block *sb,
                return ERR_PTR(-ENOMEM);
 
        if (inode->i_state & I_NEW) {
-               ret = isofs_read_inode(inode);
+               ret = isofs_read_inode(inode, relocated);
                if (ret < 0) {
                        iget_failed(inode);
                        inode = ERR_PTR(ret);
index 99167238518d61a30c4a5e6bfc838291d6206a5d..0ac4c1f73fbd6c2616e04ad6310995426c03da68 100644 (file)
@@ -107,7 +107,7 @@ extern int iso_date(char *, int);
 
 struct inode;          /* To make gcc happy */
 
-extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *);
+extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *, int relocated);
 extern int get_rock_ridge_filename(struct iso_directory_record *, char *, struct inode *);
 extern int isofs_name_translate(struct iso_directory_record *, char *, struct inode *);
 
@@ -118,9 +118,24 @@ extern struct dentry *isofs_lookup(struct inode *, struct dentry *, unsigned int
 extern struct buffer_head *isofs_bread(struct inode *, sector_t);
 extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
 
-extern struct inode *isofs_iget(struct super_block *sb,
-                                unsigned long block,
-                                unsigned long offset);
+struct inode *__isofs_iget(struct super_block *sb,
+                          unsigned long block,
+                          unsigned long offset,
+                          int relocated);
+
+static inline struct inode *isofs_iget(struct super_block *sb,
+                                      unsigned long block,
+                                      unsigned long offset)
+{
+       return __isofs_iget(sb, block, offset, 0);
+}
+
+static inline struct inode *isofs_iget_reloc(struct super_block *sb,
+                                            unsigned long block,
+                                            unsigned long offset)
+{
+       return __isofs_iget(sb, block, offset, 1);
+}
 
 /* Because the inode number is no longer relevant to finding the
  * underlying meta-data for an inode, we are free to choose a more
index c0bf42472e408fd16911cee33f3d9079943aa46a..f488bbae541ac8d5db4eb7e963c33452ebb3e937 100644 (file)
@@ -288,12 +288,16 @@ eio:
        goto out;
 }
 
+#define RR_REGARD_XA 1
+#define RR_RELOC_DE 2
+
 static int
 parse_rock_ridge_inode_internal(struct iso_directory_record *de,
-                               struct inode *inode, int regard_xa)
+                               struct inode *inode, int flags)
 {
        int symlink_len = 0;
        int cnt, sig;
+       unsigned int reloc_block;
        struct inode *reloc;
        struct rock_ridge *rr;
        int rootflag;
@@ -305,7 +309,7 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de,
 
        init_rock_state(&rs, inode);
        setup_rock_ridge(de, inode, &rs);
-       if (regard_xa) {
+       if (flags & RR_REGARD_XA) {
                rs.chr += 14;
                rs.len -= 14;
                if (rs.len < 0)
@@ -485,12 +489,22 @@ repeat:
                                        "relocated directory\n");
                        goto out;
                case SIG('C', 'L'):
-                       ISOFS_I(inode)->i_first_extent =
-                           isonum_733(rr->u.CL.location);
-                       reloc =
-                           isofs_iget(inode->i_sb,
-                                      ISOFS_I(inode)->i_first_extent,
-                                      0);
+                       if (flags & RR_RELOC_DE) {
+                               printk(KERN_ERR
+                                      "ISOFS: Recursive directory relocation "
+                                      "is not supported\n");
+                               goto eio;
+                       }
+                       reloc_block = isonum_733(rr->u.CL.location);
+                       if (reloc_block == ISOFS_I(inode)->i_iget5_block &&
+                           ISOFS_I(inode)->i_iget5_offset == 0) {
+                               printk(KERN_ERR
+                                      "ISOFS: Directory relocation points to "
+                                      "itself\n");
+                               goto eio;
+                       }
+                       ISOFS_I(inode)->i_first_extent = reloc_block;
+                       reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0);
                        if (IS_ERR(reloc)) {
                                ret = PTR_ERR(reloc);
                                goto out;
@@ -637,9 +651,11 @@ static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
        return rpnt;
 }
 
-int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
+int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
+                          int relocated)
 {
-       int result = parse_rock_ridge_inode_internal(de, inode, 0);
+       int flags = relocated ? RR_RELOC_DE : 0;
+       int result = parse_rock_ridge_inode_internal(de, inode, flags);
 
        /*
         * if rockridge flag was reset and we didn't look for attributes
@@ -647,7 +663,8 @@ int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
         */
        if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
            && (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
-               result = parse_rock_ridge_inode_internal(de, inode, 14);
+               result = parse_rock_ridge_inode_internal(de, inode,
+                                                        flags | RR_REGARD_XA);
        }
        return result;
 }
index 626846bac32f8a5f2c01d4e3f39db9332642a25c..6e2fb5cbacde6cae2d5650bf5891707950e0b82e 100644 (file)
@@ -427,6 +427,7 @@ static int do_one_pass(journal_t *journal,
        int                     tag_bytes = journal_tag_bytes(journal);
        __u32                   crc32_sum = ~0; /* Transactional Checksums */
        int                     descr_csum_size = 0;
+       int                     block_error = 0;
 
        /*
         * First thing is to establish what we expect to find in the log
@@ -521,6 +522,7 @@ static int do_one_pass(journal_t *journal,
                            !jbd2_descr_block_csum_verify(journal,
                                                          bh->b_data)) {
                                err = -EIO;
+                               brelse(bh);
                                goto failed;
                        }
 
@@ -599,7 +601,8 @@ static int do_one_pass(journal_t *journal,
                                                       "checksum recovering "
                                                       "block %llu in log\n",
                                                       blocknr);
-                                               continue;
+                                               block_error = 1;
+                                               goto skip_write;
                                        }
 
                                        /* Find a buffer for the new
@@ -798,7 +801,8 @@ static int do_one_pass(journal_t *journal,
                                success = -EIO;
                }
        }
-
+       if (block_error && success == 0)
+               success = -EIO;
        return success;
 
  failed:
index 413ef89c2d1ba32fe8507f29355d11c5873bc7d0..046fee8b6e9b3ee95d1d5f0c7a9ad7f94ef7c0c9 100644 (file)
@@ -134,8 +134,6 @@ struct jffs2_sb_info {
        struct rw_semaphore wbuf_sem;   /* Protects the write buffer */
 
        struct delayed_work wbuf_dwork; /* write-buffer write-out work */
-       int wbuf_queued;                /* non-zero delayed work is queued */
-       spinlock_t wbuf_dwork_lock;     /* protects wbuf_dwork and and wbuf_queued */
 
        unsigned char *oobbuf;
        int oobavail; /* How many bytes are available for JFFS2 in OOB */
index a6597d60d76de751224243ddfe6d990cd6de7e87..09ed55190ee2c077524e38a11b4fe41c00f51ac9 100644 (file)
@@ -1162,10 +1162,6 @@ static void delayed_wbuf_sync(struct work_struct *work)
        struct jffs2_sb_info *c = work_to_sb(work);
        struct super_block *sb = OFNI_BS_2SFFJ(c);
 
-       spin_lock(&c->wbuf_dwork_lock);
-       c->wbuf_queued = 0;
-       spin_unlock(&c->wbuf_dwork_lock);
-
        if (!(sb->s_flags & MS_RDONLY)) {
                jffs2_dbg(1, "%s()\n", __func__);
                jffs2_flush_wbuf_gc(c, 0);
@@ -1180,14 +1176,9 @@ void jffs2_dirty_trigger(struct jffs2_sb_info *c)
        if (sb->s_flags & MS_RDONLY)
                return;
 
-       spin_lock(&c->wbuf_dwork_lock);
-       if (!c->wbuf_queued) {
+       delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+       if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
                jffs2_dbg(1, "%s()\n", __func__);
-               delay = msecs_to_jiffies(dirty_writeback_interval * 10);
-               queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);
-               c->wbuf_queued = 1;
-       }
-       spin_unlock(&c->wbuf_dwork_lock);
 }
 
 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
@@ -1211,7 +1202,6 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
 
        /* Initialise write buffer */
        init_rwsem(&c->wbuf_sem);
-       spin_lock_init(&c->wbuf_dwork_lock);
        INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
        c->wbuf_pagesize = c->mtd->writesize;
        c->wbuf_ofs = 0xFFFFFFFF;
@@ -1251,7 +1241,6 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
 
        /* Initialize write buffer */
        init_rwsem(&c->wbuf_sem);
-       spin_lock_init(&c->wbuf_dwork_lock);
        INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
        c->wbuf_pagesize =  c->mtd->erasesize;
 
@@ -1311,7 +1300,6 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
 
        /* Initialize write buffer */
        init_rwsem(&c->wbuf_sem);
-       spin_lock_init(&c->wbuf_dwork_lock);
        INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
 
        c->wbuf_pagesize = c->mtd->writesize;
@@ -1346,7 +1334,6 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
                return 0;
 
        init_rwsem(&c->wbuf_sem);
-       spin_lock_init(&c->wbuf_dwork_lock);
        INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
 
        c->wbuf_pagesize =  c->mtd->writesize;
index 1812f026960c4229dd4c3d198b129f9554eb6fc4..6ae664b489af43a5e0936bd2e0d4b40bd016d725 100644 (file)
@@ -159,6 +159,12 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
 
        msg.rpc_proc = &clnt->cl_procinfo[proc];
        status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
+       if (status == -ECONNREFUSED) {
+               dprintk("lockd: NSM upcall RPC failed, status=%d, forcing rebind\n",
+                               status);
+               rpc_force_rebind(clnt);
+               status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
+       }
        if (status < 0)
                dprintk("lockd: NSM upcall RPC failed, status=%d\n",
                                status);
index d56a9904e52ab3fff2d004b5fd756b27b0ec7bb5..9c8a5a6d33dfed93e2819dd3215f9fb2cb7916ef 100644 (file)
@@ -253,13 +253,11 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net)
 
        error = make_socks(serv, net);
        if (error < 0)
-               goto err_socks;
+               goto err_bind;
        set_grace_period(net);
        dprintk("lockd_up_net: per-net data created; net=%p\n", net);
        return 0;
 
-err_socks:
-       svc_rpcb_cleanup(serv, net);
 err_bind:
        ln->nlmsvc_users--;
        return error;
index 6ac16a37ded29dea1d9d1335626540ab50c4ffab..f7c4393f853533da6a7f4fb459896e596345b131 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/device_cgroup.h>
 #include <linux/fs_struct.h>
 #include <linux/posix_acl.h>
+#include <linux/hash.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -1647,8 +1648,7 @@ static inline int can_lookup(struct inode *inode)
 
 static inline unsigned int fold_hash(unsigned long hash)
 {
-       hash += hash >> (8*sizeof(int));
-       return hash;
+       return hash_64(hash, 32);
 }
 
 #else  /* 32-bit case */
index a45ba4f267fe6e834909f122dd166cdd608dcce5..154822397780a3ce3d0d962e81b1042ed193f68e 100644 (file)
@@ -828,8 +828,21 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
 
        mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
        /* Don't allow unprivileged users to change mount flags */
-       if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
-               mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
+       if (flag & CL_UNPRIVILEGED) {
+               mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
+
+               if (mnt->mnt.mnt_flags & MNT_READONLY)
+                       mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
+
+               if (mnt->mnt.mnt_flags & MNT_NODEV)
+                       mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;
+
+               if (mnt->mnt.mnt_flags & MNT_NOSUID)
+                       mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;
+
+               if (mnt->mnt.mnt_flags & MNT_NOEXEC)
+                       mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
+       }
 
        atomic_inc(&sb->s_active);
        mnt->mnt.mnt_sb = sb;
@@ -1261,6 +1274,8 @@ static int do_umount(struct mount *mnt, int flags)
                 * Special case for "unmounting" root ...
                 * we just try to remount it readonly.
                 */
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
                down_write(&sb->s_umount);
                if (!(sb->s_flags & MS_RDONLY))
                        retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
@@ -1764,9 +1779,6 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
        if (readonly_request == __mnt_is_readonly(mnt))
                return 0;
 
-       if (mnt->mnt_flags & MNT_LOCK_READONLY)
-               return -EPERM;
-
        if (readonly_request)
                error = mnt_make_readonly(real_mount(mnt));
        else
@@ -1792,6 +1804,33 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
        if (path->dentry != path->mnt->mnt_root)
                return -EINVAL;
 
+       /* Don't allow changing of locked mnt flags.
+        *
+        * No locks need to be held here while testing the various
+        * MNT_LOCK flags because those flags can never be cleared
+        * once they are set.
+        */
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
+           !(mnt_flags & MNT_READONLY)) {
+               return -EPERM;
+       }
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
+           !(mnt_flags & MNT_NODEV)) {
+               return -EPERM;
+       }
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
+           !(mnt_flags & MNT_NOSUID)) {
+               return -EPERM;
+       }
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
+           !(mnt_flags & MNT_NOEXEC)) {
+               return -EPERM;
+       }
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
+           ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
+               return -EPERM;
+       }
+
        err = security_sb_remount(sb, data);
        if (err)
                return err;
@@ -1805,7 +1844,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
                err = do_remount_sb(sb, flags, data, 0);
        if (!err) {
                br_write_lock(&vfsmount_lock);
-               mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
+               mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
                mnt->mnt.mnt_flags = mnt_flags;
                br_write_unlock(&vfsmount_lock);
        }
@@ -1991,7 +2030,7 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
                 */
                if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
                        flags |= MS_NODEV;
-                       mnt_flags |= MNT_NODEV;
+                       mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
                }
        }
 
@@ -2309,6 +2348,14 @@ long do_mount(const char *dev_name, const char *dir_name,
        if (flags & MS_RDONLY)
                mnt_flags |= MNT_READONLY;
 
+       /* The default atime for remount is preservation */
+       if ((flags & MS_REMOUNT) &&
+           ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
+                      MS_STRICTATIME)) == 0)) {
+               mnt_flags &= ~MNT_ATIME_MASK;
+               mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
+       }
+
        flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
                   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
                   MS_STRICTATIME);
@@ -2649,6 +2696,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
        /* make sure we can reach put_old from new_root */
        if (!is_path_reachable(old_mnt, old.dentry, &new))
                goto out4;
+       /* make certain new is below the root */
+       if (!is_path_reachable(new_mnt, new.dentry, &root))
+               goto out4;
        root_mp->m_count++; /* pin it so it won't go away */
        br_write_lock(&vfsmount_lock);
        detach_mnt(new_mnt, &parent_path);
index 4b49a8c6ccade73d7c28f6c5b30dbb05d208b820..ef0c394b7bf55cc5891e01fc4647acff2db41307 100644 (file)
@@ -108,6 +108,8 @@ again:
                        continue;
                if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
                        continue;
+               if (!nfs4_valid_open_stateid(state))
+                       continue;
                if (!nfs4_stateid_match(&state->stateid, stateid))
                        continue;
                get_nfs_open_context(ctx);
@@ -175,7 +177,11 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *
 {
        int res = 0;
 
-       res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
+       if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+               res = nfs4_proc_delegreturn(inode,
+                               delegation->cred,
+                               &delegation->stateid,
+                               issync);
        nfs_free_delegation(delegation);
        return res;
 }
@@ -361,11 +367,13 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
 {
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
-       int err;
+       int err = 0;
 
        if (delegation == NULL)
                return 0;
        do {
+               if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+                       break;
                err = nfs_delegation_claim_opens(inode, &delegation->stateid);
                if (!issync || err != -EAGAIN)
                        break;
@@ -586,10 +594,23 @@ static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *cl
        rcu_read_unlock();
 }
 
+static void nfs_revoke_delegation(struct inode *inode)
+{
+       struct nfs_delegation *delegation;
+       rcu_read_lock();
+       delegation = rcu_dereference(NFS_I(inode)->delegation);
+       if (delegation != NULL) {
+               set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
+               nfs_mark_return_delegation(NFS_SERVER(inode), delegation);
+       }
+       rcu_read_unlock();
+}
+
 void nfs_remove_bad_delegation(struct inode *inode)
 {
        struct nfs_delegation *delegation;
 
+       nfs_revoke_delegation(inode);
        delegation = nfs_inode_detach_delegation(inode);
        if (delegation) {
                nfs_inode_find_state_and_recover(inode, &delegation->stateid);
index 9a79c7a99d6d6dd64b03f58481dc8d0fb4b32818..e02b090ab9da86f6c5c3d7c8217b07bd72c282cc 100644 (file)
@@ -31,6 +31,7 @@ enum {
        NFS_DELEGATION_RETURN_IF_CLOSED,
        NFS_DELEGATION_REFERENCED,
        NFS_DELEGATION_RETURNING,
+       NFS_DELEGATION_REVOKED,
 };
 
 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
index 0bd7a55a5f073befd4d0ce97e87cca2dd0d42e37..725e87538c98ad71ae5f28ded813d9e61fb8a6d1 100644 (file)
@@ -180,6 +180,7 @@ static void nfs_direct_req_free(struct kref *kref)
 {
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
 
+       nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
        if (dreq->l_ctx != NULL)
                nfs_put_lock_context(dreq->l_ctx);
        if (dreq->ctx != NULL)
index 79872e22e4aeea8417713ecb40a5bd18e13a4310..e2bb3012d025755e085564fb7b276e8850705f85 100644 (file)
@@ -519,7 +519,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
 {
        struct inode *inode = dentry->d_inode;
        int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
-       int err;
+       int err = 0;
 
        /* Flush out writes to the server in order to update c/mtime.  */
        if (S_ISREG(inode->i_mode)) {
index 4a1aafba6a20030532ba589ac9db021b10c21ceb..8c34f57a9aef4280bbb594afbb3c3b0390fa8f78 100644 (file)
@@ -305,7 +305,10 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
                .rpc_argp       = &args,
                .rpc_resp       = &fattr,
        };
-       int status;
+       int status = 0;
+
+       if (acl == NULL && (!S_ISDIR(inode->i_mode) || dfacl == NULL))
+               goto out;
 
        status = -EOPNOTSUPP;
        if (!nfs_server_capable(inode, NFS_CAP_ACLS))
index 02773aab43c52a828ec890f5f56e4279f906d5b2..cc143ee7a56ed80695f93a1685d39aa1ed87f0f5 100644 (file)
@@ -311,6 +311,16 @@ int nfs40_walk_client_list(struct nfs_client *new,
 
        spin_lock(&nn->nfs_client_lock);
        list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
+
+               if (pos->rpc_ops != new->rpc_ops)
+                       continue;
+
+               if (pos->cl_proto != new->cl_proto)
+                       continue;
+
+               if (pos->cl_minorversion != new->cl_minorversion)
+                       continue;
+
                /* If "pos" isn't marked ready, we can't trust the
                 * remaining fields in "pos" */
                if (pos->cl_cons_state > NFS_CS_READY) {
@@ -330,15 +340,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
                if (pos->cl_cons_state != NFS_CS_READY)
                        continue;
 
-               if (pos->rpc_ops != new->rpc_ops)
-                       continue;
-
-               if (pos->cl_proto != new->cl_proto)
-                       continue;
-
-               if (pos->cl_minorversion != new->cl_minorversion)
-                       continue;
-
                if (pos->cl_clientid != new->cl_clientid)
                        continue;
 
@@ -444,6 +445,16 @@ int nfs41_walk_client_list(struct nfs_client *new,
 
        spin_lock(&nn->nfs_client_lock);
        list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
+
+               if (pos->rpc_ops != new->rpc_ops)
+                       continue;
+
+               if (pos->cl_proto != new->cl_proto)
+                       continue;
+
+               if (pos->cl_minorversion != new->cl_minorversion)
+                       continue;
+
                /* If "pos" isn't marked ready, we can't trust the
                 * remaining fields in "pos", especially the client
                 * ID and serverowner fields.  Wait for CREATE_SESSION
@@ -469,15 +480,6 @@ int nfs41_walk_client_list(struct nfs_client *new,
                if (pos->cl_cons_state != NFS_CS_READY)
                        continue;
 
-               if (pos->rpc_ops != new->rpc_ops)
-                       continue;
-
-               if (pos->cl_proto != new->cl_proto)
-                       continue;
-
-               if (pos->cl_minorversion != new->cl_minorversion)
-                       continue;
-
                if (!nfs4_match_clientids(pos, new))
                        continue;
 
index bfeb1d13b08fe557b12fc314b2512298f5ab60e1..a4eaa40e7bdb50f9e1eadc87d006736fb20bfc6d 100644 (file)
@@ -1416,7 +1416,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
                        nfs_inode_find_state_and_recover(state->inode,
                                        stateid);
                        nfs4_schedule_stateid_recovery(server, state);
-                       return 0;
+                       return -EAGAIN;
                case -NFS4ERR_DELAY:
                case -NFS4ERR_GRACE:
                        set_bit(NFS_DELEGATED_STATE, &state->flags);
@@ -1845,6 +1845,28 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta
        return ret;
 }
 
+static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
+{
+       nfs_remove_bad_delegation(state->inode);
+       write_seqlock(&state->seqlock);
+       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+       write_sequnlock(&state->seqlock);
+       clear_bit(NFS_DELEGATED_STATE, &state->flags);
+}
+
+static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
+{
+       if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
+               nfs_finish_clear_delegation_stateid(state);
+}
+
+static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
+{
+       /* NFSv4.0 doesn't allow for delegation recovery on open expire */
+       nfs40_clear_delegation_stateid(state);
+       return nfs4_open_expired(sp, state);
+}
+
 #if defined(CONFIG_NFS_V4_1)
 static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
 {
@@ -2287,6 +2309,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
        struct nfs4_closedata *calldata = data;
        struct nfs4_state *state = calldata->state;
        struct inode *inode = calldata->inode;
+       bool is_rdonly, is_wronly, is_rdwr;
        int call_close = 0;
 
        dprintk("%s: begin!\n", __func__);
@@ -2294,21 +2317,27 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
                goto out_wait;
 
        task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
-       calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
        spin_lock(&state->owner->so_lock);
+       is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
+       is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
+       is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
        /* Calculate the change in open mode */
+       calldata->arg.fmode = 0;
        if (state->n_rdwr == 0) {
-               if (state->n_rdonly == 0) {
-                       call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
-                       call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
-                       calldata->arg.fmode &= ~FMODE_READ;
-               }
-               if (state->n_wronly == 0) {
-                       call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
-                       call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
-                       calldata->arg.fmode &= ~FMODE_WRITE;
-               }
-       }
+               if (state->n_rdonly == 0)
+                       call_close |= is_rdonly;
+               else if (is_rdonly)
+                       calldata->arg.fmode |= FMODE_READ;
+               if (state->n_wronly == 0)
+                       call_close |= is_wronly;
+               else if (is_wronly)
+                       calldata->arg.fmode |= FMODE_WRITE;
+       } else if (is_rdwr)
+               calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
+
+       if (calldata->arg.fmode == 0)
+               call_close |= is_rdwr;
+
        if (!nfs4_valid_open_stateid(state))
                call_close = 0;
        spin_unlock(&state->owner->so_lock);
@@ -6060,7 +6089,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
        int ret = 0;
 
        if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
-               return 0;
+               return -EAGAIN;
        task = _nfs41_proc_sequence(clp, cred, false);
        if (IS_ERR(task))
                ret = PTR_ERR(task);
@@ -6967,7 +6996,7 @@ static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
        .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
        .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
-       .recover_open   = nfs4_open_expired,
+       .recover_open   = nfs40_open_expired,
        .recover_lock   = nfs4_lock_expired,
        .establish_clid = nfs4_init_clientid,
        .get_clid_cred  = nfs4_get_setclientid_cred,
index 1720d32ffa545670398d16e077a094aae781528b..e1ba58c3d1ad305ab28d932a5b90ac269092f98b 100644 (file)
@@ -88,10 +88,18 @@ nfs4_renew_state(struct work_struct *work)
                        }
                        nfs_expire_all_delegations(clp);
                } else {
+                       int ret;
+
                        /* Queue an asynchronous RENEW. */
-                       ops->sched_state_renewal(clp, cred, renew_flags);
+                       ret = ops->sched_state_renewal(clp, cred, renew_flags);
                        put_rpccred(cred);
-                       goto out_exp;
+                       switch (ret) {
+                       default:
+                               goto out_exp;
+                       case -EAGAIN:
+                       case -ENOMEM:
+                               break;
+                       }
                }
        } else {
                dprintk("%s: failed to call renewd. Reason: lease not expired \n",
index 2c37442ed9369c7bccdba4f75797dcea314357b7..d482b86d0e0bcfff249d5bb57517632152cc01a4 100644 (file)
@@ -1699,7 +1699,8 @@ restart:
                        if (status < 0) {
                                set_bit(ops->owner_flag_bit, &sp->so_flags);
                                nfs4_put_state_owner(sp);
-                               return nfs4_recovery_handle_error(clp, status);
+                               status = nfs4_recovery_handle_error(clp, status);
+                               return (status != 0) ? status : -EAGAIN;
                        }
 
                        nfs4_put_state_owner(sp);
@@ -1708,7 +1709,7 @@ restart:
                spin_unlock(&clp->cl_lock);
        }
        rcu_read_unlock();
-       return status;
+       return 0;
 }
 
 static int nfs4_check_lease(struct nfs_client *clp)
@@ -1755,7 +1756,6 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
                break;
        case -NFS4ERR_STALE_CLIENTID:
                clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
-               nfs4_state_clear_reclaim_reboot(clp);
                nfs4_state_start_reclaim_reboot(clp);
                break;
        case -NFS4ERR_CLID_INUSE:
@@ -2174,14 +2174,11 @@ static void nfs4_state_manager(struct nfs_client *clp)
                        section = "reclaim reboot";
                        status = nfs4_do_reclaim(clp,
                                clp->cl_mvops->reboot_recovery_ops);
-                       if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
-                           test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
-                               continue;
-                       nfs4_state_end_reclaim_reboot(clp);
-                       if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
+                       if (status == -EAGAIN)
                                continue;
                        if (status < 0)
                                goto out_error;
+                       nfs4_state_end_reclaim_reboot(clp);
                }
 
                /* Now recover expired state... */
@@ -2189,9 +2186,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
                        section = "reclaim nograce";
                        status = nfs4_do_reclaim(clp,
                                clp->cl_mvops->nograce_recovery_ops);
-                       if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
-                           test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
-                           test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
+                       if (status == -EAGAIN)
                                continue;
                        if (status < 0)
                                goto out_error;
index 3eaa6e30a2dc701a1a179c292c32c168942b6898..cc8c5b32043cb9f221a739957a95a4e6fd71f78b 100644 (file)
@@ -672,7 +672,8 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
                clp->cl_cb_session = ses;
                args.bc_xprt = conn->cb_xprt;
                args.prognumber = clp->cl_cb_session->se_cb_prog;
-               args.protocol = XPRT_TRANSPORT_BC_TCP;
+               args.protocol = conn->cb_xprt->xpt_class->xcl_ident |
+                               XPRT_TRANSPORT_BC;
                args.authflavor = ses->se_cb_sec.flavor;
        }
        /* Create RPC client */
index 0f9ce13972d03cc30c57816fbcbf666b5150e57a..9240dd1678da6ee3890ad6651288764d64c2a29d 100644 (file)
@@ -1191,7 +1191,8 @@ static bool need_wrongsec_check(struct svc_rqst *rqstp)
         */
        if (argp->opcnt == resp->opcnt)
                return false;
-
+       if (next->opnum == OP_ILLEGAL)
+               return false;
        nextd = OPDESC(next);
        /*
         * Rest of 2.6.3.1.1: certain operations will return WRONGSEC
index 262df5ccbf59db0c4fd516a30fa721945b55f600..8016892f3f052d5c3763a4e7ce2fdbc461858646 100644 (file)
@@ -220,7 +220,8 @@ static int nfsd_startup_generic(int nrservs)
         */
        ret = nfsd_racache_init(2*nrservs);
        if (ret)
-               return ret;
+               goto dec_users;
+
        ret = nfs4_state_start();
        if (ret)
                goto out_racache;
@@ -228,6 +229,8 @@ static int nfsd_startup_generic(int nrservs)
 
 out_racache:
        nfsd_racache_shutdown();
+dec_users:
+       nfsd_users--;
        return ret;
 }
 
index bccfec8343c5ee34925cea97b8fc4006f8265084..2e1372efbb00ede1c899ac8d9941a2ca1bac86aa 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/buffer_head.h>
 #include <linux/gfp.h>
 #include <linux/mpage.h>
+#include <linux/pagemap.h>
 #include <linux/writeback.h>
 #include <linux/aio.h>
 #include "nilfs.h"
@@ -219,10 +220,10 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
 
 static int nilfs_set_page_dirty(struct page *page)
 {
+       struct inode *inode = page->mapping->host;
        int ret = __set_page_dirty_nobuffers(page);
 
        if (page_has_buffers(page)) {
-               struct inode *inode = page->mapping->host;
                unsigned nr_dirty = 0;
                struct buffer_head *bh, *head;
 
@@ -245,6 +246,10 @@ static int nilfs_set_page_dirty(struct page *page)
 
                if (nr_dirty)
                        nilfs_set_file_dirty(inode, nr_dirty);
+       } else if (ret) {
+               unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
+               nilfs_set_file_dirty(inode, nr_dirty);
        }
        return ret;
 }
index f1680cdbd88bcecc78fa3565a89e99b061002d05..9be6b4163406fe216dc699355e4e0b45e9e16df8 100644 (file)
@@ -69,7 +69,7 @@ static int create_fd(struct fsnotify_group *group,
 
        pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
-       client_fd = get_unused_fd();
+       client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
        if (client_fd < 0)
                return client_fd;
 
index 238a5930cb3c7d16e1c76952e66c6bf24f5299ae..9d7e2b9659cbdf2687e26bbbf0dc784ba7cced6d 100644 (file)
@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
 {
        struct {
                struct file_handle handle;
-               u8 pad[64];
+               u8 pad[MAX_HANDLE_SZ];
        } f;
        int size, ret, i;
 
@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
        size = f.handle.handle_bytes >> 2;
 
        ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
-       if ((ret == 255) || (ret == -ENOSPC)) {
+       if ((ret == FILEID_INVALID) || (ret < 0)) {
                WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
                return 0;
        }
index 33ecbe0e6734a7deaf0c8712934b9eb78fb44197..2b941113e42332b3e914f386e5318f690c69e6d6 100644 (file)
@@ -653,12 +653,9 @@ void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
        clear_bit(bit, res->refmap);
 }
 
-
-void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res)
 {
-       assert_spin_locked(&res->spinlock);
-
        res->inflight_locks++;
 
        mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
@@ -666,6 +663,13 @@ void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
             __builtin_return_address(0));
 }
 
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+                                  struct dlm_lock_resource *res)
+{
+       assert_spin_locked(&res->spinlock);
+       __dlm_lockres_grab_inflight_ref(dlm, res);
+}
+
 void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res)
 {
@@ -855,10 +859,8 @@ lookup:
        /* finally add the lockres to its hash bucket */
        __dlm_insert_lockres(dlm, res);
 
-       /* Grab inflight ref to pin the resource */
-       spin_lock(&res->spinlock);
-       dlm_lockres_grab_inflight_ref(dlm, res);
-       spin_unlock(&res->spinlock);
+       /* since this lockres is new it does not require the spinlock */
+       __dlm_lockres_grab_inflight_ref(dlm, res);
 
        /* get an extra ref on the mle in case this is a BLOCK
         * if so, the creator of the BLOCK may try to put the last
index cbd0f1b324b972b96f036139fcd96bf53a03fb01..09f0d9c374a32dd79d1fc10f5087510915db8a97 100644 (file)
@@ -304,15 +304,11 @@ static void render_cap_t(struct seq_file *m, const char *header,
        seq_puts(m, header);
        CAP_FOR_EACH_U32(__capi) {
                seq_printf(m, "%08x",
-                          a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
+                          a->cap[CAP_LAST_U32 - __capi]);
        }
        seq_putc(m, '\n');
 }
 
-/* Remove non-existent capabilities */
-#define NORM_CAPS(v) (v.cap[CAP_TO_INDEX(CAP_LAST_CAP)] &= \
-                               CAP_TO_MASK(CAP_LAST_CAP + 1) - 1)
-
 static inline void task_cap(struct seq_file *m, struct task_struct *p)
 {
        const struct cred *cred;
@@ -326,11 +322,6 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p)
        cap_bset        = cred->cap_bset;
        rcu_read_unlock();
 
-       NORM_CAPS(cap_inheritable);
-       NORM_CAPS(cap_permitted);
-       NORM_CAPS(cap_effective);
-       NORM_CAPS(cap_bset);
-
        render_cap_t(m, "CapInh:\t", &cap_inheritable);
        render_cap_t(m, "CapPrm:\t", &cap_permitted);
        render_cap_t(m, "CapEff:\t", &cap_effective);
index e4bcb2cf055a1dc8f81df7cbb38e8cdab4add0b0..3ba30825f387d847c04054f85608f735458e811b 100644 (file)
@@ -316,10 +316,10 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
                sprintf(name, "dmesg-%s-%lld", psname, id);
                break;
        case PSTORE_TYPE_CONSOLE:
-               sprintf(name, "console-%s", psname);
+               sprintf(name, "console-%s-%lld", psname, id);
                break;
        case PSTORE_TYPE_FTRACE:
-               sprintf(name, "ftrace-%s", psname);
+               sprintf(name, "ftrace-%s-%lld", psname, id);
                break;
        case PSTORE_TYPE_MCE:
                sprintf(name, "mce-%s-%lld", psname, id);
index 38802d683969ed95b5177ca475fdea517bd7e1aa..7a10e047bc33bd039e0dbe7b82081254c47b64b4 100644 (file)
@@ -637,7 +637,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
                        dqstats_inc(DQST_LOOKUPS);
                        err = sb->dq_op->write_dquot(dquot);
                        if (!ret && err)
-                               err = ret;
+                               ret = err;
                        dqput(dquot);
                        spin_lock(&dq_list_lock);
                }
index 3dd44db1465e9a55c7e7eb9cb0c39e5a6bc429ff..d84ca5932c73a3fea1883e369bd4d5f729a6c8fe 100644 (file)
@@ -8,8 +8,10 @@
 #include <linux/fs.h>
 #include <linux/export.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/cred.h>
+#include <linux/mm.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -30,6 +32,16 @@ static void seq_set_overflow(struct seq_file *m)
        m->count = m->size;
 }
 
+static void *seq_buf_alloc(unsigned long size)
+{
+       void *buf;
+
+       buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+       if (!buf && size > PAGE_SIZE)
+               buf = vmalloc(size);
+       return buf;
+}
+
 /**
  *     seq_open -      initialize sequential file
  *     @file: file we initialize
@@ -96,7 +108,7 @@ static int traverse(struct seq_file *m, loff_t offset)
                return 0;
        }
        if (!m->buf) {
-               m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+               m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
                if (!m->buf)
                        return -ENOMEM;
        }
@@ -135,8 +147,8 @@ static int traverse(struct seq_file *m, loff_t offset)
 
 Eoverflow:
        m->op->stop(m, p);
-       kfree(m->buf);
-       m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+       kvfree(m->buf);
+       m->buf = seq_buf_alloc(m->size <<= 1);
        return !m->buf ? -ENOMEM : -EAGAIN;
 }
 
@@ -191,7 +203,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
 
        /* grab buffer if we didn't have one */
        if (!m->buf) {
-               m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+               m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
                if (!m->buf)
                        goto Enomem;
        }
@@ -231,8 +243,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
                if (m->count < m->size)
                        goto Fill;
                m->op->stop(m, p);
-               kfree(m->buf);
-               m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+               kvfree(m->buf);
+               m->buf = seq_buf_alloc(m->size <<= 1);
                if (!m->buf)
                        goto Enomem;
                m->count = 0;
@@ -349,7 +361,7 @@ EXPORT_SYMBOL(seq_lseek);
 int seq_release(struct inode *inode, struct file *file)
 {
        struct seq_file *m = file->private_data;
-       kfree(m->buf);
+       kvfree(m->buf);
        kfree(m);
        return 0;
 }
@@ -604,13 +616,13 @@ EXPORT_SYMBOL(single_open);
 int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
                void *data, size_t size)
 {
-       char *buf = kmalloc(size, GFP_KERNEL);
+       char *buf = seq_buf_alloc(size);
        int ret;
        if (!buf)
                return -ENOMEM;
        ret = single_open(file, show, data);
        if (ret) {
-               kfree(buf);
+               kvfree(buf);
                return ret;
        }
        ((struct seq_file *)file->private_data)->buf = buf;
index 68307c029228c51efc0e7967c90c39edd1a85113..e028b508db253ea3a134325fc5f8d2984554c1d9 100644 (file)
@@ -76,6 +76,8 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
 
        total_objects = sb->s_nr_dentry_unused +
                        sb->s_nr_inodes_unused + fs_objects + 1;
+       if (!total_objects)
+               total_objects = 1;
 
        if (sc->nr_to_scan) {
                int     dentries;
index ff8229340cd537286fb612efa7041fd125378617..26b69b2d4a452488405f7ccbc2a4f179c1961bbf 100644 (file)
@@ -164,17 +164,12 @@ static int do_commit(struct ubifs_info *c)
        if (err)
                goto out;
        err = ubifs_orphan_end_commit(c);
-       if (err)
-               goto out;
-       old_ltail_lnum = c->ltail_lnum;
-       err = ubifs_log_end_commit(c, new_ltail_lnum);
        if (err)
                goto out;
        err = dbg_check_old_index(c, &zroot);
        if (err)
                goto out;
 
-       mutex_lock(&c->mst_mutex);
        c->mst_node->cmt_no      = cpu_to_le64(c->cmt_no);
        c->mst_node->log_lnum    = cpu_to_le32(new_ltail_lnum);
        c->mst_node->root_lnum   = cpu_to_le32(zroot.lnum);
@@ -203,8 +198,9 @@ static int do_commit(struct ubifs_info *c)
                c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
        else
                c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);
-       err = ubifs_write_master(c);
-       mutex_unlock(&c->mst_mutex);
+
+       old_ltail_lnum = c->ltail_lnum;
+       err = ubifs_log_end_commit(c, new_ltail_lnum);
        if (err)
                goto out;
 
index 36bd4efd0819e96ee299acd030f24fa7113e4fb2..06649d21b056408398623351d7535a1043a9cf9f 100644 (file)
@@ -106,10 +106,14 @@ static inline long long empty_log_bytes(const struct ubifs_info *c)
        h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
        t = (long long)c->ltail_lnum * c->leb_size;
 
-       if (h >= t)
+       if (h > t)
                return c->log_bytes - h + t;
-       else
+       else if (h != t)
                return t - h;
+       else if (c->lhead_lnum != c->ltail_lnum)
+               return 0;
+       else
+               return c->log_bytes;
 }
 
 /**
@@ -447,9 +451,9 @@ out:
  * @ltail_lnum: new log tail LEB number
  *
  * This function is called on when the commit operation was finished. It
- * moves log tail to new position and unmaps LEBs which contain obsolete data.
- * Returns zero in case of success and a negative error code in case of
- * failure.
+ * moves log tail to new position and updates the master node so that it stores
+ * the new log tail LEB number. Returns zero in case of success and a negative
+ * error code in case of failure.
  */
 int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
 {
@@ -477,7 +481,12 @@ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
        spin_unlock(&c->buds_lock);
 
        err = dbg_check_bud_bytes(c);
+       if (err)
+               goto out;
 
+       err = ubifs_write_master(c);
+
+out:
        mutex_unlock(&c->log_mutex);
        return err;
 }
index ab83ace9910a0964544a41c7d289c84c2b075057..1a4bb9e8b3b8925b57be7a6c0a62c1675ddb9bac 100644 (file)
@@ -352,10 +352,9 @@ int ubifs_read_master(struct ubifs_info *c)
  * ubifs_write_master - write master node.
  * @c: UBIFS file-system description object
  *
- * This function writes the master node. The caller has to take the
- * @c->mst_mutex lock before calling this function. Returns zero in case of
- * success and a negative error code in case of failure. The master node is
- * written twice to enable recovery.
+ * This function writes the master node. Returns zero in case of success and a
+ * negative error code in case of failure. The master node is written twice to
+ * enable recovery.
  */
 int ubifs_write_master(struct ubifs_info *c)
 {
index 879b9976c12bf9ab7cc841a1930e0c9b3df41446..05115d719408f73085a09a1ef752839ceca50d19 100644 (file)
@@ -1970,7 +1970,6 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
                mutex_init(&c->lp_mutex);
                mutex_init(&c->tnc_mutex);
                mutex_init(&c->log_mutex);
-               mutex_init(&c->mst_mutex);
                mutex_init(&c->umount_mutex);
                mutex_init(&c->bu_mutex);
                mutex_init(&c->write_reserve_mutex);
index b2babce4d70f21845778dbb82a0b2b83fbb97d75..bd51277f6fe1956c13d90a081f3c92009b497e2b 100644 (file)
@@ -1042,7 +1042,6 @@ struct ubifs_debug_info;
  *
  * @mst_node: master node
  * @mst_offs: offset of valid master node
- * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
  *
  * @max_bu_buf_len: maximum bulk-read buffer length
  * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
@@ -1282,7 +1281,6 @@ struct ubifs_info {
 
        struct ubifs_mst_node *mst_node;
        int mst_offs;
-       struct mutex mst_mutex;
 
        int max_bu_buf_len;
        struct mutex bu_mutex;
index b6d15d349810fe5ca21649208bd86d220caf338c..aa023283cc8a2967e006fc6d9dbeb17c3a578b39 100644 (file)
@@ -1270,13 +1270,22 @@ update_time:
        return 0;
 }
 
+/*
+ * Maximum length of linked list formed by ICB hierarchy. The chosen number is
+ * arbitrary - just that we hopefully don't limit any real use of rewritten
+ * inode on write-once media but avoid looping for too long on corrupted media.
+ */
+#define UDF_MAX_ICB_NESTING 1024
+
 static void __udf_read_inode(struct inode *inode)
 {
        struct buffer_head *bh = NULL;
        struct fileEntry *fe;
        uint16_t ident;
        struct udf_inode_info *iinfo = UDF_I(inode);
+       unsigned int indirections = 0;
 
+reread:
        /*
         * Set defaults, but the inode is still incomplete!
         * Note: get_new_inode() sets the following on a new inode:
@@ -1313,28 +1322,26 @@ static void __udf_read_inode(struct inode *inode)
                ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
                                        &ident);
                if (ident == TAG_IDENT_IE && ibh) {
-                       struct buffer_head *nbh = NULL;
                        struct kernel_lb_addr loc;
                        struct indirectEntry *ie;
 
                        ie = (struct indirectEntry *)ibh->b_data;
                        loc = lelb_to_cpu(ie->indirectICB.extLocation);
 
-                       if (ie->indirectICB.extLength &&
-                               (nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
-                                                       &ident))) {
-                               if (ident == TAG_IDENT_FE ||
-                                       ident == TAG_IDENT_EFE) {
-                                       memcpy(&iinfo->i_location,
-                                               &loc,
-                                               sizeof(struct kernel_lb_addr));
-                                       brelse(bh);
-                                       brelse(ibh);
-                                       brelse(nbh);
-                                       __udf_read_inode(inode);
+                       if (ie->indirectICB.extLength) {
+                               brelse(bh);
+                               brelse(ibh);
+                               memcpy(&iinfo->i_location, &loc,
+                                      sizeof(struct kernel_lb_addr));
+                               if (++indirections > UDF_MAX_ICB_NESTING) {
+                                       udf_err(inode->i_sb,
+                                               "too many ICBs in ICB hierarchy"
+                                               " (max %d supported)\n",
+                                               UDF_MAX_ICB_NESTING);
+                                       make_bad_inode(inode);
                                        return;
                                }
-                               brelse(nbh);
+                               goto reread;
                        }
                }
                brelse(ibh);
index 41a695048be7b09b87baf4517fd8124b6d7a8ffb..cfbb4c1b2f17d1d415791c65083ae558a86b0e96 100644 (file)
@@ -1661,11 +1661,72 @@ xfs_vm_readpages(
        return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
 }
 
+/*
+ * This is basically a copy of __set_page_dirty_buffers() with one
+ * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
+ * dirty, we'll never be able to clean them because we don't write buffers
+ * beyond EOF, and that means we can't invalidate pages that span EOF
+ * that have been marked dirty. Further, the dirty state can leak into
+ * the file interior if the file is extended, resulting in all sorts of
+ * bad things happening as the state does not match the underlying data.
+ *
+ * XXX: this really indicates that bufferheads in XFS need to die. Warts like
+ * this only exist because of bufferheads and how the generic code manages them.
+ */
+STATIC int
+xfs_vm_set_page_dirty(
+       struct page             *page)
+{
+       struct address_space    *mapping = page->mapping;
+       struct inode            *inode = mapping->host;
+       loff_t                  end_offset;
+       loff_t                  offset;
+       int                     newly_dirty;
+
+       if (unlikely(!mapping))
+               return !TestSetPageDirty(page);
+
+       end_offset = i_size_read(inode);
+       offset = page_offset(page);
+
+       spin_lock(&mapping->private_lock);
+       if (page_has_buffers(page)) {
+               struct buffer_head *head = page_buffers(page);
+               struct buffer_head *bh = head;
+
+               do {
+                       if (offset < end_offset)
+                               set_buffer_dirty(bh);
+                       bh = bh->b_this_page;
+                       offset += 1 << inode->i_blkbits;
+               } while (bh != head);
+       }
+       newly_dirty = !TestSetPageDirty(page);
+       spin_unlock(&mapping->private_lock);
+
+       if (newly_dirty) {
+               /* sigh - __set_page_dirty() is static, so copy it here, too */
+               unsigned long flags;
+
+               spin_lock_irqsave(&mapping->tree_lock, flags);
+               if (page->mapping) {    /* Race with truncate? */
+                       WARN_ON_ONCE(!PageUptodate(page));
+                       account_page_dirtied(page, mapping);
+                       radix_tree_tag_set(&mapping->page_tree,
+                                       page_index(page), PAGECACHE_TAG_DIRTY);
+               }
+               spin_unlock_irqrestore(&mapping->tree_lock, flags);
+               __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+       }
+       return newly_dirty;
+}
+
 const struct address_space_operations xfs_address_space_operations = {
        .readpage               = xfs_vm_readpage,
        .readpages              = xfs_vm_readpages,
        .writepage              = xfs_vm_writepage,
        .writepages             = xfs_vm_writepages,
+       .set_page_dirty         = xfs_vm_set_page_dirty,
        .releasepage            = xfs_vm_releasepage,
        .invalidatepage         = xfs_vm_invalidatepage,
        .write_begin            = xfs_vm_write_begin,
index 044e97a33c8d0a155f1ea026a20881f95dd95274..bac3e1635b7d3ba53a0f71914b4db61f951e8b24 100644 (file)
@@ -1104,7 +1104,8 @@ xfs_qm_dqflush(
         * Get the buffer containing the on-disk dquot
         */
        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
-                                  mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
+                                  mp->m_quotainfo->qi_dqchunklen, 0, &bp,
+                                  &xfs_dquot_buf_ops);
        if (error)
                goto out_unlock;
 
index a5f2042aec8b27e730f0cbdedaef9eb50c9422f0..9f457fedbcfcc996b5b84bd9e08d084e9f8cd9bd 100644 (file)
@@ -298,7 +298,16 @@ xfs_file_aio_read(
                                xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
                                return ret;
                        }
-                       truncate_pagecache_range(VFS_I(ip), pos, -1);
+
+                       /*
+                        * Invalidate whole pages. This can return an error if
+                        * we fail to invalidate a page, but this should never
+                        * happen on XFS. Warn if it does fail.
+                        */
+                       ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
+                                               pos >> PAGE_CACHE_SHIFT, -1);
+                       WARN_ON_ONCE(ret);
+                       ret = 0;
                }
                xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
        }
@@ -677,7 +686,15 @@ xfs_file_dio_aio_write(
                                                    pos, -1);
                if (ret)
                        goto out;
-               truncate_pagecache_range(VFS_I(ip), pos, -1);
+               /*
+                * Invalidate whole pages. This can return an error if
+                * we fail to invalidate a page, but this should never
+                * happen on XFS. Warn if it does fail.
+                */
+               ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
+                                               pos >> PAGE_CACHE_SHIFT, -1);
+               WARN_ON_ONCE(ret);
+               ret = 0;
        }
 
        /*
index b75c9bb6e71e34b0c65158f63ba475a217d9e347..29d1ca567ed335eb6c3bc837e772eec70df9dfd4 100644 (file)
@@ -935,6 +935,12 @@ xfs_qm_dqiter_bufs(
                if (error)
                        break;
 
+               /*
+                * A corrupt buffer might not have a verifier attached, so
+                * make sure we have the correct one attached before writeback
+                * occurs.
+                */
+               bp->b_ops = &xfs_dquot_buf_ops;
                xfs_qm_reset_dqcounts(mp, bp, firstid, type);
                xfs_buf_delwri_queue(bp, buffer_list);
                xfs_buf_relse(bp);
@@ -1018,7 +1024,7 @@ xfs_qm_dqiterate(
                                        xfs_buf_readahead(mp->m_ddev_targp,
                                               XFS_FSB_TO_DADDR(mp, rablkno),
                                               mp->m_quotainfo->qi_dqchunklen,
-                                              NULL);
+                                              &xfs_dquot_buf_ops);
                                        rablkno++;
                                }
                        }
index 639d7a4d033bc2a0dc2413003d9209540504ebe3..01613b382b0e37a14c0339daf32fd574d2fd81a0 100644 (file)
 #define read_barrier_depends()         do {} while (0)
 #define smp_read_barrier_depends()     do {} while (0)
 
+#define smp_store_release(p, v)                                                \
+do {                                                                   \
+       compiletime_assert_atomic_type(*p);                             \
+       smp_mb();                                                       \
+       ACCESS_ONCE(*p) = (v);                                          \
+} while (0)
+
+#define smp_load_acquire(p)                                            \
+({                                                                     \
+       typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
+       compiletime_assert_atomic_type(*p);                             \
+       smp_mb();                                                       \
+       ___p1;                                                          \
+})
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
new file mode 100644 (file)
index 0000000..663ac3d
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * include/asm-generic/seccomp.h
+ *
+ * Copyright (C) 2014 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi <at> linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_GENERIC_SECCOMP_H
+#define _ASM_GENERIC_SECCOMP_H
+
+#include <asm-generic/unistd.h>
+
+#if defined(CONFIG_COMPAT) && !defined(__NR_seccomp_read_32)
+#define __NR_seccomp_read_32           __NR_read
+#define __NR_seccomp_write_32          __NR_write
+#define __NR_seccomp_exit_32           __NR_exit
+#define __NR_seccomp_sigreturn_32      __NR_rt_sigreturn
+#endif /* CONFIG_COMPAT && ! already defined */
+
+#define __NR_seccomp_read              __NR_read
+#define __NR_seccomp_write             __NR_write
+#define __NR_seccomp_exit              __NR_exit
+#define __NR_seccomp_sigreturn         __NR_rt_sigreturn
+
+#endif /* _ASM_GENERIC_SECCOMP_H */
+
index 5b09392db6734f6ddab3f01e58ee526a65ae5e6b..d401e5463fb02daae31c1ad34013c1690937cbda 100644 (file)
@@ -144,8 +144,6 @@ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
 
 /**
  * syscall_get_arch - return the AUDIT_ARCH for the current system call
- * @task:      task of interest, must be in system call entry tracing
- * @regs:      task_pt_regs() of @task
  *
  * Returns the AUDIT_ARCH_* based on the system call convention in use.
  *
@@ -155,5 +153,5 @@ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
  * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
  * provide an implementation of this.
  */
-int syscall_get_arch(struct task_struct *task, struct pt_regs *regs);
+int syscall_get_arch(void);
 #endif /* _ASM_SYSCALL_H */
index ecaef57f9f6cb6b280685083467e5abb22696c07..d7b717090f2f294177e3b8ccb1eba77d70300f94 100644 (file)
@@ -52,7 +52,6 @@
        {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
        {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
        {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
-       {0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
        {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
        {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
        {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
        {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/dt-bindings/clock/rockchip,rk3368.h b/include/dt-bindings/clock/rockchip,rk3368.h
new file mode 100644 (file)
index 0000000..7b095f7
--- /dev/null
@@ -0,0 +1,262 @@
+#ifndef _DT_BINDINGS_CLOCK_ROCKCHIP_RK3368_H
+#define _DT_BINDINGS_CLOCK_ROCKCHIP_RK3368_H
+
+#include "rockchip.h"
+
+/* reset id */
+#define RK3368_SRST_CORE_B_0_SC         0
+#define RK3368_SRST_CORE_B_1            1
+#define RK3368_SRST_CORE_B_2            2
+#define RK3368_SRST_CORE_B_3            3
+#define RK3368_SRST_CORE_B_PO0_SC       4
+#define RK3368_SRST_CORE_B_PO1          5
+#define RK3368_SRST_CORE_B_PO2          6
+#define RK3368_SRST_CORE_B_PO3          7
+#define RK3368_SRST_L2_B_SC             8
+#define RK3368_SRST_ADB_B_SC            9
+#define RK3368_SRST_PD_CORE_B_NIU       10
+#define RK3368_SRST_STRC_SYS_A_SC       11
+#define RK3368_SRST_0RES12              12
+#define RK3368_SRST_0RES13              13
+#define RK3368_SRST_SOCDBG_B            14
+#define RK3368_SRST_CORE_B_DBG          15
+
+#define RK3368_SRST_1RES0               16
+#define RK3368_SRST_1RES1               17
+#define RK3368_SRST_DMA1                18
+#define RK3368_SRST_INTMEM              19
+#define RK3368_SRST_ROM                 20
+#define RK3368_SRST_SPDIF_8CH           21
+#define RK3368_SRST_1RES6               22
+#define RK3368_SRST_I2S                 23
+#define RK3368_SRST_MAILBOX             24
+#define RK3368_SRST_I2S_2CH             25
+#define RK3368_SRST_EFUSE_256_P         26
+#define RK3368_SRST_1RES11              27
+#define RK3368_SRST_MCU_SYS             28
+#define RK3368_SRST_MCU_PO              29
+#define RK3368_SRST_MCU_NOC_H           30
+#define RK3368_SRST_EFUSE_P             31
+
+#define RK3368_SRST_GPIO0               32
+#define RK3368_SRST_GPIO1               33
+#define RK3368_SRST_GPIO2               34
+#define RK3368_SRST_GPIO3               35
+#define RK3368_SRST_GPIO4               36
+#define RK3368_SRST_2RES5               37
+#define RK3368_SRST_2RES6               38
+#define RK3368_SRST_2RES7               39
+#define RK3368_SRST_2RES8               40
+#define RK3368_SRST_PMUGRF_P            41
+#define RK3368_SRST_I2C0                42
+#define RK3368_SRST_I2C1                43
+#define RK3368_SRST_I2C2                44
+#define RK3368_SRST_I2C3                45
+#define RK3368_SRST_I2C4                46
+#define RK3368_SRST_I2C5                47
+
+#define RK3368_SRST_DW_PWM              48
+#define RK3368_SRST_MMC_PERI            49
+#define RK3368_SRST_PERIPH_MMU          50
+#define RK3368_SRST_3RES3               51
+#define RK3368_SRST_3RES4               52
+#define RK3368_SRST_3RES5               53
+#define RK3368_SRST_3RES6               54
+#define RK3368_SRST_GRF                 55
+#define RK3368_SRST_PMU                 56
+#define RK3368_SRST_PERIPH_SYS_A        57
+#define RK3368_SRST_PERIPH_SYS_H        58
+#define RK3368_SRST_PERIPH_SYS_P        59
+#define RK3368_SRST_PERIPH_NIU          60
+#define RK3368_SRST_PD_PERI_AHB_ARBITOR 61
+#define RK3368_SRST_EMEM_PERI           62
+#define RK3368_SRST_USB_PERI            63
+
+#define RK3368_SRST_DMA2                64
+#define RK3368_SRST_4RES1               65
+#define RK3368_SRST_MAC                 66
+#define RK3368_SRST_GPS                 67
+#define RK3368_SRST_4RES4               68
+#define RK3368_SRST_RK_PWM              69
+#define RK3368_SRST_4RES6               70
+#define RK3368_SRST_4RES7               71
+#define RK3368_SRST_HOST0_H             72
+#define RK3368_SRST_HSIC                73
+#define RK3368_SRST_HSIC_AUX            74
+#define RK3368_SRST_HSICPHY             75
+#define RK3368_SRST_HSADC_H             76
+#define RK3368_SRST_NANDC0              77
+#define RK3368_SRST_4RES14              78
+#define RK3368_SRST_SFC                 79
+
+#define RK3368_SRST_5RES0               80
+#define RK3368_SRST_5RES1               81
+#define RK3368_SRST_5RES2               82
+#define RK3368_SRST_SPI0                83
+#define RK3368_SRST_SPI1                84
+#define RK3368_SRST_SPI2                85
+#define RK3368_SRST_5RES6               86
+#define RK3368_SRST_SARADC              87
+#define RK3368_SRST_PD_ALIVE_NIU_P      88
+#define RK3368_SRST_PD_PMU_INTMEM_P     89
+#define RK3368_SRST_PD_PMU_NIU_P        90
+#define RK3368_SRST_SGRF_P              91
+#define RK3368_SRST_5RES12              92
+#define RK3368_SRST_5RES13              93
+#define RK3368_SRST_5RES14              94
+#define RK3368_SRST_5RES15              95
+
+#define RK3368_SRST_VIO_ARBI_H          96
+#define RK3368_SRST_RGA_NIU_A           97
+#define RK3368_SRST_VIO0_NIU_A          98
+#define RK3368_SRST_VIO0_BUS_H          99
+#define RK3368_SRST_LCDC0_A             100
+#define RK3368_SRST_LCDC0_H             101
+#define RK3368_SRST_LCDC0_D             102
+#define RK3368_SRST_6RES7               103
+#define RK3368_SRST_VIP                 104
+#define RK3368_SRST_RGA_CORE            105
+#define RK3368_SRST_IEP_A               106
+#define RK3368_SRST_IEP_H               107
+#define RK3368_SRST_RGA_A               108
+#define RK3368_SRST_RGA_H               109
+#define RK3368_SRST_ISP                 110
+#define RK3368_SRST_EDP_24M             111
+
+#define RK3368_SRST_VIDEO_A             112
+#define RK3368_SRST_VIDEO_H             113
+#define RK3368_SRST_MIPIDPHYTX_P        114
+#define RK3368_SRST_MIPIDSI0_P          115
+#define RK3368_SRST_MIPIDPHYRX_P        116
+#define RK3368_SRST_MIPICSI_P           117
+#define RK3368_SRST_7RES6               118
+#define RK3368_SRST_7RES7               119
+#define RK3368_SRST_GPU_CORE            120
+#define RK3368_SRST_HDMI                121
+#define RK3368_SRST_EDP_P               122
+#define RK3368_SRST_PMU_PVTM            123
+#define RK3368_SRST_CORE_PVTM           124
+#define RK3368_SRST_GPU_PVTM            125
+#define RK3368_SRST_GPU_SYS_A           126
+#define RK3368_SRST_GPU_MEM_NIU_A       127
+
+#define RK3368_SRST_MMC0                128
+#define RK3368_SRST_SDIO0               129
+#define RK3368_SRST_8RES2               130
+#define RK3368_SRST_EMMC                131
+#define RK3368_SRST_USBOTG0_H           132
+#define RK3368_SRST_USBOTGPHY0          133
+#define RK3368_SRST_USBOTGC0            134
+#define RK3368_SRST_USBHOSTC0_H         135
+#define RK3368_SRST_USBOTGPHY1          136
+#define RK3368_SRST_USBHOSTC0           137
+#define RK3368_SRST_USBPHY0_UTMI        138
+#define RK3368_SRST_USBPHY1_UTMI        139
+#define RK3368_SRST_8RES12              140
+#define RK3368_SRST_USB_ADP             141
+#define RK3368_SRST_8RES14              142
+#define RK3368_SRST_8RES15              143
+
+#define RK3368_SRST_DBG                 144
+#define RK3368_SRST_PD_CORE_AHB_NOC     145
+#define RK3368_SRST_PD_CORE_APB_NOC     146
+#define RK3368_SRST_9RES3               147
+#define RK3368_SRST_GIC                 148
+#define RK3368_SRST_LCDCPWM0            149
+#define RK3368_SRST_9RES6               150
+#define RK3368_SRST_9RES7               151
+#define RK3368_SRST_9RES8               152
+#define RK3368_SRST_RGA_H2P_BRG         153
+#define RK3368_SRST_VIDEO               154
+#define RK3368_SRST_9RES11              155
+#define RK3368_SRST_9RES12              156
+#define RK3368_SRST_GPU_CFG_NIU_A       157
+#define RK3368_SRST_9RES14              158
+#define RK3368_SRST_TSADC_P             159
+
+#define RK3368_SRST_DDRPHY0             160
+#define RK3368_SRST_DDRPHY0_P           161
+#define RK3368_SRST_DDRCTRL0            162
+#define RK3368_SRST_DDRCTRL0_P          163
+#define RK3368_SRST_10RES4              164
+#define RK3368_SRST_VIDEO_NIU_A         165
+#define RK3368_SRST_10RES6              166
+#define RK3368_SRST_VIDEO_NIU_H         167
+#define RK3368_SRST_10RES8              168
+#define RK3368_SRST_10RES9              169
+#define RK3368_SRST_DDRMSCH0            170
+#define RK3368_SRST_10RES11             171
+#define RK3368_SRST_10RES12             172
+#define RK3368_SRST_SYS_BUS             173
+#define RK3368_SRST_CRYPTO              174
+#define RK3368_SRST_10RES15             175
+
+#define RK3368_SRST_11RES0              176
+#define RK3368_SRST_11RES1              177
+#define RK3368_SRST_11RES2              178
+#define RK3368_SRST_UART0               179
+#define RK3368_SRST_UART1               180
+#define RK3368_SRST_UART2               181
+#define RK3368_SRST_UART3               182
+#define RK3368_SRST_UART4               183
+#define RK3368_SRST_11RES8              184
+#define RK3368_SRST_11RES9              185
+#define RK3368_SRST_SIMC_P              186
+#define RK3368_SRST_11RES11             187
+#define RK3368_SRST_TSP_H               188
+#define RK3368_SRST_TSP_CLKIN0          189
+#define RK3368_SRST_11RES14             190
+#define RK3368_SRST_11RES15             191
+
+#define RK3368_SRST_CORE_L_0_SC         192
+#define RK3368_SRST_CORE_L_1            193
+#define RK3368_SRST_CORE_L_2            194
+#define RK3368_SRST_CORE_L_3            195
+#define RK3368_SRST_CORE_L_PO0_SC       196
+#define RK3368_SRST_CORE_L_PO1          197
+#define RK3368_SRST_CORE_L_PO2          198
+#define RK3368_SRST_CORE_L_PO3          199
+#define RK3368_SRST_L2_L_SC             200
+#define RK3368_SRST_ADB_L_SC            201
+#define RK3368_SRST_PD_CORE_L_NIU_A_SC  202
+#define RK3368_SRST_CCI400_SYS_SC       203
+#define RK3368_SRST_CCI400_DDR_SC       204
+#define RK3368_SRST_CCI400_SC           205
+#define RK3368_SRST_SOCDBG_L            206
+#define RK3368_SRST_CORE_L_DBG          207
+
+#define RK3368_SRST_CORE_B_0            208
+#define RK3368_SRST_CORE_B_PO0          209
+#define RK3368_SRST_L2_B                210
+#define RK3368_SRST_ADB_B               211
+#define RK3368_SRST_PD_CORE_B_NIU_A     212
+#define RK3368_SRST_STRC_SYS_A          213
+#define RK3368_SRST_CORE_L_0            214
+#define RK3368_SRST_CORE_L_PO0          215
+#define RK3368_SRST_L2_L                216
+#define RK3368_SRST_ADB_L               217
+#define RK3368_SRST_PD_CORE_L_NIU_A     218
+#define RK3368_SRST_CCI400_SYS          219
+#define RK3368_SRST_CCI400_DDR          220
+#define RK3368_SRST_CCI400              221
+#define RK3368_SRST_TRACE               222
+#define RK3368_SRST_13RES15             223
+
+#define RK3368_SRST_TIMER00             224
+#define RK3368_SRST_TIMER01             225
+#define RK3368_SRST_TIMER02             226
+#define RK3368_SRST_TIMER03             227
+#define RK3368_SRST_TIMER04             228
+#define RK3368_SRST_TIMER05             229
+#define RK3368_SRST_TIMER10             230
+#define RK3368_SRST_TIMER11             231
+#define RK3368_SRST_TIMER12             232
+#define RK3368_SRST_TIMER13             233
+#define RK3368_SRST_TIMER14             234
+#define RK3368_SRST_TIMER15             235
+#define RK3368_SRST_TIMER0_P            236
+#define RK3368_SRST_TIMER1_P            237
+#define RK3368_SRST_14RES14             238
+#define RK3368_SRST_14RES15             239
+
+#endif /* _DT_BINDINGS_CLOCK_ROCKCHIP_RK3368_H */
index 5c1234e967e3ab78b0b0ea007b2127c6fdab7744..765d3ab15740ea3ebc5610f5f448f44277126b56 100644 (file)
@@ -43,6 +43,8 @@
 #define CLK_PLL_3036_APLL      BIT(6)
 #define CLK_PLL_3036PLUS_AUTO  BIT(7)
 #define CLK_PLL_312XPLUS       BIT(8)
+#define CLK_PLL_3368_APLLB     BIT(9)
+#define CLK_PLL_3368_APLLL     BIT(10)
 
 
 /* rate_ops index */
@@ -64,6 +66,8 @@
 #define CLKOPS_RATE_RK3288_DCLK_LCDC0  16
 #define CLKOPS_RATE_RK3288_DCLK_LCDC1  17
 #define CLKOPS_RATE_DDR_DIV2           18
+#define CLKOPS_RATE_DDR_DIV4           19
+#define CLKOPS_RATE_RK3368_MUX_DIV_NPLL 20
 #define CLKOPS_TABLE_END               (~0)
 
 /* pd id */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
new file mode 100644 (file)
index 0000000..ad9db60
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM_KVM_ARCH_TIMER_H
+#define __ASM_ARM_KVM_ARCH_TIMER_H
+
+#include <linux/clocksource.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
+
+struct arch_timer_kvm {
+#ifdef CONFIG_KVM_ARM_TIMER
+       /* Is the timer enabled */
+       bool                    enabled;
+
+       /* Virtual offset */
+       cycle_t                 cntvoff;
+#endif
+};
+
+struct arch_timer_cpu {
+#ifdef CONFIG_KVM_ARM_TIMER
+       /* Registers: control register, timer value */
+       u32                             cntv_ctl;       /* Saved/restored */
+       cycle_t                         cntv_cval;      /* Saved/restored */
+
+       /*
+        * Anything that is not used directly from assembly code goes
+        * here.
+        */
+
+       /* Background timer used when the guest is not running */
+       struct hrtimer                  timer;
+
+       /* Work queued with the above timer expires */
+       struct work_struct              expired;
+
+       /* Background timer active */
+       bool                            armed;
+
+       /* Timer IRQ */
+       const struct kvm_irq_level      *irq;
+#endif
+};
+
+#ifdef CONFIG_KVM_ARM_TIMER
+int kvm_timer_hyp_init(void);
+int kvm_timer_init(struct kvm *kvm);
+void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+                         const struct kvm_irq_level *irq);
+void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
+
+u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
+int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
+
+#else
+static inline int kvm_timer_hyp_init(void)
+{
+       return 0;
+};
+
+static inline int kvm_timer_init(struct kvm *kvm)
+{
+       return 0;
+}
+
+static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+                                       const struct kvm_irq_level *irq) {}
+static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
+static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {}
+
+static inline int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
+{
+       return 0;
+}
+
+static inline u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
+{
+       return 0;
+}
+#endif
+
+#endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
new file mode 100644 (file)
index 0000000..2f2aac8
--- /dev/null
@@ -0,0 +1,370 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM_KVM_VGIC_H
+#define __ASM_ARM_KVM_VGIC_H
+
+#include <linux/kernel.h>
+#include <linux/kvm.h>
+#include <linux/irqreturn.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define VGIC_NR_IRQS_LEGACY    256
+#define VGIC_NR_SGIS           16
+#define VGIC_NR_PPIS           16
+#define VGIC_NR_PRIVATE_IRQS   (VGIC_NR_SGIS + VGIC_NR_PPIS)
+
+#define VGIC_V2_MAX_LRS                (1 << 6)
+#define VGIC_V3_MAX_LRS                16
+#define VGIC_MAX_IRQS          1024
+
+/* Sanity checks... */
+#if (KVM_MAX_VCPUS > 8)
+#error Invalid number of CPU interfaces
+#endif
+
+#if (VGIC_NR_IRQS_LEGACY & 31)
+#error "VGIC_NR_IRQS must be a multiple of 32"
+#endif
+
+#if (VGIC_NR_IRQS_LEGACY > VGIC_MAX_IRQS)
+#error "VGIC_NR_IRQS must be <= 1024"
+#endif
+
+/*
+ * The GIC distributor registers describing interrupts have two parts:
+ * - 32 per-CPU interrupts (SGI + PPI)
+ * - a bunch of shared interrupts (SPI)
+ */
+struct vgic_bitmap {
+       /*
+        * - One UL per VCPU for private interrupts (assumes UL is at
+        *   least 32 bits)
+        * - As many UL as necessary for shared interrupts.
+        *
+        * The private interrupts are accessed via the "private"
+        * field, one UL per vcpu (the state for vcpu n is in
+        * private[n]). The shared interrupts are accessed via the
+        * "shared" pointer (IRQn state is at bit n-32 in the bitmap).
+        */
+       unsigned long *private;
+       unsigned long *shared;
+};
+
+struct vgic_bytemap {
+       /*
+        * - 8 u32 per VCPU for private interrupts
+        * - As many u32 as necessary for shared interrupts.
+        *
+        * The private interrupts are accessed via the "private"
+        * field, (the state for vcpu n is in private[n*8] to
+        * private[n*8 + 7]). The shared interrupts are accessed via
+        * the "shared" pointer (IRQn state is at byte (n-32)%4 of the
+        * shared[(n-32)/4] word).
+        */
+       u32 *private;
+       u32 *shared;
+};
+
+struct kvm_vcpu;
+
+enum vgic_type {
+       VGIC_V2,                /* Good ol' GICv2 */
+       VGIC_V3,                /* New fancy GICv3 */
+};
+
+#define LR_STATE_PENDING       (1 << 0)
+#define LR_STATE_ACTIVE                (1 << 1)
+#define LR_STATE_MASK          (3 << 0)
+#define LR_EOI_INT             (1 << 2)
+
+struct vgic_lr {
+       u16     irq;
+       u8      source;
+       u8      state;
+};
+
+struct vgic_vmcr {
+       u32     ctlr;
+       u32     abpr;
+       u32     bpr;
+       u32     pmr;
+};
+
+struct vgic_ops {
+       struct vgic_lr  (*get_lr)(const struct kvm_vcpu *, int);
+       void    (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
+       void    (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
+       u64     (*get_elrsr)(const struct kvm_vcpu *vcpu);
+       u64     (*get_eisr)(const struct kvm_vcpu *vcpu);
+       u32     (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
+       void    (*enable_underflow)(struct kvm_vcpu *vcpu);
+       void    (*disable_underflow)(struct kvm_vcpu *vcpu);
+       void    (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+       void    (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+       void    (*enable)(struct kvm_vcpu *vcpu);
+};
+
+struct vgic_params {
+       /* vgic type */
+       enum vgic_type  type;
+       /* Physical address of vgic virtual cpu interface */
+       phys_addr_t     vcpu_base;
+       /* Number of list registers */
+       u32             nr_lr;
+       /* Interrupt number */
+       unsigned int    maint_irq;
+       /* Virtual control interface base address */
+       void __iomem    *vctrl_base;
+};
+
+struct vgic_dist {
+#ifdef CONFIG_KVM_ARM_VGIC
+       spinlock_t              lock;
+       bool                    in_kernel;
+       bool                    ready;
+
+       int                     nr_cpus;
+       int                     nr_irqs;
+
+       /* Virtual control interface mapping */
+       void __iomem            *vctrl_base;
+
+       /* Distributor and vcpu interface mapping in the guest */
+       phys_addr_t             vgic_dist_base;
+       phys_addr_t             vgic_cpu_base;
+
+       /* Distributor enabled */
+       u32                     enabled;
+
+       /* Interrupt enabled (one bit per IRQ) */
+       struct vgic_bitmap      irq_enabled;
+
+       /* Level-triggered interrupt external input is asserted */
+       struct vgic_bitmap      irq_level;
+
+       /*
+        * Interrupt state is pending on the distributor
+        */
+       struct vgic_bitmap      irq_pending;
+
+       /*
+        * Tracks writes to GICD_ISPENDRn and GICD_ICPENDRn for level-triggered
+        * interrupts.  Essentially holds the state of the flip-flop in
+        * Figure 4-10 on page 4-101 in ARM IHI 0048B.b.
+        * Once set, it is only cleared for level-triggered interrupts on
+        * guest ACKs (when we queue it) or writes to GICD_ICPENDRn.
+        */
+       struct vgic_bitmap      irq_soft_pend;
+
+       /* Level-triggered interrupt queued on VCPU interface */
+       struct vgic_bitmap      irq_queued;
+
+       /* Interrupt priority. Not used yet. */
+       struct vgic_bytemap     irq_priority;
+
+       /* Level/edge triggered */
+       struct vgic_bitmap      irq_cfg;
+
+       /*
+        * Source CPU per SGI and target CPU:
+        *
+        * Each byte represent a SGI observable on a VCPU, each bit of
+        * this byte indicating if the corresponding VCPU has
+        * generated this interrupt. This is a GICv2 feature only.
+        *
+        * For VCPUn (n < 8), irq_sgi_sources[n*16] to [n*16 + 15] are
+        * the SGIs observable on VCPUn.
+        */
+       u8                      *irq_sgi_sources;
+
+       /*
+        * Target CPU for each SPI:
+        *
+        * Array of available SPI, each byte indicating the target
+        * VCPU for SPI. IRQn (n >=32) is at irq_spi_cpu[n-32].
+        */
+       u8                      *irq_spi_cpu;
+
+       /*
+        * Reverse lookup of irq_spi_cpu for faster compute pending:
+        *
+        * Array of bitmaps, one per VCPU, describing if IRQn is
+        * routed to a particular VCPU.
+        */
+       struct vgic_bitmap      *irq_spi_target;
+
+       /* Bitmap indicating which CPU has something pending */
+       unsigned long           *irq_pending_on_cpu;
+#endif
+};
+
+struct vgic_v2_cpu_if {
+       u32             vgic_hcr;
+       u32             vgic_vmcr;
+       u32             vgic_misr;      /* Saved only */
+       u32             vgic_eisr[2];   /* Saved only */
+       u32             vgic_elrsr[2];  /* Saved only */
+       u32             vgic_apr;
+       u32             vgic_lr[VGIC_V2_MAX_LRS];
+};
+
+struct vgic_v3_cpu_if {
+#ifdef CONFIG_ARM_GIC_V3
+       u32             vgic_hcr;
+       u32             vgic_vmcr;
+       u32             vgic_misr;      /* Saved only */
+       u32             vgic_eisr;      /* Saved only */
+       u32             vgic_elrsr;     /* Saved only */
+       u32             vgic_ap0r[4];
+       u32             vgic_ap1r[4];
+       u64             vgic_lr[VGIC_V3_MAX_LRS];
+#endif
+};
+
+struct vgic_cpu {
+#ifdef CONFIG_KVM_ARM_VGIC
+       /* per IRQ to LR mapping */
+       u8              *vgic_irq_lr_map;
+
+       /* Pending interrupts on this VCPU */
+       DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS);
+       unsigned long   *pending_shared;
+
+       /* Bitmap of used/free list registers */
+       DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS);
+
+       /* Number of list registers on this CPU */
+       int             nr_lr;
+
+       /* CPU vif control registers for world switch */
+       union {
+               struct vgic_v2_cpu_if   vgic_v2;
+               struct vgic_v3_cpu_if   vgic_v3;
+       };
+#endif
+};
+
+#define LR_EMPTY       0xff
+
+#define INT_STATUS_EOI         (1 << 0)
+#define INT_STATUS_UNDERFLOW   (1 << 1)
+
+struct kvm;
+struct kvm_vcpu;
+struct kvm_run;
+struct kvm_exit_mmio;
+
+#ifdef CONFIG_KVM_ARM_VGIC
+int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
+int kvm_vgic_hyp_init(void);
+int kvm_vgic_init(struct kvm *kvm);
+int kvm_vgic_create(struct kvm *kvm);
+void kvm_vgic_destroy(struct kvm *kvm);
+void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
+int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
+                       bool level);
+int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
+bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                     struct kvm_exit_mmio *mmio);
+
+#define irqchip_in_kernel(k)   (!!((k)->arch.vgic.in_kernel))
+#define vgic_initialized(k)    ((k)->arch.vgic.ready)
+
+int vgic_v2_probe(struct device_node *vgic_node,
+                 const struct vgic_ops **ops,
+                 const struct vgic_params **params);
+#ifdef CONFIG_ARM_GIC_V3
+int vgic_v3_probe(struct device_node *vgic_node,
+                 const struct vgic_ops **ops,
+                 const struct vgic_params **params);
+#else
+static inline int vgic_v3_probe(struct device_node *vgic_node,
+                               const struct vgic_ops **ops,
+                               const struct vgic_params **params)
+{
+       return -ENODEV;
+}
+#endif
+
+#else
+static inline int kvm_vgic_hyp_init(void)
+{
+       return 0;
+}
+
+static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
+{
+       return 0;
+}
+
+static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
+{
+       return -ENXIO;
+}
+
+static inline int kvm_vgic_init(struct kvm *kvm)
+{
+       return 0;
+}
+
+static inline int kvm_vgic_create(struct kvm *kvm)
+{
+       return 0;
+}
+
+static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}
+
+static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
+                                     unsigned int irq_num, bool level)
+{
+       return 0;
+}
+
+static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                                   struct kvm_exit_mmio *mmio)
+{
+       return false;
+}
+
+static inline int irqchip_in_kernel(struct kvm *kvm)
+{
+       return 0;
+}
+
+static inline bool vgic_initialized(struct kvm *kvm)
+{
+       return true;
+}
+#endif
+
+#endif
index 2fdb4a451b49bd626d9415b231c76b7ac927cf69..494d228a91dd2bbfb7e1f02a45e5250a5bf1af92 100644 (file)
@@ -1187,10 +1187,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
 static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
 {
        unsigned int granularity = max(lim->physical_block_size, lim->io_min);
-       unsigned int alignment = (sector << 9) & (granularity - 1);
+       unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
 
-       return (granularity + lim->alignment_offset - alignment)
-               & (granularity - 1);
+       return (granularity + lim->alignment_offset - alignment) % granularity;
 }
 
 static inline int bdev_alignment_offset(struct block_device *bdev)
index 15f90929fb51b615c7a778bc33045cea0363b099..9b4378af414cdd73f7506e0a5f13f3a1dc859fd4 100644 (file)
@@ -78,8 +78,11 @@ extern const kernel_cap_t __cap_init_eff_set;
 # error Fix up hand-coded capability macro initializers
 #else /* HAND-CODED capability initializers */
 
+#define CAP_LAST_U32                   ((_KERNEL_CAPABILITY_U32S) - 1)
+#define CAP_LAST_U32_VALID_MASK                (CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
+
 # define CAP_EMPTY_SET    ((kernel_cap_t){{ 0, 0 }})
-# define CAP_FULL_SET     ((kernel_cap_t){{ ~0, ~0 }})
+# define CAP_FULL_SET     ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
 # define CAP_FS_SET       ((kernel_cap_t){{ CAP_FS_MASK_B0 \
                                    | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
                                    CAP_FS_MASK_B1 } })
index 7c1420bb1dcef40e6f8e4cc571ef5ea2c25df913..6ade97de7a850faf8c7a8932cc3f4bbde42dd1c9 100644 (file)
@@ -157,7 +157,7 @@ struct ceph_msg {
        bool front_is_vmalloc;
        bool more_to_follow;
        bool needs_out_seq;
-       int front_max;
+       int front_alloc_len;
        unsigned long ack_stamp;        /* tx: when we were acked */
 
        struct ceph_msgpool *pool;
index 32a4f95d0bd75b3c1bffba0a5ba2d878be6faec8..a6fc777288abb51e0e49f1070a5f458dbc7d7f11 100644 (file)
@@ -869,6 +869,17 @@ unsigned short css_id(struct cgroup_subsys_state *css);
 unsigned short css_depth(struct cgroup_subsys_state *css);
 struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
 
+/*
+ * Default Android check for whether the current process is allowed to move a
+ * task across cgroups, either because CAP_SYS_NICE is set or because the uid
+ * of the calling process is the same as the moved task or because we are
+ * running as root.
+ * Returns 0 if this is allowed, or -EACCES otherwise.
+ */
+int subsys_cgroup_allow_attach(struct cgroup *cgrp,
+                              struct cgroup_taskset *tset);
+
+
 #else /* !CONFIG_CGROUPS */
 
 static inline int cgroup_init_early(void) { return 0; }
@@ -892,6 +903,11 @@ static inline int cgroup_attach_task_all(struct task_struct *from,
        return 0;
 }
 
+static inline int subsys_cgroup_allow_attach(struct cgroup *cgrp,
+                                            struct cgroup_taskset *tset)
+{
+       return 0;
+}
 #endif /* !CONFIG_CGROUPS */
 
 #endif /* _LINUX_CGROUP_H */
index 7279b94c01da3fdb31108e9ca3d3e8fb5202e923..91aa89e1aaa06bbdcfcd3fc33401369f8e00968f 100644 (file)
@@ -285,7 +285,7 @@ extern struct clocksource* clocksource_get_next(void);
 extern void clocksource_change_rating(struct clocksource *cs, int rating);
 extern void clocksource_suspend(void);
 extern void clocksource_resume(void);
-extern struct clocksource * __init __weak clocksource_default_clock(void);
+extern struct clocksource * __init clocksource_default_clock(void);
 extern void clocksource_mark_unstable(struct clocksource *cs);
 
 extern void
index 24545cd90a252b8dea0a9eec462b97a03b86adaa..02ae99e8e6d38a49ba59e76ecb722fce64376a51 100644 (file)
@@ -37,6 +37,9 @@
     __asm__ ("" : "=r"(__ptr) : "0"(ptr));             \
     (typeof(ptr)) (__ptr + (off)); })
 
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
+#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
+
 #ifdef __CHECKER__
 #define __must_be_array(arr) 0
 #else
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
new file mode 100644 (file)
index 0000000..cdd1cc2
--- /dev/null
@@ -0,0 +1,66 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#define __used                         __attribute__((__used__))
+#define __must_check                   __attribute__((warn_unused_result))
+#define __compiler_offsetof(a, b)      __builtin_offsetof(a, b)
+
+/* Mark functions as cold. gcc will assume any path leading to a call
+   to them will be unlikely.  This means a lot of manual unlikely()s
+   are unnecessary now for any paths leading to the usual suspects
+   like BUG(), printk(), panic() etc. [but let's keep them for now for
+   older compilers]
+
+   Early snapshots of gcc 4.3 don't support this and we can't detect this
+   in the preprocessor, but we can live with this because they're unreleased.
+   Maketime probing would be overkill here.
+
+   gcc also has a __attribute__((__hot__)) to move hot functions into
+   a special section, but I don't see any sense in this right now in
+   the kernel context */
+#define __cold                 __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+
+/*
+ * Mark a position in code as unreachable.  This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased.  Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone      __attribute__((__noclone__))
+
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ * Fixed in GCC 4.8.2 and later versions.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...)        do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
index dc1bd3dcf11fd6b72f93c5d3d9b674957bb705a0..5529c52394219a25f274143aaab152bed8b4e157 100644 (file)
@@ -15,6 +15,7 @@
  */
 #undef barrier
 #undef RELOC_HIDE
+#undef OPTIMIZER_HIDE_VAR
 
 #define barrier() __memory_barrier()
 
      __ptr = (unsigned long) (ptr);                            \
     (typeof(ptr)) (__ptr + (off)); })
 
+/* This should act as an optimization barrier on var.
+ * Given that this compiler does not have inline assembly, a compiler barrier
+ * is the best we can do.
+ */
+#define OPTIMIZER_HIDE_VAR(var) barrier()
+
 /* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
 #define __must_be_array(a) 0
 
index 92669cd182a6daca2550e2de8e4b8c7e2fd65a59..2472740d7ab2201f58d68ce864448b3def30cdaf 100644 (file)
@@ -170,6 +170,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
     (typeof(ptr)) (__ptr + (off)); })
 #endif
 
+#ifndef OPTIMIZER_HIDE_VAR
+#define OPTIMIZER_HIDE_VAR(var) barrier()
+#endif
+
 /* Not-quite-unique ID. */
 #ifndef __UNIQUE_ID
 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
@@ -298,6 +302,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
 #endif
 
+/* Is this type a native word size -- useful for atomic operations */
+#ifndef __native_word
+# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+#endif
+
 /* Compile time object size, -1 for unknown */
 #ifndef __compiletime_object_size
 # define __compiletime_object_size(obj) -1
@@ -337,6 +346,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 #define compiletime_assert(condition, msg) \
        _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
 
+#define compiletime_assert_atomic_type(t)                              \
+       compiletime_assert(__native_word(t),                            \
+               "Need native word sized stores/loads for atomicity.")
+
 /*
  * Prevent the compiler from merging or refetching accesses.  The compiler
  * is also forbidden from reordering successive instances of ACCESS_ONCE(),
index 3869c525b052171fff8bbe3000d61d5999bae7ee..545deb1496557ff0303de2dce5b849e5ee2c1abe 100644 (file)
@@ -83,10 +83,12 @@ static inline void iio_trigger_put(struct iio_trigger *trig)
        put_device(&trig->dev);
 }
 
-static inline void iio_trigger_get(struct iio_trigger *trig)
+static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
 {
        get_device(&trig->dev);
        __module_get(trig->ops->owner);
+
+       return trig;
 }
 
 /**
index 5cd0f09499271283795bb49a7b18b8ed3f1930cd..998f4dfedecf4ed8c48777fdcbd1c9582c4454bf 100644 (file)
@@ -40,6 +40,7 @@ extern struct fs_struct init_fs;
 
 #define INIT_SIGNALS(sig) {                                            \
        .nr_threads     = 1,                                            \
+       .thread_head    = LIST_HEAD_INIT(init_task.thread_node),        \
        .wait_chldexit  = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
        .shared_pending = {                                             \
                .list = LIST_HEAD_INIT(sig.shared_pending.list),        \
@@ -213,6 +214,7 @@ extern struct task_group root_task_group;
                [PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),            \
        },                                                              \
        .thread_group   = LIST_HEAD_INIT(tsk.thread_group),             \
+       .thread_node    = LIST_HEAD_INIT(init_signals.thread_head),     \
        INIT_IDS                                                        \
        INIT_PERF_EVENTS(tsk)                                           \
        INIT_TRACE_IRQFLAGS                                             \
index 97e3f0926a453c57384b2550aa81df5cf4c9304b..ebc6306661ebb5fe17956e4916fc41b2765fcbbc 100644 (file)
@@ -41,6 +41,7 @@ struct ipv6_devconf {
        __s32           accept_source_route;
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        __s32           optimistic_dad;
+       __s32           use_optimistic;
 #endif
 #ifdef CONFIG_IPV6_MROUTE
        __s32           mc_forwarding;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
new file mode 100644 (file)
index 0000000..03a4ea3
--- /dev/null
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
+#define __LINUX_IRQCHIP_ARM_GIC_V3_H
+
+#include <asm/sysreg.h>
+
+/*
+ * Distributor registers. We assume we're running non-secure, with ARE
+ * being set. Secure-only and non-ARE registers are not described.
+ */
+#define GICD_CTLR                      0x0000
+#define GICD_TYPER                     0x0004
+#define GICD_IIDR                      0x0008
+#define GICD_STATUSR                   0x0010
+#define GICD_SETSPI_NSR                        0x0040
+#define GICD_CLRSPI_NSR                        0x0048
+#define GICD_SETSPI_SR                 0x0050
+#define GICD_CLRSPI_SR                 0x0058
+#define GICD_SEIR                      0x0068
+#define GICD_ISENABLER                 0x0100
+#define GICD_ICENABLER                 0x0180
+#define GICD_ISPENDR                   0x0200
+#define GICD_ICPENDR                   0x0280
+#define GICD_ISACTIVER                 0x0300
+#define GICD_ICACTIVER                 0x0380
+#define GICD_IPRIORITYR                        0x0400
+#define GICD_ICFGR                     0x0C00
+#define GICD_IROUTER                   0x6000
+#define GICD_PIDR2                     0xFFE8
+
+#define GICD_CTLR_RWP                  (1U << 31)
+#define GICD_CTLR_ARE_NS               (1U << 4)
+#define GICD_CTLR_ENABLE_G1A           (1U << 1)
+#define GICD_CTLR_ENABLE_G1            (1U << 0)
+
+#define GICD_IROUTER_SPI_MODE_ONE      (0U << 31)
+#define GICD_IROUTER_SPI_MODE_ANY      (1U << 31)
+
+#define GIC_PIDR2_ARCH_MASK            0xf0
+#define GIC_PIDR2_ARCH_GICv3           0x30
+#define GIC_PIDR2_ARCH_GICv4           0x40
+
+/*
+ * Re-Distributor registers, offsets from RD_base
+ */
+#define GICR_CTLR                      GICD_CTLR
+#define GICR_IIDR                      0x0004
+#define GICR_TYPER                     0x0008
+#define GICR_STATUSR                   GICD_STATUSR
+#define GICR_WAKER                     0x0014
+#define GICR_SETLPIR                   0x0040
+#define GICR_CLRLPIR                   0x0048
+#define GICR_SEIR                      GICD_SEIR
+#define GICR_PROPBASER                 0x0070
+#define GICR_PENDBASER                 0x0078
+#define GICR_INVLPIR                   0x00A0
+#define GICR_INVALLR                   0x00B0
+#define GICR_SYNCR                     0x00C0
+#define GICR_MOVLPIR                   0x0100
+#define GICR_MOVALLR                   0x0110
+#define GICR_PIDR2                     GICD_PIDR2
+
+#define GICR_WAKER_ProcessorSleep      (1U << 1)
+#define GICR_WAKER_ChildrenAsleep      (1U << 2)
+
+/*
+ * Re-Distributor registers, offsets from SGI_base
+ */
+#define GICR_ISENABLER0                        GICD_ISENABLER
+#define GICR_ICENABLER0                        GICD_ICENABLER
+#define GICR_ISPENDR0                  GICD_ISPENDR
+#define GICR_ICPENDR0                  GICD_ICPENDR
+#define GICR_ISACTIVER0                        GICD_ISACTIVER
+#define GICR_ICACTIVER0                        GICD_ICACTIVER
+#define GICR_IPRIORITYR0               GICD_IPRIORITYR
+#define GICR_ICFGR0                    GICD_ICFGR
+
+#define GICR_TYPER_VLPIS               (1U << 1)
+#define GICR_TYPER_LAST                        (1U << 4)
+
+/*
+ * CPU interface registers
+ */
+#define ICC_CTLR_EL1_EOImode_drop_dir  (0U << 1)
+#define ICC_CTLR_EL1_EOImode_drop      (1U << 1)
+#define ICC_SRE_EL1_SRE                        (1U << 0)
+
+/*
+ * Hypervisor interface registers (SRE only)
+ */
+#define ICH_LR_VIRTUAL_ID_MASK         ((1UL << 32) - 1)
+
+#define ICH_LR_EOI                     (1UL << 41)
+#define ICH_LR_GROUP                   (1UL << 60)
+#define ICH_LR_STATE                   (3UL << 62)
+#define ICH_LR_PENDING_BIT             (1UL << 62)
+#define ICH_LR_ACTIVE_BIT              (1UL << 63)
+
+#define ICH_MISR_EOI                   (1 << 0)
+#define ICH_MISR_U                     (1 << 1)
+
+#define ICH_HCR_EN                     (1 << 0)
+#define ICH_HCR_UIE                    (1 << 1)
+
+#define ICH_VMCR_CTLR_SHIFT            0
+#define ICH_VMCR_CTLR_MASK             (0x21f << ICH_VMCR_CTLR_SHIFT)
+#define ICH_VMCR_BPR1_SHIFT            18
+#define ICH_VMCR_BPR1_MASK             (7 << ICH_VMCR_BPR1_SHIFT)
+#define ICH_VMCR_BPR0_SHIFT            21
+#define ICH_VMCR_BPR0_MASK             (7 << ICH_VMCR_BPR0_SHIFT)
+#define ICH_VMCR_PMR_SHIFT             24
+#define ICH_VMCR_PMR_MASK              (0xffUL << ICH_VMCR_PMR_SHIFT)
+
+#define ICC_EOIR1_EL1                  sys_reg(3, 0, 12, 12, 1)
+#define ICC_IAR1_EL1                   sys_reg(3, 0, 12, 12, 0)
+#define ICC_SGI1R_EL1                  sys_reg(3, 0, 12, 11, 5)
+#define ICC_PMR_EL1                    sys_reg(3, 0, 4, 6, 0)
+#define ICC_CTLR_EL1                   sys_reg(3, 0, 12, 12, 4)
+#define ICC_SRE_EL1                    sys_reg(3, 0, 12, 12, 5)
+#define ICC_GRPEN1_EL1                 sys_reg(3, 0, 12, 12, 7)
+
+#define ICC_IAR1_EL1_SPURIOUS          0x3ff
+
+#define ICC_SRE_EL2                    sys_reg(3, 4, 12, 9, 5)
+
+#define ICC_SRE_EL2_SRE                        (1 << 0)
+#define ICC_SRE_EL2_ENABLE             (1 << 3)
+
+/*
+ * System register definitions
+ */
+#define ICH_VSEIR_EL2                  sys_reg(3, 4, 12, 9, 4)
+#define ICH_HCR_EL2                    sys_reg(3, 4, 12, 11, 0)
+#define ICH_VTR_EL2                    sys_reg(3, 4, 12, 11, 1)
+#define ICH_MISR_EL2                   sys_reg(3, 4, 12, 11, 2)
+#define ICH_EISR_EL2                   sys_reg(3, 4, 12, 11, 3)
+#define ICH_ELSR_EL2                   sys_reg(3, 4, 12, 11, 5)
+#define ICH_VMCR_EL2                   sys_reg(3, 4, 12, 11, 7)
+
+#define __LR0_EL2(x)                   sys_reg(3, 4, 12, 12, x)
+#define __LR8_EL2(x)                   sys_reg(3, 4, 12, 13, x)
+
+#define ICH_LR0_EL2                    __LR0_EL2(0)
+#define ICH_LR1_EL2                    __LR0_EL2(1)
+#define ICH_LR2_EL2                    __LR0_EL2(2)
+#define ICH_LR3_EL2                    __LR0_EL2(3)
+#define ICH_LR4_EL2                    __LR0_EL2(4)
+#define ICH_LR5_EL2                    __LR0_EL2(5)
+#define ICH_LR6_EL2                    __LR0_EL2(6)
+#define ICH_LR7_EL2                    __LR0_EL2(7)
+#define ICH_LR8_EL2                    __LR8_EL2(0)
+#define ICH_LR9_EL2                    __LR8_EL2(1)
+#define ICH_LR10_EL2                   __LR8_EL2(2)
+#define ICH_LR11_EL2                   __LR8_EL2(3)
+#define ICH_LR12_EL2                   __LR8_EL2(4)
+#define ICH_LR13_EL2                   __LR8_EL2(5)
+#define ICH_LR14_EL2                   __LR8_EL2(6)
+#define ICH_LR15_EL2                   __LR8_EL2(7)
+
+#define __AP0Rx_EL2(x)                 sys_reg(3, 4, 12, 8, x)
+#define ICH_AP0R0_EL2                  __AP0Rx_EL2(0)
+#define ICH_AP0R1_EL2                  __AP0Rx_EL2(1)
+#define ICH_AP0R2_EL2                  __AP0Rx_EL2(2)
+#define ICH_AP0R3_EL2                  __AP0Rx_EL2(3)
+
+#define __AP1Rx_EL2(x)                 sys_reg(3, 4, 12, 9, x)
+#define ICH_AP1R0_EL2                  __AP1Rx_EL2(0)
+#define ICH_AP1R1_EL2                  __AP1Rx_EL2(1)
+#define ICH_AP1R2_EL2                  __AP1Rx_EL2(2)
+#define ICH_AP1R3_EL2                  __AP1Rx_EL2(3)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+
+static inline void gic_write_eoir(u64 irq)
+{
+       asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
+       isb();
+}
+
+#endif
+
+#endif
index 40643ca79cd90216290aa3256934c09059ca619a..1f004b16641ee8210edefb07370dc654bfdbc4ce 100644 (file)
 #define GIC_CPU_EOI                    0x10
 #define GIC_CPU_RUNNINGPRI             0x14
 #define GIC_CPU_HIGHPRI                        0x18
+#define GIC_CPU_ALIAS_BINPOINT         0x1c
+#define GIC_CPU_ACTIVEPRIO             0xd0
+#define GIC_CPU_IDENT                  0xfc
+
+#define GICC_IAR_INT_ID_MASK           0x3ff
 
 #define GIC_DIST_CTRL                  0x000
 #define GIC_DIST_CTR                   0x004
 #define GICH_LR_ACTIVE_BIT             (1 << 29)
 #define GICH_LR_EOI                    (1 << 19)
 
+#define GICH_VMCR_CTRL_SHIFT           0
+#define GICH_VMCR_CTRL_MASK            (0x21f << GICH_VMCR_CTRL_SHIFT)
+#define GICH_VMCR_PRIMASK_SHIFT                27
+#define GICH_VMCR_PRIMASK_MASK         (0x1f << GICH_VMCR_PRIMASK_SHIFT)
+#define GICH_VMCR_BINPOINT_SHIFT       21
+#define GICH_VMCR_BINPOINT_MASK                (0x7 << GICH_VMCR_BINPOINT_SHIFT)
+#define GICH_VMCR_ALIAS_BINPOINT_SHIFT 18
+#define GICH_VMCR_ALIAS_BINPOINT_MASK  (0x7 << GICH_VMCR_ALIAS_BINPOINT_SHIFT)
+
 #define GICH_MISR_EOI                  (1 << 0)
 #define GICH_MISR_U                    (1 << 1)
 
@@ -68,6 +82,7 @@ extern struct irq_chip gic_arch_extn;
 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
                    u32 offset, struct device_node *);
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
+void gic_cpu_if_down(void);
 
 void gic_cpu_if_down(void);
 
@@ -82,6 +97,11 @@ int gic_get_cpu_id(unsigned int cpu);
 void gic_migrate_target(unsigned int new_cpu_id);
 unsigned long gic_get_sgir_physaddr(void);
 
+extern const struct irq_domain_ops *gic_routable_irq_domain_ops;
+static inline void __init register_routable_domain_ops
+                                       (const struct irq_domain_ops *ops)
+{
+       gic_routable_irq_domain_ops = ops;
+}
 #endif /* __ASSEMBLY */
-
 #endif
index 7b5d4a8ab199da3b1be9b056970f560baeaed172..c039fe1315eb3c137eff8d756d646d560d457a6f 100644 (file)
@@ -254,23 +254,11 @@ extern unsigned long preset_lpj;
 #define SEC_JIFFIE_SC (32 - SHIFT_HZ)
 #endif
 #define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
 #define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
                                 TICK_NSEC -1) / (u64)TICK_NSEC))
 
 #define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
                                         TICK_NSEC -1) / (u64)TICK_NSEC))
-#define USEC_CONVERSION  \
-                    ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
-                                        TICK_NSEC -1) / (u64)TICK_NSEC))
-/*
- * USEC_ROUND is used in the timeval to jiffie conversion.  See there
- * for more details.  It is the scaled resolution rounding value.  Note
- * that it is a 64-bit value.  Since, when it is applied, we are already
- * in jiffies (albit scaled), it is nothing but the bits we will shift
- * off.
- */
-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
 /*
  * The maximum jiffie value is (MAX_INT >> 1).  Here we translate that
  * into seconds.  The 64-bit case will overflow if we are not careful,
index c6e091bf39a52e73a0964036ef3e1888e7be61d9..bdfc95bddde965d8d9dad1d69e497e1af088cbad 100644 (file)
@@ -283,7 +283,7 @@ struct kgdb_io {
 
 extern struct kgdb_arch                arch_kgdb_ops;
 
-extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
+extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
 
 #ifdef CONFIG_SERIAL_KGDB_NMI
 extern int kgdb_register_nmi_console(void);
index 8db53cfaccdb64fdbd18d29935e698d101bf5bf1..f64e941a4213ecd0fffc274e274b6f51be24f1e2 100644 (file)
@@ -129,11 +129,9 @@ static inline bool is_error_page(struct page *page)
 #define KVM_USERSPACE_IRQ_SOURCE_ID            0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID       1
 
-struct kvm;
-struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
 
-extern raw_spinlock_t kvm_lock;
+extern spinlock_t kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_io_range {
@@ -175,13 +173,12 @@ struct kvm_async_pf {
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
-       struct page *page;
-       bool done;
+       bool   wakeup_all;
 };
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
                       struct kvm_arch_async_pf *arch);
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
@@ -302,25 +299,6 @@ struct kvm_kernel_irq_routing_entry {
        struct hlist_node link;
 };
 
-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
-
-struct kvm_irq_routing_table {
-       int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
-       struct kvm_kernel_irq_routing_entry *rt_entries;
-       u32 nr_rt_entries;
-       /*
-        * Array indexed by gsi. Each entry contains list of irq chips
-        * the gsi is connected to.
-        */
-       struct hlist_head map[0];
-};
-
-#else
-
-struct kvm_irq_routing_table {};
-
-#endif
-
 #ifndef KVM_PRIVATE_MEM_SLOTS
 #define KVM_PRIVATE_MEM_SLOTS 0
 #endif
@@ -347,6 +325,7 @@ struct kvm {
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
+       struct srcu_struct irq_srcu;
 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
 #endif
@@ -377,11 +356,12 @@ struct kvm {
        struct mutex irq_lock;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
-        * Update side is protected by irq_lock and,
-        * if configured, irqfds.lock.
+        * Update side is protected by irq_lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
+#endif
+#ifdef CONFIG_HAVE_KVM_IRQFD
        struct hlist_head irq_ack_notifier_list;
 #endif
 
@@ -431,7 +411,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
 
-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+#ifdef CONFIG_HAVE_KVM_IRQFD
 int kvm_irqfd_init(void);
 void kvm_irqfd_exit(void);
 #else
@@ -450,8 +430,6 @@ void kvm_exit(void);
 
 void kvm_get_kvm(struct kvm *kvm);
 void kvm_put_kvm(struct kvm *kvm);
-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
-                    u64 last_generation);
 
 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
 {
@@ -494,9 +472,11 @@ int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem);
 int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem);
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+                           unsigned long npages);
+void kvm_arch_memslots_updated(struct kvm *kvm);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_userspace_memory_region *mem,
@@ -518,10 +498,12 @@ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
+unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
+                                     bool *writable);
 void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
-void kvm_set_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
@@ -533,7 +515,6 @@ pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
 
-void kvm_release_pfn_dirty(pfn_t pfn);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
 void kvm_set_pfn_accessed(pfn_t pfn);
@@ -560,14 +541,11 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
-void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                            gfn_t gfn);
 
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
-void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 
@@ -582,15 +560,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
 
-int kvm_dev_ioctl_check_extension(long ext);
+int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
 
 int kvm_get_dirty_log(struct kvm *kvm,
                        struct kvm_dirty_log *log, int *is_dirty);
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                struct kvm_dirty_log *log);
 
-int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
-                                  struct kvm_userspace_memory_region *mem);
 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                        bool line_status);
 long kvm_arch_vm_ioctl(struct file *filp,
@@ -622,6 +598,8 @@ void kvm_arch_exit(void);
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
 
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
+
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
@@ -630,16 +608,14 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
 
-int kvm_arch_hardware_enable(void *garbage);
-void kvm_arch_hardware_disable(void *garbage);
+int kvm_arch_hardware_enable(void);
+void kvm_arch_hardware_disable(void);
 int kvm_arch_hardware_setup(void);
 void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 
-void kvm_free_physmem(struct kvm *kvm);
-
 void *kvm_kvzalloc(unsigned long size);
 void kvm_kvfree(const void *addr);
 
@@ -717,6 +693,10 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);
 
+int kvm_irq_map_gsi(struct kvm *kvm,
+                   struct kvm_kernel_irq_routing_entry *entries, int gsi);
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
+
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status);
 int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
@@ -773,7 +753,7 @@ static inline void kvm_guest_enter(void)
 
        /* KVM does not hold any references to rcu protected data when it
         * switches CPU into a guest mode. In fact switching to a guest mode
-        * is very similar to exiting to userspase from rcu point of view. In
+        * is very similar to exiting to userspace from rcu point of view. In
         * addition CPU may stay in a guest mode for quite a long time (up to
         * one time slice). Lets treat guest mode as quiescent state, just like
         * we do with user-mode execution.
@@ -826,13 +806,6 @@ static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
        return gfn_to_memslot(kvm, gfn)->id;
 }
 
-static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
-{
-       /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
-       return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
-               (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
-}
-
 static inline gfn_t
 hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
 {
@@ -856,6 +829,13 @@ static inline hpa_t pfn_to_hpa(pfn_t pfn)
        return (hpa_t)pfn << PAGE_SHIFT;
 }
 
+static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
+{
+       unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+
+       return kvm_is_error_hva(hva);
+}
+
 static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
 {
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
@@ -906,28 +886,27 @@ int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
-int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
-                         struct kvm_kernel_irq_routing_entry *e,
+int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue);
 void kvm_free_irq_routing(struct kvm *kvm);
 
-int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
-
 #else
 
 static inline void kvm_free_irq_routing(struct kvm *kvm) {}
 
 #endif
 
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
+
 #ifdef CONFIG_HAVE_KVM_EVENTFD
 
 void kvm_eventfd_init(struct kvm *kvm);
 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
+#ifdef CONFIG_HAVE_KVM_IRQFD
 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_irqfd_release(struct kvm *kvm);
-void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
+void kvm_irq_routing_update(struct kvm *);
 #else
 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
@@ -949,10 +928,8 @@ static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 static inline void kvm_irqfd_release(struct kvm *kvm) {}
 
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
-static inline void kvm_irq_routing_update(struct kvm *kvm,
-                                         struct kvm_irq_routing_table *irq_rt)
+static inline void kvm_irq_routing_update(struct kvm *kvm)
 {
-       rcu_assign_pointer(kvm->irq_routing, irq_rt);
 }
 #endif
 
@@ -1013,8 +990,6 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 
 extern bool kvm_rebooting;
 
-struct kvm_device_ops;
-
 struct kvm_device {
        struct kvm_device_ops *ops;
        struct kvm *kvm;
@@ -1047,6 +1022,7 @@ struct kvm_device_ops {
 void kvm_device_get(struct kvm_device *dev);
 void kvm_device_put(struct kvm_device *dev);
 struct kvm_device *kvm_device_from_filp(struct file *filp);
+int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
 
 extern struct kvm_device_ops kvm_mpic_ops;
 extern struct kvm_device_ops kvm_xics_ops;
@@ -1071,12 +1047,6 @@ static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
 {
 }
-
-static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
-{
-       return true;
-}
-
 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 #endif
 
index b0bcce0ddc95a531dd30c30e91a89c92e961b8d4..b606bb689a3e0329d937f54e7a0e1e0a783f7e5f 100644 (file)
 #ifndef __KVM_TYPES_H__
 #define __KVM_TYPES_H__
 
+struct kvm;
+struct kvm_async_pf;
+struct kvm_device_ops;
+struct kvm_interrupt;
+struct kvm_irq_routing_table;
+struct kvm_memory_slot;
+struct kvm_one_reg;
+struct kvm_run;
+struct kvm_userspace_memory_region;
+struct kvm_vcpu;
+struct kvm_vcpu_init;
+
+enum kvm_mr_change;
+
 #include <asm/types.h>
 
 /*
index eec130af2dfa41cb510f38379c44325e6e9167b0..cc82cfb6625956231fbdaad162ffaf3524a3e753 100644 (file)
@@ -547,6 +547,7 @@ struct ata_host {
        struct device           *dev;
        void __iomem * const    *iomap;
        unsigned int            n_ports;
+       unsigned int            n_tags;                 /* nr of NCQ tags */
        void                    *private_data;
        struct ata_port_operations *ops;
        unsigned long           flags;
index 955f3d7641e828e904ae84f35dcb2bbb16594837..307d9cab2026071c98a296611b4e3a52c65f6fc8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 Linaro Ltd.
+ * Copyright (C) 2013-2014 Linaro Ltd.
  * Author: Jassi Brar <jassisinghbrar@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
 #define __MAILBOX_CLIENT_H
 
 #include <linux/of.h>
+#include <linux/device.h>
 
 struct mbox_chan;
 
 /**
  * struct mbox_client - User of a mailbox
  * @dev:               The client device
- * @chan_name:         The "controller:channel" this client wants
- * @rx_callback:       Atomic callback to provide client the data received
- * @tx_done:           Atomic callback to tell client of data transmission
  * @tx_block:          If the mbox_send_message should block until data is
  *                     transmitted.
  * @tx_tout:           Max block period in ms before TX is assumed failure
- * @knows_txdone:      if the client could run the TX state machine. Usually
+ * @knows_txdone:      If the client could run the TX state machine. Usually
  *                     if the client receives some ACK packet for transmission.
  *                     Unused if the controller already has TX_Done/RTR IRQ.
+ * @rx_callback:       Atomic callback to provide client the data received
+ * @tx_done:           Atomic callback to tell client of data transmission
  */
 struct mbox_client {
        struct device *dev;
-       const char *chan_name;
-       void (*rx_callback)(struct mbox_client *cl, void *mssg);
-       void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
        bool tx_block;
        unsigned long tx_tout;
        bool knows_txdone;
+
+       void (*rx_callback)(struct mbox_client *cl, void *mssg);
+       void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
 };
 
-struct mbox_chan *mbox_request_channel(struct mbox_client *cl);
+struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
 int mbox_send_message(struct mbox_chan *chan, void *mssg);
-void mbox_client_txdone(struct mbox_chan *chan, int r);
-bool mbox_client_peek_data(struct mbox_chan *chan);
-void mbox_free_channel(struct mbox_chan *chan);
+void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */
+bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */
+void mbox_free_channel(struct mbox_chan *chan); /* may sleep */
 
 #endif /* __MAILBOX_CLIENT_H */
index 5d1915b9af6026d2c24ae40b41d8beb1b3e185f4..d4cf96f07cfc42452f7bafb1d546cf64b984319e 100644 (file)
@@ -8,31 +8,38 @@
 #define __MAILBOX_CONTROLLER_H
 
 #include <linux/of.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/device.h>
+#include <linux/completion.h>
 
 struct mbox_chan;
 
 /**
- * struct mbox_chan_ops - s/w representation of a communication chan
+ * struct mbox_chan_ops - methods to control mailbox channels
  * @send_data: The API asks the MBOX controller driver, in atomic
  *             context try to transmit a message on the bus. Returns 0 if
  *             data is accepted for transmission, -EBUSY while rejecting
  *             if the remote hasn't yet read the last data sent. Actual
  *             transmission of data is reported by the controller via
  *             mbox_chan_txdone (if it has some TX ACK irq). It must not
- *             block.
+ *             sleep.
  * @startup:   Called when a client requests the chan. The controller
  *             could ask clients for additional parameters of communication
  *             to be provided via client's chan_data. This call may
  *             block. After this call the Controller must forward any
  *             data received on the chan by calling mbox_chan_received_data.
+ *             The controller may do stuff that need to sleep.
  * @shutdown:  Called when a client relinquishes control of a chan.
- *             This call may block too. The controller must not forwared
+ *             This call may block too. The controller must not forward
  *             any received data anymore.
+ *             The controller may do stuff that need to sleep.
  * @last_tx_done: If the controller sets 'txdone_poll', the API calls
  *               this to poll status of last TX. The controller must
  *               give priority to IRQ method over polling and never
  *               set both txdone_poll and txdone_irq. Only in polling
  *               mode 'send_data' is expected to return -EBUSY.
+ *               The controller may do stuff that need to sleep/block.
  *               Used only if txdone_poll:=true && txdone_irq:=false
  * @peek_data: Atomic check for any received data. Return true if controller
  *               has some data to push to the client. False otherwise.
@@ -46,11 +53,11 @@ struct mbox_chan_ops {
 };
 
 /**
- * struct mbox_controller - Controller of a class of communication chans
+ * struct mbox_controller - Controller of a class of communication channels
  * @dev:               Device backing this controller
- * @controller_name:   Literal name of the controller.
  * @ops:               Operators that work on each communication chan
- * @chans:             Null terminated array of chans.
+ * @chans:             Array of channels
+ * @num_chans:         Number of channels in the 'chans' array.
  * @txdone_irq:                Indicates if the controller can report to API when
  *                     the last transmitted data was read by the remote.
  *                     Eg, if it has some TX ACK irq.
@@ -59,6 +66,9 @@ struct mbox_chan_ops {
  *                     no interrupt rises. Ignored if 'txdone_irq' is set.
  * @txpoll_period:     If 'txdone_poll' is in effect, the API polls for
  *                     last TX's status after these many millisecs
+ * @of_xlate:          Controller driver specific mapping of channel via DT
+ * @poll:              API private. Used to poll for TXDONE on all channels.
+ * @node:              API private. To hook into list of controllers.
  */
 struct mbox_controller {
        struct device *dev;
@@ -69,14 +79,9 @@ struct mbox_controller {
        bool txdone_poll;
        unsigned txpoll_period;
        struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
-                                       const struct of_phandle_args *sp);
-       /*
-        * If the controller supports only TXDONE_BY_POLL,
-        * this timer polls all the links for txdone.
-        */
+                                     const struct of_phandle_args *sp);
+       /* Internal to API */
        struct timer_list poll;
-       unsigned period;
-       /* Hook to add to the global controller list */
        struct list_head node;
 };
 
@@ -84,38 +89,45 @@ struct mbox_controller {
  * The length of circular buffer for queuing messages from a client.
  * 'msg_count' tracks the number of buffered messages while 'msg_free'
  * is the index where the next message would be buffered.
- * We shouldn't need it too big because every transferr is interrupt
+ * We shouldn't need it too big because every transfer is interrupt
  * triggered and if we have lots of data to transfer, the interrupt
  * latencies are going to be the bottleneck, not the buffer length.
  * Besides, mbox_send_message could be called from atomic context and
  * the client could also queue another message from the notifier 'tx_done'
  * of the last transfer done.
- * REVIST: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN"
+ * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN"
  * print, it needs to be taken from config option or somesuch.
  */
 #define MBOX_TX_QUEUE_LEN      20
 
+/**
+ * struct mbox_chan - s/w representation of a communication chan
+ * @mbox:              Pointer to the parent/provider of this channel
+ * @txdone_method:     Way to detect TXDone chosen by the API
+ * @cl:                        Pointer to the current owner of this channel
+ * @tx_complete:       Transmission completion
+ * @active_req:                Currently active request hook
+ * @msg_count:         No. of mssg currently queued
+ * @msg_free:          Index of next available mssg slot
+ * @msg_data:          Hook for data packet
+ * @lock:              Serialise access to the channel
+ * @con_priv:          Hook for controller driver to attach private data
+ */
 struct mbox_chan {
-       struct mbox_controller *mbox; /* Parent Controller */
+       struct mbox_controller *mbox;
        unsigned txdone_method;
-
-       /* client */
        struct mbox_client *cl;
        struct completion tx_complete;
-
        void *active_req;
        unsigned msg_count, msg_free;
        void *msg_data[MBOX_TX_QUEUE_LEN];
-       /* Access to the channel */
-       spinlock_t lock;
-
-       /* Private data for controller */
+       spinlock_t lock; /* Serialise access to the channel */
        void *con_priv;
 };
 
-int mbox_controller_register(struct mbox_controller *mbox);
-void mbox_chan_received_data(struct mbox_chan *chan, void *data);
-void mbox_chan_txdone(struct mbox_chan *chan, int r);
-void mbox_controller_unregister(struct mbox_controller *mbox);
+int mbox_controller_register(struct mbox_controller *mbox); /* can sleep */
+void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */
+void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */
+void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */
 
 #endif /* __MAILBOX_CONTROLLER_H */
index d6183f06d8c182951fac67c17a2c05d2ce9f20fa..a3b4812f494f0c74a093fa5693f5e8b07fe6c40f 100644 (file)
@@ -124,6 +124,25 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                        struct page *newpage);
 
+static inline void mem_cgroup_oom_enable(void)
+{
+       WARN_ON(current->memcg_oom.may_oom);
+       current->memcg_oom.may_oom = 1;
+}
+
+static inline void mem_cgroup_oom_disable(void)
+{
+       WARN_ON(!current->memcg_oom.may_oom);
+       current->memcg_oom.may_oom = 0;
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+       return p->memcg_oom.memcg;
+}
+
+bool mem_cgroup_oom_synchronize(bool wait);
+
 #ifdef CONFIG_MEMCG_SWAP
 extern int do_swap_account;
 #endif
@@ -347,6 +366,24 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
 {
 }
 
+static inline void mem_cgroup_oom_enable(void)
+{
+}
+
+static inline void mem_cgroup_oom_disable(void)
+{
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+       return false;
+}
+
+static inline bool mem_cgroup_oom_synchronize(bool wait)
+{
+       return false;
+}
+
 static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
 {
index fc3883852f9e9e58e17f69bf374b37d6fef312c8..ec4e37ce33c1fddffca2a08e72b1a7bd0cf0ab8a 100644 (file)
@@ -52,6 +52,9 @@ extern unsigned long sysctl_admin_reserve_kbytes;
 /* to align the pointer to the (next) page boundary */
 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
 
+/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
+#define PAGE_ALIGNED(addr)     IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
+
 /*
  * Linux kernel virtual memory manager primitives.
  * The idea being to have a "virtual" mm in the same way
@@ -167,6 +170,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_RETRY_NOWAIT        0x10    /* Don't drop mmap_sem and wait when retrying */
 #define FAULT_FLAG_KILLABLE    0x20    /* The fault task is in SIGKILL killable region */
 #define FAULT_FLAG_TRIED       0x40    /* second try */
+#define FAULT_FLAG_USER                0x80    /* The fault originated in userspace */
 
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
@@ -324,6 +328,8 @@ static inline int is_vmalloc_or_module_addr(const void *x)
 }
 #endif
 
+extern void kvfree(const void *addr);
+
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -1005,6 +1011,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 
 extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
+void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
 int truncate_inode_page(struct address_space *mapping, struct page *page);
 int generic_error_remove_page(struct address_space *mapping, struct page *page);
index a8127dda1f8c83d236fb510514940cb1529b035e..1eb5d455ad88083e953d9195607e5f60d7d35693 100755 (executable)
@@ -293,7 +293,7 @@ struct dw_mci_board {
        struct block_settings *blk_settings;
 };
 #define grf_writel(v, offset)   do \
-        { writel_relaxed(v, RK_GRF_VIRT + offset); dsb(); } \
+        { writel_relaxed(v, RK_GRF_VIRT + offset); dsb(sy); } \
                 while (0)
 
 #endif /* LINUX_MMC_DW_MMC_H */
index 73005f9957ead2b95adb4107329434afbbf482a7..8eeb8f6ab1101e6ba9ab5d8742e3f96c00480fc7 100644 (file)
@@ -42,11 +42,18 @@ struct mnt_namespace;
  * flag, consider how it interacts with shared mounts.
  */
 #define MNT_SHARED_MASK        (MNT_UNBINDABLE)
-#define MNT_PROPAGATION_MASK   (MNT_SHARED | MNT_UNBINDABLE)
+#define MNT_USER_SETTABLE_MASK  (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
+                                | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
+                                | MNT_READONLY)
 
+#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
 
 #define MNT_INTERNAL   0x4000
 
+#define MNT_LOCK_ATIME         0x040000
+#define MNT_LOCK_NOEXEC                0x080000
+#define MNT_LOCK_NOSUID                0x100000
+#define MNT_LOCK_NODEV         0x200000
 #define MNT_LOCK_READONLY      0x400000
 
 struct vfsmount {
index 104b62f23ee025a51d730228e061ea49ce4513e0..54e351aa4d2e5652c03711ec70e416b71d7ee3a5 100644 (file)
@@ -1184,11 +1184,22 @@ struct nfs41_free_stateid_res {
        unsigned int                    status;
 };
 
+static inline void
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
+{
+       kfree(cinfo->buckets);
+}
+
 #else
 
 struct pnfs_ds_commit_info {
 };
 
+static inline void
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
+{
+}
+
 #endif /* CONFIG_NFS_V4_1 */
 
 struct nfs_page;
index 5169c7a708e2fe4d451d4fcd06f22d3f3b64ab42..59f21325ed64c5d08d814efb74bd30cad9cdfcbe 100644 (file)
@@ -256,14 +256,12 @@ extern int of_property_read_u64(const struct device_node *np,
 extern int of_property_read_string(struct device_node *np,
                                   const char *propname,
                                   const char **out_string);
-extern int of_property_read_string_index(struct device_node *np,
-                                        const char *propname,
-                                        int index, const char **output);
 extern int of_property_match_string(struct device_node *np,
                                    const char *propname,
                                    const char *string);
-extern int of_property_count_strings(struct device_node *np,
-                                    const char *propname);
+extern int of_property_read_string_helper(struct device_node *np,
+                                             const char *propname,
+                                             const char **out_strs, size_t sz, int index);
 extern int of_device_is_compatible(const struct device_node *device,
                                   const char *);
 extern int of_device_is_available(const struct device_node *device);
@@ -455,15 +453,9 @@ static inline int of_property_read_string(struct device_node *np,
        return -ENOSYS;
 }
 
-static inline int of_property_read_string_index(struct device_node *np,
-                                               const char *propname, int index,
-                                               const char **out_string)
-{
-       return -ENOSYS;
-}
-
-static inline int of_property_count_strings(struct device_node *np,
-                                           const char *propname)
+static inline int of_property_read_string_helper(struct device_node *np,
+                                                const char *propname,
+                                                const char **out_strs, size_t sz, int index)
 {
        return -ENOSYS;
 }
@@ -549,6 +541,70 @@ static inline int of_node_to_nid(struct device_node *np)
 #define of_node_to_nid of_node_to_nid
 #endif
 
+/**
+ * of_property_read_string_array() - Read an array of strings from a multiple
+ * strings property.
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ * @out_strs:  output array of string pointers.
+ * @sz:                number of array elements to read.
+ *
+ * Search for a property in a device tree node and retrieve a list of
+ * terminated string values (pointer to data, not a copy) in that property.
+ *
+ * If @out_strs is NULL, the number of strings in the property is returned.
+ */
+static inline int of_property_read_string_array(struct device_node *np,
+                                               const char *propname, const char **out_strs,
+                                               size_t sz)
+{
+       return of_property_read_string_helper(np, propname, out_strs, sz, 0);
+}
+
+/**
+ * of_property_count_strings() - Find and return the number of strings from a
+ * multiple strings property.
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ *
+ * Search for a property in a device tree node and retrieve the number of null
+ * terminated string contain in it. Returns the number of strings on
+ * success, -EINVAL if the property does not exist, -ENODATA if property
+ * does not have a value, and -EILSEQ if the string is not null-terminated
+ * within the length of the property data.
+ */
+static inline int of_property_count_strings(struct device_node *np,
+                                           const char *propname)
+{
+       return of_property_read_string_helper(np, propname, NULL, 0, 0);
+}
+
+/**
+ * of_property_read_string_index() - Find and read a string from a multiple
+ * strings property.
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ * @index:     index of the string in the list of strings
+ * @out_string:        pointer to null terminated return string, modified only if
+ *             return value is 0.
+ *
+ * Search for a property in a device tree node and retrieve a null
+ * terminated string value (pointer to data, not a copy) in the list of strings
+ * contained in that property.
+ * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
+ * property does not have a value, and -EILSEQ if the string is not
+ * null-terminated within the length of the property data.
+ *
+ * The out_string pointer is modified only if a valid string can be decoded.
+ */
+static inline int of_property_read_string_index(struct device_node *np,
+                                               const char *propname,
+                                               int index, const char **output)
+{
+       int rc = of_property_read_string_helper(np, propname, output, 1, index);
+       return rc < 0 ? rc : 0;
+}
+
 /**
  * of_property_read_bool - Findfrom a property
  * @np:                device node from which the property value is to be read.
index da60007075b509b864d386b3d9092c77aec97c64..297cda528855b4df0e8f6ad571d04829ea238c14 100644 (file)
@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p)
 extern unsigned long oom_badness(struct task_struct *p,
                struct mem_cgroup *memcg, const nodemask_t *nodemask,
                unsigned long totalpages);
+
+extern int oom_kills_count(void);
+extern void note_oom_kill(void);
 extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
                             unsigned int points, unsigned long totalpages,
                             struct mem_cgroup *memcg, nodemask_t *nodemask,
index 22c7052e937248e4c3337778d8608b3508a24ce5..708b8a84f6c02b4f6c232d808743671b94dc8982 100644 (file)
@@ -124,9 +124,9 @@ asmlinkage __printf(1, 2) __cold
 int printk(const char *fmt, ...);
 
 /*
- * Special printk facility for scheduler use only, _DO_NOT_USE_ !
+ * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
  */
-__printf(1, 2) __cold int printk_sched(const char *fmt, ...);
+__printf(1, 2) __cold int printk_deferred(const char *fmt, ...);
 
 /*
  * Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -161,7 +161,7 @@ int printk(const char *s, ...)
        return 0;
 }
 static inline __printf(1, 2) __cold
-int printk_sched(const char *s, ...)
+int printk_deferred(const char *s, ...)
 {
        return 0;
 }
index 3a8d78abd2056700602cb0785f1057acde1fd624..c726653fec4ea59384949dcc62f4591265c6091e 100644 (file)
@@ -92,7 +92,12 @@ struct notifier_block;
  * OVER_TEMP      Regulator over temp.
  * FORCE_DISABLE  Regulator forcibly shut down by software.
  * VOLTAGE_CHANGE Regulator voltage changed.
+ *                Data passed is old voltage cast to (void *).
  * DISABLE        Regulator was disabled.
+ * PRE_VOLTAGE_CHANGE   Regulator is about to have voltage changed.
+ *                      Data passed is "struct pre_voltage_change_data"
+ * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
+ *                      Data passed is old voltage cast to (void *).
  *
  * NOTE: These events can be OR'ed together when passed into handler.
  */
@@ -105,6 +110,21 @@ struct notifier_block;
 #define REGULATOR_EVENT_FORCE_DISABLE          0x20
 #define REGULATOR_EVENT_VOLTAGE_CHANGE         0x40
 #define REGULATOR_EVENT_DISABLE                0x80
+#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE     0x100
+#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE   0x200
+
+/**
+ * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
+ *
+ * @old_uV: Current voltage before change.
+ * @min_uV: Min voltage we'll change to.
+ * @max_uV: Max voltage we'll change to.
+ */
+struct pre_voltage_change_data {
+       unsigned long old_uV;
+       unsigned long min_uV;
+       unsigned long max_uV;
+};
 
 struct regulator;
 
index 871ba05175f825cf22d9f5b838caeae052401b37..d4cbb4b2f056b6c46afd523738db011e11697070 100755 (executable)
@@ -121,7 +121,7 @@ static inline void rk3288_cru_set_soft_reset(u32 idx, bool on)
        void __iomem *reg = RK_CRU_VIRT + RK3288_CRU_SOFTRSTS_CON(idx >> 4);
        u32 val = on ? 0x10001U << (idx & 0xf) : 0x10000U << (idx & 0xf);
        writel_relaxed(val, reg);
-       dsb();
+       dsb(sy);
 }
 
 #define RK3036_CRU_MODE_CON 0x0040
@@ -241,4 +241,15 @@ enum rk312x_cru_clk_gate {
        RK312X_CLKGATE_PCLK_UART1,
        RK312X_CLKGATE_PCLK_UART2,
 };
+
+/*************************RK3368********************************/
+
+/*******************CRU OFFSET*********************/
+#define RK3368_CRU_CLKSEL_CON          0x100
+#define RK3368_CRU_CLKGATE_CON         0x200
+
+#define RK3368_PLL_CONS(id, i)         ((id) * 0x10 + ((i) * 4))
+#define RK3368_CRU_CLKSELS_CON(i)      (RK3368_CRU_CLKSEL_CON + ((i) * 4))
+#define RK3368_CRU_CLKGATES_CON(i)     (RK3368_CRU_CLKGATE_CON + ((i) * 4))
+
 #endif
index 31a58d5551eadae73cc5e5bc863d8cf0641a2468..f8785fa69803f2217bb9521d0400d9d9c8a60fc6 100755 (executable)
@@ -5,7 +5,11 @@
 #include <asm/io.h>
 #endif
 
+#ifdef IOMEM
 #define RK_IO_ADDRESS(x)                IOMEM(0xFED00000 + x)
+#else
+#define RK_IO_ADDRESS(x)                ((void __force __iomem *)(0xFED00000 + x))
+#endif
 
 #define RK_CRU_VIRT                     RK_IO_ADDRESS(0x00000000)
 #define RK_GRF_VIRT                     RK_IO_ADDRESS(0x00010000)
index 7ce449dea2b47b26d2edff618bbc493205a04886..6b44a496e7683ed6f5221132bce33b7a7f90271c 100644 (file)
@@ -75,7 +75,7 @@ struct ion_phys_data {
 
 struct ion_share_id_data {
        int fd;
-       unsigned int id;
+       unsigned long id;
 };
 
 #define ION_IOC_ROCKCHIP_MAGIC 'R'
index a3c8b270931be92971f9b81b943d066dfdc0481d..5229df9d71077a3de80646f1e80d8d1586994791 100644 (file)
@@ -480,6 +480,7 @@ struct signal_struct {
        atomic_t                sigcnt;
        atomic_t                live;
        int                     nr_threads;
+       struct list_head        thread_head;
 
        wait_queue_head_t       wait_chldexit;  /* for wait4() */
 
@@ -950,6 +951,14 @@ struct sched_avg {
        u32 usage_avg_sum;
 };
 
+#ifdef CONFIG_SCHED_HMP
+/*
+ * We want to avoid boosting any processes forked from init (PID 1)
+ * and kthreadd (assumed to be PID 2).
+ */
+#define hmp_task_should_forkboost(task) ((task->parent && task->parent->pid > 2))
+#endif
+
 #ifdef CONFIG_SCHEDSTATS
 struct sched_statistics {
        u64                     wait_start;
@@ -1134,13 +1143,12 @@ struct task_struct {
                                 * execve */
        unsigned in_iowait:1;
 
-       /* task may not gain privileges */
-       unsigned no_new_privs:1;
-
        /* Revert to default priority/policy when forking */
        unsigned sched_reset_on_fork:1;
        unsigned sched_contributes_to_load:1;
 
+       unsigned long atomic_flags; /* Flags needing atomic access. */
+
        pid_t pid;
        pid_t tgid;
 
@@ -1173,6 +1181,7 @@ struct task_struct {
        /* PID/PID hash table linkage. */
        struct pid_link pids[PIDTYPE_MAX];
        struct list_head thread_group;
+       struct list_head thread_node;
 
        struct completion *vfork_done;          /* for vfork() */
        int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
@@ -1422,6 +1431,12 @@ struct task_struct {
                unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
        } memcg_batch;
        unsigned int memcg_kmem_skip_account;
+       struct memcg_oom_info {
+               struct mem_cgroup *memcg;
+               gfp_t gfp_mask;
+               int order;
+               unsigned int may_oom:1;
+       } memcg_oom;
 #endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
        atomic_t ptrace_bp_refcnt;
@@ -1684,11 +1699,13 @@ extern int task_free_unregister(struct notifier_block *n);
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
-/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
+/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
+ * __GFP_FS is also cleared as it implies __GFP_IO.
+ */
 static inline gfp_t memalloc_noio_flags(gfp_t flags)
 {
        if (unlikely(current->flags & PF_MEMALLOC_NOIO))
-               flags &= ~__GFP_IO;
+               flags &= ~(__GFP_IO | __GFP_FS);
        return flags;
 }
 
@@ -1704,6 +1721,19 @@ static inline void memalloc_noio_restore(unsigned int flags)
        current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
 }
 
+/* Per-process atomic flags. */
+#define PFA_NO_NEW_PRIVS 0x00000001    /* May not gain new privileges. */
+
+static inline bool task_no_new_privs(struct task_struct *p)
+{
+       return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
+}
+
+static inline void task_set_no_new_privs(struct task_struct *p)
+{
+       set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
+}
+
 /*
  * task->jobctl flags
  */
@@ -2183,6 +2213,16 @@ extern bool current_is_single_threaded(void);
 #define while_each_thread(g, t) \
        while ((t = next_thread(t)) != g)
 
+#define __for_each_thread(signal, t)   \
+       list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
+
+#define for_each_thread(p, t)          \
+       __for_each_thread((p)->signal, t)
+
+/* Careful: this is a double loop, 'break' won't work as expected. */
+#define for_each_process_thread(p, t)  \
+       for_each_process(p) for_each_thread(p, t)
+
 static inline int get_nr_threads(struct task_struct *tsk)
 {
        return tsk->signal->nr_threads;
index 6f19cfd1840e4adea37b84dea76175361d3187bc..9687691799ffeba6e6681e0ab304aa3ef745f0f1 100644 (file)
@@ -3,6 +3,8 @@
 
 #include <uapi/linux/seccomp.h>
 
+#define SECCOMP_FILTER_FLAG_MASK       (SECCOMP_FILTER_FLAG_TSYNC)
+
 #ifdef CONFIG_SECCOMP
 
 #include <linux/thread_info.h>
@@ -14,11 +16,11 @@ struct seccomp_filter;
  *
  * @mode:  indicates one of the valid values above for controlled
  *         system calls available to a process.
- * @filter: The metadata and ruleset for determining what system calls
- *          are allowed for a task.
+ * @filter: must always point to a valid seccomp-filter or NULL as it is
+ *          accessed without locking during system call entry.
  *
  *          @filter must only be accessed from the context of current as there
- *          is no locking.
+ *          is no read locking.
  */
 struct seccomp {
        int mode;
index ac889c5ea11bd68a846b7c84bb788115ba9b2cec..0ed878d0465cd837b8f3190066fe7981ca7fbd6f 100644 (file)
@@ -129,7 +129,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
 #endif
 
 extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
-                       const void *from, size_t available);
+                                      const void *from, size_t available);
 
 /**
  * strstarts - does @str start with @prefix?
@@ -141,7 +141,8 @@ static inline bool strstarts(const char *str, const char *prefix)
        return strncmp(str, prefix, strlen(prefix)) == 0;
 }
 
-extern size_t memweight(const void *ptr, size_t bytes);
+size_t memweight(const void *ptr, size_t bytes);
+void memzero_explicit(void *s, size_t count);
 
 /**
  * kbasename - return the last part of a pathname.
index b05963f09ebf7b499902973a2707fad9aaa471e0..f5bfb1a80abe382808437a1dcf87c9666063a5f3 100644 (file)
@@ -32,6 +32,7 @@ struct svc_xprt_class {
        struct svc_xprt_ops     *xcl_ops;
        struct list_head        xcl_list;
        u32                     xcl_max_payload;
+       int                     xcl_ident;
 };
 
 /*
index d4e3f16d5e8932c56e059d906add432cf88c8dbc..a34821358ae5055a11de88dbeff5b108fd5aaf12 100644 (file)
@@ -363,7 +363,7 @@ extern bool pm_wakeup_pending(void);
 extern bool pm_get_wakeup_count(unsigned int *count, bool block);
 extern bool pm_save_wakeup_count(unsigned int count);
 extern void pm_wakep_autosleep_enabled(bool set);
-
+extern void pm_get_active_wakeup_sources(char *pending_sources, size_t max);
 static inline void lock_system_sleep(void)
 {
        current->flags |= PF_FREEZER_SKIP;
index 84662ecc7b51468233175959ddd5c04bd0efac0d..4e98d7174134e610f7fe0d33472087a69599f30e 100644 (file)
@@ -846,4 +846,6 @@ asmlinkage long sys_process_vm_writev(pid_t pid,
 asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type,
                         unsigned long idx1, unsigned long idx2);
 asmlinkage long sys_finit_module(int fd, const char __user *uargs, int flags);
+asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
+                           const char __user *uargs);
 #endif
index 52f944dfe2fd68305f1f19a4f08c018e9226595c..49587dc22f5d055a782a8a947e76a8535a953dcb 100644 (file)
@@ -30,4 +30,7 @@
    descriptor */
 #define USB_QUIRK_DELAY_INIT           0x00000040
 
+/* device generates spurious wakeup, ignore remote wakeup capability */
+#define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200
+
 #endif /* __LINUX_USB_QUIRKS_H */
index 7ce50f0debc47eebfd241cc5cbd2ac4c770a106d..ad8b76936c7fb70e35f69e4e7327d41d0d7ac8ba 100644 (file)
 #ifndef _LINUX_WAKEUP_REASON_H
 #define _LINUX_WAKEUP_REASON_H
 
+#define MAX_SUSPEND_ABORT_LEN 256
+
 void log_wakeup_reason(int irq);
+void log_suspend_abort_reason(const char *fmt, ...);
+int check_wakeup_reason(int irq);
 
 #endif /* _LINUX_WAKEUP_REASON_H */
index 40ec3482d1efbc4fccf79ab7a732e3e0358f527b..8ad2dbd0c296783850c6680bba0c197cc42d30a0 100644 (file)
 #ifndef _LINUX_WLAN_PLAT_H_
 #define _LINUX_WLAN_PLAT_H_
 
+#define WLAN_PLAT_NODFS_FLAG   0x01
+
 struct wifi_platform_data {
        int (*set_power)(int val);
        int (*set_reset)(int val);
        int (*set_carddetect)(int val);
        void *(*mem_prealloc)(int section, unsigned long size);
        int (*get_mac_addr)(unsigned char *buf);
-       void *(*get_country_code)(char *ccode);
+       void *(*get_country_code)(char *ccode, u32 flags);
 };
 
 #endif
index a9f4119c7e2e3daa4ee63e56dee36475647c57d8..ef4df5b86e8528a971541544f890d2701a6120aa 100644 (file)
@@ -449,7 +449,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
 #define create_freezable_workqueue(name)                               \
        alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 #define create_singlethread_workqueue(name)                            \
-       alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
+       alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
index d88a098d1affccf3da0c7fe79d5519a4e5475193..2cc4e0df9c5dae316f622972090823feba14fefa 100644 (file)
@@ -318,6 +318,9 @@ struct v4l2_fh;
  * @done_wq:   waitqueue for processes waiting for buffers ready to be dequeued
  * @alloc_ctx: memory type/allocator-specific contexts for each plane
  * @streaming: current streaming state
+ * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
+ *             buffers. Only set for capture queues if qbuf has not yet been
+ *             called since poll() needs to return POLLERR in that situation.
  * @fileio:    file io emulator internal data, used only if emulator is active
  */
 struct vb2_queue {
@@ -350,6 +353,7 @@ struct vb2_queue {
        unsigned int                    plane_sizes[VIDEO_MAX_PLANES];
 
        unsigned int                    streaming:1;
+       unsigned int                    waiting_for_buffers:1;
 
        struct vb2_fileio_data          *fileio;
 };
index 304e41381a1f8f2a2e34f828201016da08b4e876..d9681a288ce6bc7b83b491a9dbf2523f0783dd09 100644 (file)
@@ -2497,7 +2497,7 @@ struct wiphy_vendor_command {
        struct nl80211_vendor_cmd_info info;
        u32 flags;
        int (*doit)(struct wiphy *wiphy, struct wireless_dev *wdev,
-                   void *data, int data_len);
+                   const void *data, int data_len);
 };
 
 /**
@@ -3696,8 +3696,8 @@ void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp);
 static inline struct sk_buff *
 cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
 {
-       return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_TESTMODE,
-                                         NL80211_ATTR_TESTDATA, approxlen);
+       return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_VENDOR,
+                                         NL80211_ATTR_VENDOR_DATA, approxlen);
 }
 
 /**
index e361f4882426d26fa8e21dd20e9a6a285c578546..4ac12e14c6d98b36c2ecdde79de1f0051ab2fe22 100644 (file)
@@ -23,6 +23,8 @@ struct fib_rule {
        struct fib_rule __rcu   *ctarget;
        char                    iifname[IFNAMSIZ];
        char                    oifname[IFNAMSIZ];
+       kuid_t                  uid_start;
+       kuid_t                  uid_end;
        struct rcu_head         rcu;
        struct net *            fr_net;
 };
@@ -80,7 +82,9 @@ struct fib_rules_ops {
        [FRA_FWMARK]    = { .type = NLA_U32 }, \
        [FRA_FWMASK]    = { .type = NLA_U32 }, \
        [FRA_TABLE]     = { .type = NLA_U32 }, \
-       [FRA_GOTO]      = { .type = NLA_U32 }
+       [FRA_GOTO]      = { .type = NLA_U32 }, \
+       [FRA_UID_START] = { .type = NLA_U32 }, \
+       [FRA_UID_END]   = { .type = NLA_U32 }
 
 static inline void fib_rule_get(struct fib_rule *rule)
 {
index 628e11b98c580d7f66ace375f5a4e0598d03b471..c91e2aae3fb125c1ec704ebacd5f19cac02bdc3d 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/socket.h>
 #include <linux/in6.h>
 #include <linux/atomic.h>
+#include <linux/uidgid.h>
 
 struct flowi_common {
        int     flowic_oif;
@@ -23,6 +24,7 @@ struct flowi_common {
 #define FLOWI_FLAG_CAN_SLEEP           0x02
 #define FLOWI_FLAG_KNOWN_NH            0x04
        __u32   flowic_secid;
+       kuid_t  flowic_uid;
 };
 
 union flowi_uli {
@@ -59,6 +61,7 @@ struct flowi4 {
 #define flowi4_proto           __fl_common.flowic_proto
 #define flowi4_flags           __fl_common.flowic_flags
 #define flowi4_secid           __fl_common.flowic_secid
+#define flowi4_uid             __fl_common.flowic_uid
 
        /* (saddr,daddr) must be grouped, same order as in IP header */
        __be32                  saddr;
@@ -78,7 +81,8 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
                                      __u32 mark, __u8 tos, __u8 scope,
                                      __u8 proto, __u8 flags,
                                      __be32 daddr, __be32 saddr,
-                                     __be16 dport, __be16 sport)
+                                     __be16 dport, __be16 sport,
+                                     kuid_t uid)
 {
        fl4->flowi4_oif = oif;
        fl4->flowi4_iif = 0;
@@ -88,6 +92,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
        fl4->flowi4_proto = proto;
        fl4->flowi4_flags = flags;
        fl4->flowi4_secid = 0;
+       fl4->flowi4_uid = uid;
        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->fl4_dport = dport;
@@ -115,6 +120,7 @@ struct flowi6 {
 #define flowi6_proto           __fl_common.flowic_proto
 #define flowi6_flags           __fl_common.flowic_flags
 #define flowi6_secid           __fl_common.flowic_secid
+#define flowi6_uid             __fl_common.flowic_uid
        struct in6_addr         daddr;
        struct in6_addr         saddr;
        __be32                  flowlabel;
@@ -158,6 +164,7 @@ struct flowi {
 #define flowi_proto    u.__fl_common.flowic_proto
 #define flowi_flags    u.__fl_common.flowic_flags
 #define flowi_secid    u.__fl_common.flowic_secid
+#define flowi_uid      u.__fl_common.flowic_uid
 } __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
index de2c78529afaf81f00abff62bb29708f7f6941a2..0a8f6f961baa749b4b4aaed155ebe999d4c59a0d 100644 (file)
@@ -62,6 +62,7 @@ struct inet_connection_sock_af_ops {
        void        (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
        int         (*bind_conflict)(const struct sock *sk,
                                     const struct inet_bind_bucket *tb, bool relax);
+       void        (*mtu_reduced)(struct sock *sk);
 };
 
 /** inet_connection_sock - INET connection oriented sock
index 6ca347a0717efedf386b8ccaa318c5e34c7e643b..bb06fd26a7bd9501b7bd98d6323c8e3f581b9ae2 100644 (file)
@@ -41,14 +41,13 @@ struct inet_peer {
                struct rcu_head     gc_rcu;
        };
        /*
-        * Once inet_peer is queued for deletion (refcnt == -1), following fields
-        * are not available: rid, ip_id_count
+        * Once inet_peer is queued for deletion (refcnt == -1), following field
+        * is not available: rid
         * We can share memory with rcu_head to help keep inet_peer small.
         */
        union {
                struct {
                        atomic_t                        rid;            /* Frag reception counter */
-                       atomic_t                        ip_id_count;    /* IP ID for the next packet */
                };
                struct rcu_head         rcu;
                struct inet_peer        *gc_next;
@@ -166,7 +165,7 @@ extern void inetpeer_invalidate_tree(struct inet_peer_base *);
 extern void inetpeer_invalidate_family(int family);
 
 /*
- * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
+ * temporary check to make sure we dont access rid, tcp_ts,
  * tcp_ts_stamp if no refcount is taken on inet_peer
  */
 static inline void inet_peer_refcheck(const struct inet_peer *p)
@@ -174,13 +173,4 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
        WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
 }
 
-
-/* can be called with or without local BH being disabled */
-static inline int inet_getid(struct inet_peer *p, int more)
-{
-       more++;
-       inet_peer_refcheck(p);
-       return atomic_add_return(more, &p->ip_id_count) - more;
-}
-
 #endif /* _NET_INETPEER_H */
index 9f2c12a17fca110ca1671406d6c0d106b84160c1..1f6794b2cac219a56a1416f8ac66c1dea3e8c3bf 100644 (file)
@@ -153,6 +153,7 @@ struct ip_reply_arg {
                                /* -1 if not needed */ 
        int         bound_dev_if;
        u8          tos;
+       kuid_t      uid;
 }; 
 
 #define IP_REPLY_ARG_NOSRCCHECK 1
@@ -255,9 +256,10 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
                 !(dst_metric_locked(dst, RTAX_MTU)));
 }
 
-extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
+u32 ip_idents_reserve(u32 hash, int segs);
+void __ip_select_ident(struct iphdr *iph, int segs);
 
-static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
+static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
 {
        struct iphdr *iph = ip_hdr(skb);
 
@@ -267,24 +269,20 @@ static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, s
                 * does not change, they drop every other packet in
                 * a TCP stream using header compression.
                 */
-               iph->id = (sk && inet_sk(sk)->inet_daddr) ?
-                                       htons(inet_sk(sk)->inet_id++) : 0;
-       } else
-               __ip_select_ident(iph, dst, 0);
-}
-
-static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)
-{
-       struct iphdr *iph = ip_hdr(skb);
-
-       if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
                if (sk && inet_sk(sk)->inet_daddr) {
                        iph->id = htons(inet_sk(sk)->inet_id);
-                       inet_sk(sk)->inet_id += 1 + more;
-               } else
+                       inet_sk(sk)->inet_id += segs;
+               } else {
                        iph->id = 0;
-       } else
-               __ip_select_ident(iph, dst, more);
+               }
+       } else {
+               __ip_select_ident(iph, segs);
+       }
+}
+
+static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk)
+{
+       ip_select_ident_segs(skb, sk, 1);
 }
 
 /*
index 8d977b3436474b057bd29d3898bfa771e14c23fb..6be6debb5361bd8a8ca569973cb40a97f4b25ac7 100644 (file)
@@ -136,7 +136,7 @@ extern int                  rt6_route_rcv(struct net_device *dev,
                                              const struct in6_addr *gwaddr);
 
 extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
-                           int oif, u32 mark);
+                           int oif, u32 mark, kuid_t uid);
 extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
                               __be32 mtu);
 extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
index efd270da5847999284ab20ba9006531dca7b0fc6..ac1d532965a27acd5456a8d7cd74a3172733a27f 100644 (file)
@@ -539,14 +539,19 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a)
 }
 
 /* more secured version of ipv6_addr_hash() */
-static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
+static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
 {
        u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
 
        return jhash_3words(v,
                            (__force u32)a->s6_addr32[2],
                            (__force u32)a->s6_addr32[3],
-                           ipv6_hash_secret);
+                           initval);
+}
+
+static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
+{
+       return __ipv6_addr_jhash(a, ipv6_hash_secret);
 }
 
 static inline bool ipv6_addr_loopback(const struct in6_addr *a)
@@ -658,8 +663,6 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
        return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
 }
 
-extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
-
 /*
  *     Header manipulation
  */
index 2ea40c1b5e009746dacb8f146fdcc37309aa7ff3..647bb2adbffd55608afe36e365a90c7c39a9a48f 100644 (file)
@@ -142,7 +142,7 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi
        flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
                           RT_SCOPE_UNIVERSE, proto,
                           sk ? inet_sk_flowi_flags(sk) : 0,
-                          daddr, saddr, dport, sport);
+                          daddr, saddr, dport, sport, sk ? sock_i_uid(sk) : 0);
        if (sk)
                security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
        return ip_route_output_flow(net, fl4, sk);
@@ -253,7 +253,8 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32
                flow_flags |= FLOWI_FLAG_CAN_SLEEP;
 
        flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
-                          protocol, flow_flags, dst, src, dport, sport);
+                          protocol, flow_flags, dst, src, dport, sport,
+                          sock_i_uid(sk));
 }
 
 static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
index 35247271e5571153110fa7759e6c24c13c1114aa..5f39c1cc0766baaad663d98882168b275859ded4 100644 (file)
@@ -118,7 +118,7 @@ typedef enum {
  * analysis of the state functions, but in reality just taken from
  * thin air in the hopes othat we don't trigger a kernel panic.
  */
-#define SCTP_MAX_NUM_COMMANDS 14
+#define SCTP_MAX_NUM_COMMANDS 20
 
 typedef union {
        __s32 i32;
index cd89510eab2a44d405c2cba2817b3b7ae863306e..845ab6decc457b1e8a18aee128fe68f70322a6ee 100644 (file)
@@ -540,6 +540,11 @@ static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_associat
        asoc->pmtu_pending = 0;
 }
 
+static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
+{
+       return !list_empty(&chunk->list);
+}
+
 /* Walk through a list of TLV parameters.  Don't trust the
  * individual parameter lengths and instead depend on
  * the chunk length to indicate when to stop.  Make sure
index 2a82d1384706c61a6757db53edf8d81a53cbc7de..c4c9458f37cdede038205781eb84eeb4f991a7bb 100644 (file)
@@ -255,9 +255,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
                                              int, __be16);
 struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
                                             union sctp_addr *addr);
-int sctp_verify_asconf(const struct sctp_association *asoc,
-                      struct sctp_paramhdr *param_hdr, void *chunk_end,
-                      struct sctp_paramhdr **errp);
+bool sctp_verify_asconf(const struct sctp_association *asoc,
+                       struct sctp_chunk *chunk, bool addr_param_needed,
+                       struct sctp_paramhdr **errp);
 struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
                                       struct sctp_chunk *asconf);
 int sctp_process_asconf_ack(struct sctp_association *asoc,
index c2e542b27a5a8316f3b105b2338bc96d5698690e..b1c3d1c63c4e0e2a54807d0fc209efb341fa77ae 100644 (file)
@@ -3,8 +3,6 @@
 
 #include <linux/types.h>
 
-extern __u32 secure_ip_id(__be32 daddr);
-extern __u32 secure_ipv6_id(const __be32 daddr[4]);
 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
                                      __be16 dport);
index 72f710d2f75a0ce6d6f0d4c38125a0af92a7cce5..c0aad07160ef3e3b03be628634ca6ab70aec339c 100644 (file)
@@ -932,7 +932,6 @@ struct proto {
                                                struct sk_buff *skb);
 
        void            (*release_cb)(struct sock *sk);
-       void            (*mtu_reduced)(struct sock *sk);
 
        /* Keeping track of sk's, looking them up, and port selection methods. */
        void                    (*hash)(struct sock *sk);
@@ -1727,8 +1726,8 @@ sk_dst_get(struct sock *sk)
 
        rcu_read_lock();
        dst = rcu_dereference(sk->sk_dst_cache);
-       if (dst)
-               dst_hold(dst);
+       if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+               dst = NULL;
        rcu_read_unlock();
        return dst;
 }
@@ -1767,9 +1766,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 static inline void
 sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
-       spin_lock(&sk->sk_dst_lock);
-       __sk_dst_set(sk, dst);
-       spin_unlock(&sk->sk_dst_lock);
+       struct dst_entry *old_dst;
+
+       sk_tx_queue_clear(sk);
+       old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+       dst_release(old_dst);
 }
 
 static inline void
@@ -1781,9 +1782,7 @@ __sk_dst_reset(struct sock *sk)
 static inline void
 sk_dst_reset(struct sock *sk)
 {
-       spin_lock(&sk->sk_dst_lock);
-       __sk_dst_reset(sk);
-       spin_unlock(&sk->sk_dst_lock);
+       sk_dst_set(sk, NULL);
 }
 
 extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
index 13f12c10f03b84c68c77c5cc178e6f323e0c6346..e0fc213575823f9eb6e8f75c5e1d899c25b74e13 100644 (file)
@@ -461,6 +461,7 @@ extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
  */
 
 extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+void tcp_v4_mtu_reduced(struct sock *sk);
 extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 extern struct sock * tcp_create_openreq_child(struct sock *sk,
                                              struct request_sock *req,
index 7005d1109ec94c8c2839bbff6984395e20e8a873..908925ace77661a2e056fa5ddd9ac52df12a0b19 100644 (file)
@@ -37,7 +37,7 @@ TRACE_EVENT(kvm_userspace_exit,
                  __entry->errno < 0 ? -__entry->errno : __entry->reason)
 );
 
-#if defined(CONFIG_HAVE_KVM_IRQCHIP)
+#if defined(CONFIG_HAVE_KVM_IRQFD)
 TRACE_EVENT(kvm_set_irq,
        TP_PROTO(unsigned int gsi, int level, int irq_source_id),
        TP_ARGS(gsi, level, irq_source_id),
@@ -57,7 +57,7 @@ TRACE_EVENT(kvm_set_irq,
        TP_printk("gsi %u level %d source %d",
                  __entry->gsi, __entry->level, __entry->irq_source_id)
 );
-#endif
+#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
 
 #if defined(__KVM_HAVE_IOAPIC)
 #define kvm_deliver_mode               \
@@ -124,7 +124,7 @@ TRACE_EVENT(kvm_msi_set_irq,
 
 #endif /* defined(__KVM_HAVE_IOAPIC) */
 
-#if defined(CONFIG_HAVE_KVM_IRQCHIP)
+#if defined(CONFIG_HAVE_KVM_IRQFD)
 
 TRACE_EVENT(kvm_ack_irq,
        TP_PROTO(unsigned int irqchip, unsigned int pin),
@@ -149,7 +149,7 @@ TRACE_EVENT(kvm_ack_irq,
 #endif
 );
 
-#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
+#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
 
 
 
@@ -296,23 +296,21 @@ DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
 
 TRACE_EVENT(
        kvm_async_pf_completed,
-       TP_PROTO(unsigned long address, struct page *page, u64 gva),
-       TP_ARGS(address, page, gva),
+       TP_PROTO(unsigned long address, u64 gva),
+       TP_ARGS(address, gva),
 
        TP_STRUCT__entry(
                __field(unsigned long, address)
-               __field(pfn_t, pfn)
                __field(u64, gva)
                ),
 
        TP_fast_assign(
                __entry->address = address;
-               __entry->pfn = page ? page_to_pfn(page) : 0;
                __entry->gva = gva;
                ),
 
-       TP_printk("gva %#llx address %#lx pfn %#llx",  __entry->gva,
-                 __entry->address, __entry->pfn)
+       TP_printk("gva %#llx address %#lx",  __entry->gva,
+                 __entry->address)
 );
 
 #endif
index 0cc74c4403e446c8ecc5a1bd9fd8f5f19b3ae0dc..b422ad5d238bb7b983fd08ae0fe6c43f0b7ba964 100644 (file)
@@ -692,9 +692,19 @@ __SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
 __SYSCALL(__NR_kcmp, sys_kcmp)
 #define __NR_finit_module 273
 __SYSCALL(__NR_finit_module, sys_finit_module)
+/* Backporting seccomp, skip a few ...
+ * #define __NR_sched_setattr 274
+__SYSCALL(__NR_sched_setattr, sys_sched_setattr)
+ * #define __NR_sched_getattr 275
+__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
+ * #define __NR_renameat2 276
+__SYSCALL(__NR_renameat2, sys_renameat2)
+ */
+#define __NR_seccomp 277
+__SYSCALL(__NR_seccomp, sys_seccomp)
 
 #undef __NR_syscalls
-#define __NR_syscalls 274
+#define __NR_syscalls 278
 
 /*
  * All syscalls below here should go away really,
index bdc6e87ff3eb379cf223a54d6976f36ce96e7fab..405887bec8b35a1c00c8160d83b84cdd3e8dae84 100644 (file)
@@ -311,6 +311,7 @@ header-y += ppp-ioctl.h
 header-y += ppp_defs.h
 header-y += pps.h
 header-y += prctl.h
+header-y += psci.h
 header-y += ptp_clock.h
 header-y += ptrace.h
 header-y += qnx4_fs.h
index 75cef3fd97add201693b6bf21f4e2f1754c96f43..ce8750f8788a21665b6bd600be219fdf5fd036c5 100644 (file)
@@ -324,6 +324,8 @@ enum {
 /* distinguish syscall tables */
 #define __AUDIT_ARCH_64BIT 0x80000000
 #define __AUDIT_ARCH_LE           0x40000000
+
+#define AUDIT_ARCH_AARCH64     (EM_AARCH64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ALPHA       (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARM         (EM_ARM|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARMEB       (EM_ARM)
index 51da65b68b8501cb2d25fe0064c2c6ff65e3d4e6..9dcdb6251cb877642229b577a2c16172ba43b123 100644 (file)
@@ -49,6 +49,8 @@ enum {
        FRA_TABLE,      /* Extended table id */
        FRA_FWMASK,     /* mask for netfilter mark */
        FRA_OIFNAME,
+       FRA_UID_START,  /* UID range */
+       FRA_UID_END,
        __FRA_MAX
 };
 
index a4ed56cf0eac5f1e5c1b90edf2a1d3b6093d9e02..5014a5c472ed1447bf4237a0da534cabe8ce517a 100644 (file)
@@ -154,6 +154,8 @@ struct inodes_stat_t {
 #define FITHAW         _IOWR('X', 120, int)    /* Thaw */
 #define FITRIM         _IOWR('X', 121, struct fstrim_range)    /* Trim */
 
+#define FIDTRIM        _IOWR('f', 128, struct fstrim_range)    /* Deep discard trim */
+
 #define        FS_IOC_GETFLAGS                 _IOR('f', 1, long)
 #define        FS_IOC_SETFLAGS                 _IOW('f', 2, long)
 #define        FS_IOC_GETVERSION               _IOR('v', 1, long)
index 4214fac1bf4fbeea0c702c440f39aaa6cad94291..e9d0f7efde3b8dbd41790360df37336e213c1531 100644 (file)
@@ -161,6 +161,7 @@ enum {
        DEVCONF_FORCE_TLLAO,
        DEVCONF_NDISC_NOTIFY,
        DEVCONF_ACCEPT_RA_RT_TABLE,
+       DEVCONF_USE_OPTIMISTIC,
        DEVCONF_MAX
 };
 
index d88c8ee00c8b7b39cc935c8353bc7f4f3eb3c8bf..00d2c69a3cb67a0e8424163aaf1655c0ba5d1f5a 100644 (file)
@@ -171,6 +171,7 @@ struct kvm_pit_config {
 #define KVM_EXIT_WATCHDOG         21
 #define KVM_EXIT_S390_TSCH        22
 #define KVM_EXIT_EPR              23
+#define KVM_EXIT_SYSTEM_EVENT     24
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
@@ -301,6 +302,13 @@ struct kvm_run {
                struct {
                        __u32 epr;
                } epr;
+               /* KVM_EXIT_SYSTEM_EVENT */
+               struct {
+#define KVM_SYSTEM_EVENT_SHUTDOWN       1
+#define KVM_SYSTEM_EVENT_RESET          2
+                       __u32 type;
+                       __u64 flags;
+               } system_event;
                /* Fix the size of the union. */
                char padding[256];
        };
@@ -391,8 +399,9 @@ struct kvm_vapic_addr {
        __u64 vapic_addr;
 };
 
-/* for KVM_SET_MPSTATE */
+/* for KVM_SET_MP_STATE */
 
+/* not all states are valid on all architectures */
 #define KVM_MP_STATE_RUNNABLE          0
 #define KVM_MP_STATE_UNINITIALIZED     1
 #define KVM_MP_STATE_INIT_RECEIVED     2
@@ -541,6 +550,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_TRACE_ENABLE          __KVM_DEPRECATED_MAIN_W_0x06
 #define KVM_TRACE_PAUSE           __KVM_DEPRECATED_MAIN_0x07
 #define KVM_TRACE_DISABLE         __KVM_DEPRECATED_MAIN_0x08
+#define KVM_GET_EMULATED_CPUID   _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
 
 /*
  * Extension capability list.
@@ -568,9 +578,7 @@ struct kvm_ppc_smmu_info {
 #endif
 /* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
 #define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
-#ifdef __KVM_HAVE_USER_NMI
 #define KVM_CAP_USER_NMI 22
-#endif
 #ifdef __KVM_HAVE_GUEST_DEBUG
 #define KVM_CAP_SET_GUEST_DEBUG 23
 #endif
@@ -652,9 +660,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_PPC_GET_SMMU_INFO 78
 #define KVM_CAP_S390_COW 79
 #define KVM_CAP_PPC_ALLOC_HTAB 80
-#ifdef __KVM_HAVE_READONLY_MEM
 #define KVM_CAP_READONLY_MEM 81
-#endif
 #define KVM_CAP_IRQFD_RESAMPLE 82
 #define KVM_CAP_PPC_BOOKE_WATCHDOG 83
 #define KVM_CAP_PPC_HTAB_FD 84
@@ -666,6 +672,10 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_IRQ_MPIC 90
 #define KVM_CAP_PPC_RTAS 91
 #define KVM_CAP_IRQ_XICS 92
+#define KVM_CAP_ARM_EL1_32BIT 93
+#define KVM_CAP_EXT_EMUL_CPUID 95
+#define KVM_CAP_ARM_PSCI_0_2 102
+#define KVM_CAP_CHECK_EXTENSION_VM 105
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -783,6 +793,7 @@ struct kvm_dirty_tlb {
 #define KVM_REG_IA64           0x3000000000000000ULL
 #define KVM_REG_ARM            0x4000000000000000ULL
 #define KVM_REG_S390           0x5000000000000000ULL
+#define KVM_REG_ARM64          0x6000000000000000ULL
 #define KVM_REG_MIPS           0x7000000000000000ULL
 
 #define KVM_REG_SIZE_SHIFT     52
@@ -837,9 +848,25 @@ struct kvm_device_attr {
        __u64   addr;           /* userspace address of attr data */
 };
 
-#define KVM_DEV_TYPE_FSL_MPIC_20       1
-#define KVM_DEV_TYPE_FSL_MPIC_42       2
-#define KVM_DEV_TYPE_XICS              3
+#define  KVM_DEV_VFIO_GROUP                    1
+#define   KVM_DEV_VFIO_GROUP_ADD                       1
+#define   KVM_DEV_VFIO_GROUP_DEL                       2
+
+enum kvm_device_type {
+       KVM_DEV_TYPE_FSL_MPIC_20        = 1,
+#define KVM_DEV_TYPE_FSL_MPIC_20       KVM_DEV_TYPE_FSL_MPIC_20
+       KVM_DEV_TYPE_FSL_MPIC_42,
+#define KVM_DEV_TYPE_FSL_MPIC_42       KVM_DEV_TYPE_FSL_MPIC_42
+       KVM_DEV_TYPE_XICS,
+#define KVM_DEV_TYPE_XICS              KVM_DEV_TYPE_XICS
+       KVM_DEV_TYPE_VFIO,
+#define KVM_DEV_TYPE_VFIO              KVM_DEV_TYPE_VFIO
+       KVM_DEV_TYPE_ARM_VGIC_V2,
+#define KVM_DEV_TYPE_ARM_VGIC_V2       KVM_DEV_TYPE_ARM_VGIC_V2
+       KVM_DEV_TYPE_FLIC,
+#define KVM_DEV_TYPE_FLIC              KVM_DEV_TYPE_FLIC
+       KVM_DEV_TYPE_MAX,
+};
 
 /*
  * ioctls for VM fds
@@ -977,7 +1004,7 @@ struct kvm_s390_ucas_mapping {
 #define KVM_S390_INITIAL_RESET    _IO(KVMIO,   0x97)
 #define KVM_GET_MP_STATE          _IOR(KVMIO,  0x98, struct kvm_mp_state)
 #define KVM_SET_MP_STATE          _IOW(KVMIO,  0x99, struct kvm_mp_state)
-/* Available with KVM_CAP_NMI */
+/* Available with KVM_CAP_USER_NMI */
 #define KVM_NMI                   _IO(KVMIO,   0x9a)
 /* Available with KVM_CAP_SET_GUEST_DEBUG */
 #define KVM_SET_GUEST_DEBUG       _IOW(KVMIO,  0x9b, struct kvm_guest_debug)
@@ -1009,6 +1036,7 @@ struct kvm_s390_ucas_mapping {
 /* VM is being stopped by host */
 #define KVM_KVMCLOCK_CTRL        _IO(KVMIO,   0xad)
 #define KVM_ARM_VCPU_INIT        _IOW(KVMIO,  0xae, struct kvm_vcpu_init)
+#define KVM_ARM_PREFERRED_TARGET  _IOR(KVMIO,  0xaf, struct kvm_vcpu_init)
 #define KVM_GET_REG_LIST         _IOWR(KVMIO, 0xb0, struct kvm_reg_list)
 
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU    (1 << 0)
index 5dda450eb55be1ca37d17680984d788f94c82d29..2ec9fbcd06f9a489c0669521bf77728af69e0ed0 100644 (file)
@@ -6,6 +6,8 @@
 
 #define XT_BPF_MAX_NUM_INSTR   64
 
+struct sk_filter;
+
 struct xt_bpf_info {
        __u16 bpf_program_num_elem;
        struct sock_filter bpf_program[XT_BPF_MAX_NUM_INSTR];
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
new file mode 100644 (file)
index 0000000..310d83e
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * ARM Power State and Coordination Interface (PSCI) header
+ *
+ * This header holds common PSCI defines and macros shared
+ * by: ARM kernel, ARM64 kernel, KVM ARM/ARM64 and user space.
+ *
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Anup Patel <anup.patel@linaro.org>
+ */
+
+#ifndef _UAPI_LINUX_PSCI_H
+#define _UAPI_LINUX_PSCI_H
+
+/*
+ * PSCI v0.1 interface
+ *
+ * The PSCI v0.1 function numbers are implementation defined.
+ *
+ * Only PSCI return values such as: SUCCESS, NOT_SUPPORTED,
+ * INVALID_PARAMS, and DENIED defined below are applicable
+ * to PSCI v0.1.
+ */
+
+/* PSCI v0.2 interface */
+#define PSCI_0_2_FN_BASE                       0x84000000
+#define PSCI_0_2_FN(n)                         (PSCI_0_2_FN_BASE + (n))
+#define PSCI_0_2_64BIT                         0x40000000
+#define PSCI_0_2_FN64_BASE                     \
+                                       (PSCI_0_2_FN_BASE + PSCI_0_2_64BIT)
+#define PSCI_0_2_FN64(n)                       (PSCI_0_2_FN64_BASE + (n))
+
+#define PSCI_0_2_FN_PSCI_VERSION               PSCI_0_2_FN(0)
+#define PSCI_0_2_FN_CPU_SUSPEND                        PSCI_0_2_FN(1)
+#define PSCI_0_2_FN_CPU_OFF                    PSCI_0_2_FN(2)
+#define PSCI_0_2_FN_CPU_ON                     PSCI_0_2_FN(3)
+#define PSCI_0_2_FN_AFFINITY_INFO              PSCI_0_2_FN(4)
+#define PSCI_0_2_FN_MIGRATE                    PSCI_0_2_FN(5)
+#define PSCI_0_2_FN_MIGRATE_INFO_TYPE          PSCI_0_2_FN(6)
+#define PSCI_0_2_FN_MIGRATE_INFO_UP_CPU                PSCI_0_2_FN(7)
+#define PSCI_0_2_FN_SYSTEM_OFF                 PSCI_0_2_FN(8)
+#define PSCI_0_2_FN_SYSTEM_RESET               PSCI_0_2_FN(9)
+
+#define PSCI_0_2_FN64_CPU_SUSPEND              PSCI_0_2_FN64(1)
+#define PSCI_0_2_FN64_CPU_ON                   PSCI_0_2_FN64(3)
+#define PSCI_0_2_FN64_AFFINITY_INFO            PSCI_0_2_FN64(4)
+#define PSCI_0_2_FN64_MIGRATE                  PSCI_0_2_FN64(5)
+#define PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU      PSCI_0_2_FN64(7)
+
+/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
+#define PSCI_0_2_POWER_STATE_ID_MASK           0xffff
+#define PSCI_0_2_POWER_STATE_ID_SHIFT          0
+#define PSCI_0_2_POWER_STATE_TYPE_SHIFT                16
+#define PSCI_0_2_POWER_STATE_TYPE_MASK         \
+                               (0x1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
+#define PSCI_0_2_POWER_STATE_AFFL_SHIFT                24
+#define PSCI_0_2_POWER_STATE_AFFL_MASK         \
+                               (0x3 << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
+
+/* PSCI v0.2 affinity level state returned by AFFINITY_INFO */
+#define PSCI_0_2_AFFINITY_LEVEL_ON             0
+#define PSCI_0_2_AFFINITY_LEVEL_OFF            1
+#define PSCI_0_2_AFFINITY_LEVEL_ON_PENDING     2
+
+/* PSCI v0.2 multicore support in Trusted OS returned by MIGRATE_INFO_TYPE */
+#define PSCI_0_2_TOS_UP_MIGRATE                        0
+#define PSCI_0_2_TOS_UP_NO_MIGRATE             1
+#define PSCI_0_2_TOS_MP                                2
+
+/* PSCI version decoding (independent of PSCI version) */
+#define PSCI_VERSION_MAJOR_SHIFT               16
+#define PSCI_VERSION_MINOR_MASK                        \
+               ((1U << PSCI_VERSION_MAJOR_SHIFT) - 1)
+#define PSCI_VERSION_MAJOR_MASK                        ~PSCI_VERSION_MINOR_MASK
+#define PSCI_VERSION_MAJOR(ver)                        \
+               (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
+#define PSCI_VERSION_MINOR(ver)                        \
+               ((ver) & PSCI_VERSION_MINOR_MASK)
+
+/* PSCI return values (inclusive of all PSCI versions) */
+#define PSCI_RET_SUCCESS                       0
+#define PSCI_RET_NOT_SUPPORTED                 -1
+#define PSCI_RET_INVALID_PARAMS                        -2
+#define PSCI_RET_DENIED                                -3
+#define PSCI_RET_ALREADY_ON                    -4
+#define PSCI_RET_ON_PENDING                    -5
+#define PSCI_RET_INTERNAL_FAILURE              -6
+#define PSCI_RET_NOT_PRESENT                   -7
+#define PSCI_RET_DISABLED                      -8
+
+#endif /* _UAPI_LINUX_PSCI_H */
index 7a2144e1afae679a198449bad4e7b57a77a63fb0..07c1146c1f51104bcc8912c1719e1297bcb57340 100644 (file)
@@ -297,6 +297,7 @@ enum rtattr_type_t {
        RTA_TABLE,
        RTA_MARK,
        RTA_MFC_STATS,
+       RTA_UID,
        __RTA_MAX
 };
 
index ac2dc9f7297367c0529ed70e7ed02a8be8f4c27f..0f238a43ff1e7e5ebd8bd746a055fa910c10b51d 100644 (file)
 #define SECCOMP_MODE_STRICT    1 /* uses hard-coded filter. */
 #define SECCOMP_MODE_FILTER    2 /* uses user-supplied filter. */
 
+/* Valid operations for seccomp syscall. */
+#define SECCOMP_SET_MODE_STRICT        0
+#define SECCOMP_SET_MODE_FILTER        1
+
+/* Valid flags for SECCOMP_SET_MODE_FILTER */
+#define SECCOMP_FILTER_FLAG_TSYNC      1
+
 /*
  * All BPF programs must return a 32-bit value.
  * The bottom 16-bits are for optional return data.
index 602dc6c45d1a091fb548ac586c69813f25f4f508..165e7059de75173ec1bf29dcfb9138c35a4a3956 100644 (file)
@@ -57,6 +57,7 @@
 #define MAX_NUM_CODECS 32
 #define MAX_NUM_CODEC_DESCRIPTORS 32
 #define MAX_NUM_BITRATES 32
+#define MAX_NUM_SAMPLE_RATES 32
 
 /* Codecs are listed linearly to allow for extensibility */
 #define SND_AUDIOCODEC_PCM                   ((__u32) 0x00000001)
@@ -324,7 +325,8 @@ union snd_codec_options {
 
 /** struct snd_codec_desc - description of codec capabilities
  * @max_ch: Maximum number of audio channels
- * @sample_rates: Sampling rates in Hz, use SNDRV_PCM_RATE_xxx for this
+ * @sample_rates: Sampling rates in Hz, use values like 48000 for this
+ * @num_sample_rates: Number of valid values in sample_rates array
  * @bit_rate: Indexed array containing supported bit rates
  * @num_bitrates: Number of valid values in bit_rate array
  * @rate_control: value is specified by SND_RATECONTROLMODE defines.
@@ -346,7 +348,8 @@ union snd_codec_options {
 
 struct snd_codec_desc {
        __u32 max_ch;
-       __u32 sample_rates;
+       __u32 sample_rates[MAX_NUM_SAMPLE_RATES];
+       __u32 num_sample_rates;
        __u32 bit_rate[MAX_NUM_BITRATES];
        __u32 num_bitrates;
        __u32 rate_control;
@@ -364,7 +367,8 @@ struct snd_codec_desc {
  * @ch_out: Number of output channels. In case of contradiction between
  *             this field and the channelMode field, the channelMode field
  *             overrides.
- * @sample_rate: Audio sample rate of input data
+ * @sample_rate: Audio sample rate of input data in Hz, use values like 48000
+ *             for this.
  * @bit_rate: Bitrate of encoded data. May be ignored by decoders
  * @rate_control: Encoding rate control. See SND_RATECONTROLMODE defines.
  *               Encoders may rely on profiles for quality levels.
index 051ac0c6b16f1acef6d9c9b224858bb10f2625e6..79b356f892f9a22f261b3281a3925ba37597c433 100644 (file)
@@ -1373,6 +1373,7 @@ config FUTEX
 
 config HAVE_FUTEX_CMPXCHG
        bool
+       depends on FUTEX
        help
          Architectures should select this if futex_atomic_cmpxchg_inatomic()
          is implemented and always working. This removes a couple of runtime
index e83ac04fda977397899ec0d342a74012ee52d13c..2132ffd5e03188cabb1865af574af8e27feb6adf 100644 (file)
@@ -605,6 +605,10 @@ asmlinkage void __init start_kernel(void)
 #ifdef CONFIG_X86
        if (efi_enabled(EFI_RUNTIME_SERVICES))
                efi_enter_virtual_mode();
+#endif
+#ifdef CONFIG_X86_ESPFIX64
+       /* Should be run before the first non-init thread is created */
+       init_espfix_bsp();
 #endif
        thread_info_cache_init();
        cred_init();
index b0e99deb6d05330482c8ec98f1a511f07f9fa5f1..a0f0ab2ac2a8b92961c229dde22703dc7107cf3e 100644 (file)
@@ -123,7 +123,6 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
        void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        struct ctl_table ipc_table;
-       size_t lenp_bef = *lenp;
        int oldval;
        int rc;
 
@@ -133,7 +132,7 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
 
        rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
 
-       if (write && !rc && lenp_bef == *lenp) {
+       if (write && !rc) {
                int newval = *((int *)(ipc_table.data));
                /*
                 * The file "auto_msgmni" has correctly been set.
index 44511d100eaa89f73c53f0e16828a9f03d7256ac..e4d30533c562c1eb810d890707d74d332a147aa9 100644 (file)
@@ -220,6 +220,9 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
 
 endif
 
+config ARCH_SUPPORTS_ATOMIC_RMW
+       bool
+
 config MUTEX_SPIN_ON_OWNER
        def_bool y
-       depends on SMP && !DEBUG_MUTEXES
+       depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
index a6c632757e57a3223c8daebf1219d8d94c218120..4dd7529b084515505ab0028a373efc0b78ef118d 100644 (file)
@@ -1412,7 +1412,7 @@ void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
        audit_log_format(ab, " %s=", prefix);
        CAP_FOR_EACH_U32(i) {
                audit_log_format(ab, "%08x",
-                                cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
+                                cap->cap[CAP_LAST_U32 - i]);
        }
 }
 
index 43c307dc9453d5c9166596d2303deaf099cbf5b0..00c4459f76df3e5256bb27eef53f4822153753c2 100644 (file)
@@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count)
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
+       chunk->mark.mask = FS_IN_IGNORED;
        return chunk;
 }
 
index d52eecc0942b0eacc20ef0a7e6bf277d31e87a8f..1339806a87312f175a2a19fd99751f949dc9917c 100644 (file)
@@ -268,6 +268,10 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
                i++;
        }
 
+       effective.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+       permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+       inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+
        new = prepare_creds();
        if (!new)
                return -ENOMEM;
index cd1c303214f32c20672538283d1e053323abb65c..e646e870ec5fbe6929e582866c313cd513a5ac0b 100644 (file)
@@ -2124,6 +2124,25 @@ static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
        return 0;
 }
 
+int subsys_cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+       const struct cred *cred = current_cred(), *tcred;
+       struct task_struct *task;
+
+       if (capable(CAP_SYS_NICE))
+               return 0;
+
+       cgroup_taskset_for_each(task, cgrp, tset) {
+               tcred = __task_cred(task);
+
+               if (current != task && cred->euid != tcred->uid &&
+                   cred->euid != tcred->suid)
+                       return -EACCES;
+       }
+
+       return 0;
+}
+
 /*
  * Find the task_struct of the task to attach by vpid and pass it along to the
  * function to attach either it or all tasks in its threadgroup. Will lock
index 459b94c9472140e1ea2160f070594887d2de60f2..3f63ea6464cae60eb55acb1bbed1cb10af2fdedf 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/mm_types.h>
 #include <linux/cgroup.h>
+#include <linux/compat.h>
 
 #include "internal.h"
 
@@ -1398,6 +1399,11 @@ retry:
         */
        if (ctx->is_active) {
                raw_spin_unlock_irq(&ctx->lock);
+               /*
+                * Reload the task pointer, it might have been changed by
+                * a concurrent perf_event_context_sched_out().
+                */
+               task = ctx->task;
                goto retry;
        }
 
@@ -1829,6 +1835,11 @@ retry:
         */
        if (ctx->is_active) {
                raw_spin_unlock_irq(&ctx->lock);
+               /*
+                * Reload the task pointer, it might have been changed by
+                * a concurrent perf_event_context_sched_out().
+                */
+               task = ctx->task;
                goto retry;
        }
 
@@ -3480,6 +3491,25 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        return 0;
 }
 
+#ifdef CONFIG_COMPAT
+static long perf_compat_ioctl(struct file *file, unsigned int cmd,
+                               unsigned long arg)
+{
+       switch (_IOC_NR(cmd)) {
+       case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
+               /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */
+               if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
+                       cmd &= ~IOCSIZE_MASK;
+                       cmd |= sizeof(void *) << IOCSIZE_SHIFT;
+               }
+               break;
+       }
+       return perf_ioctl(file, cmd, arg);
+}
+#else
+# define perf_compat_ioctl NULL
+#endif
+
 int perf_event_task_enable(void)
 {
        struct perf_event *event;
@@ -3951,7 +3981,7 @@ static const struct file_operations perf_fops = {
        .read                   = perf_read,
        .poll                   = perf_poll,
        .unlocked_ioctl         = perf_ioctl,
-       .compat_ioctl           = perf_ioctl,
+       .compat_ioctl           = perf_compat_ioctl,
        .mmap                   = perf_mmap,
        .fasync                 = perf_fasync,
 };
@@ -7472,8 +7502,10 @@ int perf_event_init_task(struct task_struct *child)
 
        for_each_task_context_nr(ctxn) {
                ret = perf_event_init_context(child, ctxn);
-               if (ret)
+               if (ret) {
+                       perf_event_free_task(child);
                        return ret;
+               }
        }
 
        return 0;
index 89cca291f86388ca51d7704cc994f3a1c2a37abf..33fde71b83d057579c5c55d48d8479b9d69944bb 100644 (file)
@@ -74,6 +74,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
                __this_cpu_dec(process_counts);
        }
        list_del_rcu(&p->thread_group);
+       list_del_rcu(&p->thread_node);
 }
 
 /*
index a0fbe5277226bbe679d03cc8efea6c3bf145f46b..b0663950634b332526d3bd28074953e1369361c9 100644 (file)
@@ -327,6 +327,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
                goto free_ti;
 
        tsk->stack = ti;
+#ifdef CONFIG_SECCOMP
+       /*
+        * We must handle setting up seccomp filters once we're under
+        * the sighand lock in case orig has changed between now and
+        * then. Until then, filter must be NULL to avoid messing up
+        * the usage counts on the error path calling free_task.
+        */
+       tsk->seccomp.filter = NULL;
+#endif
 
        setup_thread_stack(tsk, orig);
        clear_user_return_notifier(tsk);
@@ -1062,6 +1071,11 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        sig->nr_threads = 1;
        atomic_set(&sig->live, 1);
        atomic_set(&sig->sigcnt, 1);
+
+       /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
+       sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
+       tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
+
        init_waitqueue_head(&sig->wait_chldexit);
        sig->curr_target = tsk;
        init_sigpending(&sig->shared_pending);
@@ -1103,6 +1117,39 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
        p->flags = new_flags;
 }
 
+static void copy_seccomp(struct task_struct *p)
+{
+#ifdef CONFIG_SECCOMP
+       /*
+        * Must be called with sighand->lock held, which is common to
+        * all threads in the group. Holding cred_guard_mutex is not
+        * needed because this new task is not yet running and cannot
+        * be racing exec.
+        */
+       assert_spin_locked(&current->sighand->siglock);
+
+       /* Ref-count the new filter user, and assign it. */
+       get_seccomp_filter(current);
+       p->seccomp = current->seccomp;
+
+       /*
+        * Explicitly enable no_new_privs here in case it got set
+        * between the task_struct being duplicated and holding the
+        * sighand lock. The seccomp state and nnp must be in sync.
+        */
+       if (task_no_new_privs(current))
+               task_set_no_new_privs(p);
+
+       /*
+        * If the parent gained a seccomp mode after copying thread
+        * flags and between before we held the sighand lock, we have
+        * to manually enable the seccomp thread flag here.
+        */
+       if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
+               set_tsk_thread_flag(p, TIF_SECCOMP);
+#endif
+}
+
 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
 {
        current->clear_child_tid = tidptr;
@@ -1207,7 +1254,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                goto fork_out;
 
        ftrace_graph_init_task(p);
-       get_seccomp_filter(p);
 
        rt_mutex_init_task(p);
 
@@ -1336,7 +1382,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                goto bad_fork_cleanup_policy;
        retval = audit_alloc(p);
        if (retval)
-               goto bad_fork_cleanup_policy;
+               goto bad_fork_cleanup_perf;
        /* copy all the process information */
        retval = copy_semundo(clone_flags, p);
        if (retval)
@@ -1449,6 +1495,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        spin_lock(&current->sighand->siglock);
 
+       /*
+        * Copy seccomp details explicitly here, in case they were changed
+        * before holding sighand lock.
+        */
+       copy_seccomp(p);
+
        /*
         * Process group and session signals need to be delivered to just the
         * parent before the fork or both the parent and the child after the
@@ -1465,14 +1517,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                goto bad_fork_free_pid;
        }
 
-       if (clone_flags & CLONE_THREAD) {
-               current->signal->nr_threads++;
-               atomic_inc(&current->signal->live);
-               atomic_inc(&current->signal->sigcnt);
-               p->group_leader = current->group_leader;
-               list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
-       }
-
        if (likely(p->pid)) {
                ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
 
@@ -1489,6 +1533,15 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                        list_add_tail(&p->sibling, &p->real_parent->children);
                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
                        __this_cpu_inc(process_counts);
+               } else {
+                       current->signal->nr_threads++;
+                       atomic_inc(&current->signal->live);
+                       atomic_inc(&current->signal->sigcnt);
+                       p->group_leader = current->group_leader;
+                       list_add_tail_rcu(&p->thread_group,
+                                         &p->group_leader->thread_group);
+                       list_add_tail_rcu(&p->thread_node,
+                                         &p->signal->thread_head);
                }
                attach_pid(p, PIDTYPE_PID, pid);
                nr_threads++;
@@ -1533,8 +1586,9 @@ bad_fork_cleanup_semundo:
        exit_sem(p);
 bad_fork_cleanup_audit:
        audit_free(p);
-bad_fork_cleanup_policy:
+bad_fork_cleanup_perf:
        perf_event_free_task(p);
+bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
        mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
index 5420f635111f4b0b9d28760276424e6aa39a7b21..4ada72f5f55adfb9b02bd01bc834407a0819c320 100644 (file)
@@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_struct *p)
        if (p->flags & PF_NOFREEZE)
                return false;
 
+       if (test_thread_flag(TIF_MEMDIE))
+               return false;
+
        if (pm_nosig_freezing || cgroup_freezing(p))
                return true;
 
index c72b7a43beb9428d3c134584e394db9252569cd2..bbb5f65665b94e7908d080d2f8ab54ea4ed72951 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/syscore_ops.h>
-
+#include <linux/wakeup_reason.h>
 #include "internals.h"
 
 /**
@@ -100,11 +100,16 @@ EXPORT_SYMBOL_GPL(resume_device_irqs);
 int check_wakeup_irqs(void)
 {
        struct irq_desc *desc;
+       char suspend_abort[MAX_SUSPEND_ABORT_LEN];
        int irq;
 
        for_each_irq_desc(irq, desc) {
                if (irqd_is_wakeup_set(&desc->irq_data)) {
                        if (desc->istate & IRQS_PENDING) {
+                               log_suspend_abort_reason("Wakeup IRQ %d %s pending",
+                                       irq,
+                                       desc->action && desc->action->name ?
+                                       desc->action->name : "");
                                pr_info("Wakeup IRQ %d %s pending, suspend aborted\n",
                                        irq,
                                        desc->action && desc->action->name ?
index e30ac0fe61c3ded5533cb4db8e95b2d41dcf2ac8..0aa69ea1d8fdcfa68046aa75b03c4373783a02fa 100644 (file)
@@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type)
  */
 static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
 {
-       long ret;
+       long t1, t2;
 
-       ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
+       t1 = kptr_obfuscate((long)v1, type);
+       t2 = kptr_obfuscate((long)v2, type);
 
-       return (ret < 0) | ((ret > 0) << 1);
+       return (t1 < t2) | ((t1 > t2) << 1);
 }
 
 /* The caller must have pinned the task */
index 10a3af821d2863c105a43e4ab3dc9be34615a15f..61fb677211cba90d841bf1fab48649d0c4504aaa 100644 (file)
@@ -1866,7 +1866,9 @@ static void free_module(struct module *mod)
 
        /* We leave it in list to prevent duplicate loads, but make sure
         * that noone uses it while it's being deconstructed. */
+       mutex_lock(&module_mutex);
        mod->state = MODULE_STATE_UNFORMED;
+       mutex_unlock(&module_mutex);
 
        /* Remove dynamic debug info */
        ddebug_remove_module(mod->name);
index 424c2d4265c90cca5a484f2d5ae0ab587550e454..77e6b83c0431515e7c47f0888e5ae74af4ee5199 100644 (file)
@@ -634,6 +634,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
                        goto out;
                }
        } else {
+               memset(&event.sigev_value, 0, sizeof(event.sigev_value));
                event.sigev_notify = SIGEV_SIGNAL;
                event.sigev_signo = SIGALRM;
                event.sigev_value.sival_int = new_timer->it_id;
index b26f5f1e773e6b6aa3420ee1fb60c9b8fc6025cc..1634dc6e2fe7db3e5f77fa513d10d26a406be634 100644 (file)
@@ -491,8 +491,14 @@ int hibernation_restore(int platform_mode)
        error = dpm_suspend_start(PMSG_QUIESCE);
        if (!error) {
                error = resume_target_kernel(platform_mode);
-               dpm_resume_end(PMSG_RECOVER);
+               /*
+                * The above should either succeed and jump to the new kernel,
+                * or return with an error. Otherwise things are just
+                * undefined, so let's be paranoid.
+                */
+               BUG_ON(!error);
        }
+       dpm_resume_end(PMSG_RECOVER);
        pm_restore_gfp_mask();
        ftrace_start();
        resume_console();
index d77663bfedeb071370f584f2bf1e42cd8332e5a4..312c1b2c725ddf4165a373c609f307679118a951 100644 (file)
@@ -293,12 +293,12 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
 {
        char *s = buf;
 #ifdef CONFIG_SUSPEND
-       int i;
+       suspend_state_t i;
+
+       for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
+               if (pm_states[i].state)
+                       s += sprintf(s,"%s ", pm_states[i].label);
 
-       for (i = 0; i < PM_SUSPEND_MAX; i++) {
-               if (pm_states[i] && valid_state(i))
-                       s += sprintf(s,"%s ", pm_states[i]);
-       }
 #endif
 #ifdef CONFIG_HIBERNATION
        s += sprintf(s, "%s\n", "disk");
@@ -314,7 +314,7 @@ static suspend_state_t decode_state(const char *buf, size_t n)
 {
 #ifdef CONFIG_SUSPEND
        suspend_state_t state = PM_SUSPEND_MIN;
-       const char * const *s;
+       struct pm_sleep_state *s;
 #endif
        char *p;
        int len;
@@ -328,8 +328,9 @@ static suspend_state_t decode_state(const char *buf, size_t n)
 
 #ifdef CONFIG_SUSPEND
        for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
-               if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
-                       return state;
+               if (s->state && len == strlen(s->label)
+                   && !strncmp(buf, s->label, len))
+                       return s->state;
 #endif
 
        return PM_SUSPEND_ON;
@@ -445,8 +446,8 @@ static ssize_t autosleep_show(struct kobject *kobj,
 
 #ifdef CONFIG_SUSPEND
        if (state < PM_SUSPEND_MAX)
-               return sprintf(buf, "%s\n", valid_state(state) ?
-                                               pm_states[state] : "error");
+               return sprintf(buf, "%s\n", pm_states[state].state ?
+                                       pm_states[state].label : "error");
 #endif
 #ifdef CONFIG_HIBERNATION
        return sprintf(buf, "disk\n");
index 7d4b7ffb3c1d4371f19e81fe83c67ea88ac83759..f770cad3666c0c9f0fa4f018f04839f461eff5b3 100644 (file)
@@ -175,17 +175,20 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *,
                                unsigned int, char *);
 
 #ifdef CONFIG_SUSPEND
+struct pm_sleep_state {
+       const char *label;
+       suspend_state_t state;
+};
+
 /* kernel/power/suspend.c */
-extern const char *const pm_states[];
+extern struct pm_sleep_state pm_states[];
 
-extern bool valid_state(suspend_state_t state);
 extern int suspend_devices_and_enter(suspend_state_t state);
 #else /* !CONFIG_SUSPEND */
 static inline int suspend_devices_and_enter(suspend_state_t state)
 {
        return -ENOSYS;
 }
-static inline bool valid_state(suspend_state_t state) { return false; }
 #endif /* !CONFIG_SUSPEND */
 
 #ifdef CONFIG_PM_TEST_SUSPEND
index fc0df84864495f8c44261961cb6909e66d24a21e..498dfb8bfba386eca8996b0820336ae3f0b53c18 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/delay.h>
 #include <linux/workqueue.h>
 #include <linux/kmod.h>
-
+#include <linux/wakeup_reason.h>
 /* 
  * Timeout for stopping processes
  */
@@ -34,6 +34,7 @@ static int try_to_freeze_tasks(bool user_only)
        unsigned int elapsed_msecs;
        bool wakeup = false;
        int sleep_usecs = USEC_PER_MSEC;
+       char suspend_abort[MAX_SUSPEND_ABORT_LEN];
 
        do_gettimeofday(&start);
 
@@ -63,6 +64,9 @@ static int try_to_freeze_tasks(bool user_only)
                        break;
 
                if (pm_wakeup_pending()) {
+                       pm_get_active_wakeup_sources(suspend_abort,
+                               MAX_SUSPEND_ABORT_LEN);
+                       log_suspend_abort_reason(suspend_abort);
                        wakeup = true;
                        break;
                }
@@ -82,23 +86,24 @@ static int try_to_freeze_tasks(bool user_only)
        do_div(elapsed_msecs64, NSEC_PER_MSEC);
        elapsed_msecs = elapsed_msecs64;
 
-       if (todo) {
+       if (wakeup) {
+               printk("\n");
+               printk(KERN_ERR "Freezing of tasks aborted after %d.%03d seconds",
+                      elapsed_msecs / 1000, elapsed_msecs % 1000);
+       } else if (todo) {
                printk("\n");
-               printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
-                      "(%d tasks refusing to freeze, wq_busy=%d):\n",
-                      wakeup ? "aborted" : "failed",
+               printk(KERN_ERR "Freezing of tasks failed after %d.%03d seconds"
+                      " (%d tasks refusing to freeze, wq_busy=%d):\n",
                       elapsed_msecs / 1000, elapsed_msecs % 1000,
                       todo - wq_busy, wq_busy);
 
-               if (!wakeup) {
-                       read_lock(&tasklist_lock);
-                       do_each_thread(g, p) {
-                               if (p != current && !freezer_should_skip(p)
-                                   && freezing(p) && !frozen(p))
-                                       sched_show_task(p);
-                       } while_each_thread(g, p);
-                       read_unlock(&tasklist_lock);
-               }
+               read_lock(&tasklist_lock);
+               do_each_thread(g, p) {
+                       if (p != current && !freezer_should_skip(p)
+                           && freezing(p) && !frozen(p))
+                               sched_show_task(p);
+               } while_each_thread(g, p);
+               read_unlock(&tasklist_lock);
        } else {
                printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
                        elapsed_msecs % 1000);
@@ -107,6 +112,28 @@ static int try_to_freeze_tasks(bool user_only)
        return todo ? -EBUSY : 0;
 }
 
+/*
+ * Returns true if all freezable tasks (except for current) are frozen already
+ */
+static bool check_frozen_processes(void)
+{
+       struct task_struct *g, *p;
+       bool ret = true;
+
+       read_lock(&tasklist_lock);
+       for_each_process_thread(g, p) {
+               if (p != current && !freezer_should_skip(p) &&
+                   !frozen(p)) {
+                       ret = false;
+                       goto done;
+               }
+       }
+done:
+       read_unlock(&tasklist_lock);
+
+       return ret;
+}
+
 /**
  * freeze_processes - Signal user space processes to enter the refrigerator.
  *
@@ -115,6 +142,7 @@ static int try_to_freeze_tasks(bool user_only)
 int freeze_processes(void)
 {
        int error;
+       int oom_kills_saved;
 
        error = __usermodehelper_disable(UMH_FREEZING);
        if (error)
@@ -125,12 +153,27 @@ int freeze_processes(void)
 
        printk("Freezing user space processes ... ");
        pm_freezing = true;
+       oom_kills_saved = oom_kills_count();
        error = try_to_freeze_tasks(true);
        if (!error) {
-               printk("done.");
                __usermodehelper_set_disable_depth(UMH_DISABLED);
                oom_killer_disable();
+
+               /*
+                * There might have been an OOM kill while we were
+                * freezing tasks and the killed task might be still
+                * on the way out so we have to double check for race.
+                */
+               if (oom_kills_count() != oom_kills_saved &&
+                               !check_frozen_processes()) {
+                       __usermodehelper_set_disable_depth(UMH_ENABLED);
+                       printk("OOM in progress.");
+                       error = -EBUSY;
+                       goto done;
+               }
+               printk("done.");
        }
+done:
        printk("\n");
        BUG_ON(in_atomic());
 
@@ -178,6 +221,7 @@ void thaw_processes(void)
 
        printk("Restarting tasks ... ");
 
+       __usermodehelper_set_disable_depth(UMH_FREEZING);
        thaw_workqueues();
 
        read_lock(&tasklist_lock);
index 454568e6c8d280a59a9f2e3e0ed50c6af6f27f90..82450c20875c3736f2579f9b86f302adeb542190 100644 (file)
 #include <linux/ftrace.h>
 #include <linux/rtc.h>
 #include <trace/events/power.h>
+#include <linux/wakeup_reason.h>
 
 #include "power.h"
 
-const char *const pm_states[PM_SUSPEND_MAX] = {
-       [PM_SUSPEND_FREEZE]     = "freeze",
-       [PM_SUSPEND_STANDBY]    = "standby",
-       [PM_SUSPEND_MEM]        = "mem",
+struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
+       [PM_SUSPEND_FREEZE] = { .label = "freeze", .state = PM_SUSPEND_FREEZE },
+       [PM_SUSPEND_STANDBY] = { .label = "standby", },
+       [PM_SUSPEND_MEM] = { .label = "mem", },
 };
 
 static const struct platform_suspend_ops *suspend_ops;
@@ -63,42 +64,34 @@ void freeze_wake(void)
 }
 EXPORT_SYMBOL_GPL(freeze_wake);
 
+static bool valid_state(suspend_state_t state)
+{
+       /*
+        * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level
+        * support and need to be valid to the low level
+        * implementation, no valid callback implies that none are valid.
+        */
+       return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
+}
+
 /**
  * suspend_set_ops - Set the global suspend method table.
  * @ops: Suspend operations to use.
  */
 void suspend_set_ops(const struct platform_suspend_ops *ops)
 {
+       suspend_state_t i;
+
        lock_system_sleep();
+
        suspend_ops = ops;
+       for (i = PM_SUSPEND_STANDBY; i <= PM_SUSPEND_MEM; i++)
+               pm_states[i].state = valid_state(i) ? i : 0;
+
        unlock_system_sleep();
 }
 EXPORT_SYMBOL_GPL(suspend_set_ops);
 
-bool valid_state(suspend_state_t state)
-{
-       if (state == PM_SUSPEND_FREEZE) {
-#ifdef CONFIG_PM_DEBUG
-               if (pm_test_level != TEST_NONE &&
-                   pm_test_level != TEST_FREEZER &&
-                   pm_test_level != TEST_DEVICES &&
-                   pm_test_level != TEST_PLATFORM) {
-                       printk(KERN_WARNING "Unsupported pm_test mode for "
-                                       "freeze state, please choose "
-                                       "none/freezer/devices/platform.\n");
-                       return false;
-               }
-#endif
-                       return true;
-       }
-       /*
-        * PM_SUSPEND_STANDBY and PM_SUSPEND_MEMORY states need lowlevel
-        * support and need to be valid to the lowlevel
-        * implementation, no valid callback implies that none are valid.
-        */
-       return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
-}
-
 /**
  * suspend_valid_only_mem - Generic memory-only valid callback.
  *
@@ -147,7 +140,7 @@ static int suspend_prepare(suspend_state_t state)
        error = suspend_freeze_processes();
        if (!error)
                return 0;
-
+       log_suspend_abort_reason("One or more tasks refusing to freeze");
        suspend_stats.failed_freeze++;
        dpm_save_failed_step(SUSPEND_FREEZE);
  Finish:
@@ -177,7 +170,8 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
  */
 static int suspend_enter(suspend_state_t state, bool *wakeup)
 {
-       int error;
+       char suspend_abort[MAX_SUSPEND_ABORT_LEN];
+       int error, last_dev;
 
        if (need_suspend_ops(state) && suspend_ops->prepare) {
                error = suspend_ops->prepare();
@@ -187,7 +181,11 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 
        error = dpm_suspend_end(PMSG_SUSPEND);
        if (error) {
+               last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+               last_dev %= REC_FAILED_NUM;
                printk(KERN_ERR "PM: Some devices failed to power down\n");
+               log_suspend_abort_reason("%s device failed to power down",
+                       suspend_stats.failed_devs[last_dev]);
                goto Platform_finish;
        }
 
@@ -212,8 +210,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
        }
 
        error = disable_nonboot_cpus();
-       if (error || suspend_test(TEST_CPUS))
+       if (error || suspend_test(TEST_CPUS)) {
+               log_suspend_abort_reason("Disabling non-boot cpus failed");
                goto Enable_cpus;
+       }
 
        arch_suspend_disable_irqs();
        BUG_ON(!irqs_disabled());
@@ -224,6 +224,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
                if (!(suspend_test(TEST_CORE) || *wakeup)) {
                        error = suspend_ops->enter(state);
                        events_check_enabled = false;
+               } else {
+                       pm_get_active_wakeup_sources(suspend_abort,
+                               MAX_SUSPEND_ABORT_LEN);
+                       log_suspend_abort_reason(suspend_abort);
                }
                syscore_resume();
        }
@@ -271,6 +275,7 @@ int suspend_devices_and_enter(suspend_state_t state)
        error = dpm_suspend_start(PMSG_SUSPEND);
        if (error) {
                printk(KERN_ERR "PM: Some devices failed to suspend\n");
+               log_suspend_abort_reason("Some devices failed to suspend");
                goto Recover_platform;
        }
        suspend_test_finish("suspend devices");
@@ -325,9 +330,17 @@ static int enter_state(suspend_state_t state)
 {
        int error;
 
-       if (!valid_state(state))
-               return -ENODEV;
-
+       if (state == PM_SUSPEND_FREEZE) {
+#ifdef CONFIG_PM_DEBUG
+               if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
+                       pr_warning("PM: Unsupported test mode for freeze state,"
+                                  " please choose none/freezer/devices/platform.\n");
+                       return -EAGAIN;
+               }
+#endif
+       } else if (!valid_state(state)) {
+               return -EINVAL;
+       }
        if (!mutex_trylock(&pm_mutex))
                return -EBUSY;
 
@@ -338,7 +351,7 @@ static int enter_state(suspend_state_t state)
        sys_sync();
        printk("done.\n");
 
-       pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
+       pr_debug("PM: Preparing system for %s sleep\n", pm_states[state].label);
        error = suspend_prepare(state);
        if (error)
                goto Unlock;
@@ -346,7 +359,7 @@ static int enter_state(suspend_state_t state)
        if (suspend_test(TEST_FREEZER))
                goto Finish;
 
-       pr_debug("PM: Entering %s sleep\n", pm_states[state]);
+       pr_debug("PM: Entering %s sleep\n", pm_states[state].label);
        pm_restrict_gfp_mask();
        error = suspend_devices_and_enter(state);
        pm_restore_gfp_mask();
index 9b2a1d58558da81bfa31137a4b075214761f7a81..269b097e78eaa03d590b7c930badeca3015ac75f 100644 (file)
@@ -92,13 +92,13 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
        }
 
        if (state == PM_SUSPEND_MEM) {
-               printk(info_test, pm_states[state]);
+               printk(info_test, pm_states[state].label);
                status = pm_suspend(state);
                if (status == -ENODEV)
                        state = PM_SUSPEND_STANDBY;
        }
        if (state == PM_SUSPEND_STANDBY) {
-               printk(info_test, pm_states[state]);
+               printk(info_test, pm_states[state].label);
                status = pm_suspend(state);
        }
        if (status < 0)
@@ -136,18 +136,16 @@ static char warn_bad_state[] __initdata =
 
 static int __init setup_test_suspend(char *value)
 {
-       unsigned i;
+       suspend_state_t i;
 
        /* "=mem" ==> "mem" */
        value++;
-       for (i = 0; i < PM_SUSPEND_MAX; i++) {
-               if (!pm_states[i])
-                       continue;
-               if (strcmp(pm_states[i], value) != 0)
-                       continue;
-               test_state = (__force suspend_state_t) i;
-               return 0;
-       }
+       for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
+               if (!strcmp(pm_states[i].label, value)) {
+                       test_state = pm_states[i].state;
+                       return 0;
+               }
+
        printk(warn_bad_state, value);
        return 0;
 }
@@ -164,8 +162,8 @@ static int __init test_suspend(void)
        /* PM is initialized by now; is that state testable? */
        if (test_state == PM_SUSPEND_ON)
                goto done;
-       if (!valid_state(test_state)) {
-               printk(warn_bad_state, pm_states[test_state]);
+       if (!pm_states[test_state].state) {
+               printk(warn_bad_state, pm_states[test_state].label);
                goto done;
        }
 
index 187e4e9105fbb06d95b52446a6a7946a96de4da0..085c99edca06fac67365cfbb2d3a4e1e414d1a6b 100644 (file)
 #define MAX_WAKEUP_REASON_IRQS 32
 static int irq_list[MAX_WAKEUP_REASON_IRQS];
 static int irqcount;
+static bool suspend_abort;
+static char abort_reason[MAX_SUSPEND_ABORT_LEN];
 static struct kobject *wakeup_reason;
-static spinlock_t resume_reason_lock;
+static DEFINE_SPINLOCK(resume_reason_lock);
 
 static ssize_t last_resume_reason_show(struct kobject *kobj, struct kobj_attribute *attr,
                char *buf)
@@ -40,14 +42,18 @@ static ssize_t last_resume_reason_show(struct kobject *kobj, struct kobj_attribu
        int irq_no, buf_offset = 0;
        struct irq_desc *desc;
        spin_lock(&resume_reason_lock);
-       for (irq_no = 0; irq_no < irqcount; irq_no++) {
-               desc = irq_to_desc(irq_list[irq_no]);
-               if (desc && desc->action && desc->action->name)
-                       buf_offset += sprintf(buf + buf_offset, "%d %s\n",
-                                       irq_list[irq_no], desc->action->name);
-               else
-                       buf_offset += sprintf(buf + buf_offset, "%d\n",
-                                       irq_list[irq_no]);
+       if (suspend_abort) {
+               buf_offset = sprintf(buf, "Abort: %s", abort_reason);
+       } else {
+               for (irq_no = 0; irq_no < irqcount; irq_no++) {
+                       desc = irq_to_desc(irq_list[irq_no]);
+                       if (desc && desc->action && desc->action->name)
+                               buf_offset += sprintf(buf + buf_offset, "%d %s\n",
+                                               irq_list[irq_no], desc->action->name);
+                       else
+                               buf_offset += sprintf(buf + buf_offset, "%d\n",
+                                               irq_list[irq_no]);
+               }
        }
        spin_unlock(&resume_reason_lock);
        return buf_offset;
@@ -89,6 +95,40 @@ void log_wakeup_reason(int irq)
        spin_unlock(&resume_reason_lock);
 }
 
+int check_wakeup_reason(int irq)
+{
+       int irq_no;
+       int ret = false;
+
+       spin_lock(&resume_reason_lock);
+       for (irq_no = 0; irq_no < irqcount; irq_no++)
+               if (irq_list[irq_no] == irq) {
+                       ret = true;
+                       break;
+               }
+       spin_unlock(&resume_reason_lock);
+       return ret;
+}
+
+void log_suspend_abort_reason(const char *fmt, ...)
+{
+       va_list args;
+
+       spin_lock(&resume_reason_lock);
+
+       /* Suspend abort reason has already been logged. */
+       if (suspend_abort) {
+               spin_unlock(&resume_reason_lock);
+               return;
+       }
+
+       suspend_abort = true;
+       va_start(args, fmt);
+       vsnprintf(abort_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
+       va_end(args);
+       spin_unlock(&resume_reason_lock);
+}
+
 /* Detects a suspend and clears all the previous wake up reasons*/
 static int wakeup_reason_pm_event(struct notifier_block *notifier,
                unsigned long pm_event, void *unused)
@@ -97,6 +137,7 @@ static int wakeup_reason_pm_event(struct notifier_block *notifier,
        case PM_SUSPEND_PREPARE:
                spin_lock(&resume_reason_lock);
                irqcount = 0;
+               suspend_abort = false;
                spin_unlock(&resume_reason_lock);
                break;
        default:
@@ -115,7 +156,7 @@ static struct notifier_block wakeup_reason_pm_notifier_block = {
 int __init wakeup_reason_init(void)
 {
        int retval;
-       spin_lock_init(&resume_reason_lock);
+
        retval = register_pm_notifier(&wakeup_reason_pm_notifier_block);
        if (retval)
                printk(KERN_WARNING "[%s] failed to register PM notifier %d\n",
index cbf71b406e33627b7e5c9e5941caf0b97f7bef1d..832b6a663e8a421121325e87376c086d8a7538ef 100644 (file)
@@ -2509,7 +2509,7 @@ void wake_up_klogd(void)
        preempt_enable();
 }
 
-int printk_sched(const char *fmt, ...)
+int printk_deferred(const char *fmt, ...)
 {
        unsigned long flags;
        va_list args;
index 4c05e5faa704a1a2df0dda707f656dd0d72c4e39..ea4e780697b497c9afdadf522bc313b9c89a95e6 100644 (file)
@@ -1235,7 +1235,7 @@ out:
                 * leave kernel.
                 */
                if (p->mm && printk_ratelimit()) {
-                       printk_sched("process %d (%s) no longer affine to cpu%d\n",
+                       printk_deferred("process %d (%s) no longer affine to cpu%d\n",
                                        task_pid_nr(p), p->comm, cpu);
                }
        }
@@ -1635,9 +1635,9 @@ static void __sched_fork(struct task_struct *p)
 #ifdef CONFIG_SCHED_HMP
        /* keep LOAD_AVG_MAX in sync with fair.c if load avg series is changed */
 #define LOAD_AVG_MAX 47742
-       if (p->mm) {
-               p->se.avg.hmp_last_up_migration = 0;
-               p->se.avg.hmp_last_down_migration = 0;
+       p->se.avg.hmp_last_up_migration = 0;
+       p->se.avg.hmp_last_down_migration = 0;
+       if (hmp_task_should_forkboost(p)) {
                p->se.avg.load_avg_ratio = 1023;
                p->se.avg.load_avg_contrib =
                                (1023 * scale_load_down(p->se.load.weight));
@@ -7761,23 +7761,6 @@ static void cpu_cgroup_css_offline(struct cgroup *cgrp)
        sched_offline_group(tg);
 }
 
-static int
-cpu_cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
-{
-       const struct cred *cred = current_cred(), *tcred;
-       struct task_struct *task;
-
-       cgroup_taskset_for_each(task, cgrp, tset) {
-               tcred = __task_cred(task);
-
-               if ((current != task) && !capable(CAP_SYS_NICE) &&
-                   cred->euid != tcred->uid && cred->euid != tcred->suid)
-                       return -EACCES;
-       }
-
-       return 0;
-}
-
 static int cpu_cgroup_can_attach(struct cgroup *cgrp,
                                 struct cgroup_taskset *tset)
 {
@@ -8144,7 +8127,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
        .css_offline    = cpu_cgroup_css_offline,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
-       .allow_attach   = cpu_cgroup_allow_attach,
+       .allow_attach   = subsys_cgroup_allow_attach,
        .exit           = cpu_cgroup_exit,
        .subsys_id      = cpu_cgroup_subsys_id,
        .base_cftypes   = cpu_files,
index c2665cd29594aa573bb85c9f39a4d0db6e658930..1e23284fd692603191cf26aab163de26681038af 100644 (file)
@@ -554,7 +554,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
-                       do_div(avg_atom, nr_switches);
+                       avg_atom = div64_ul(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;
 
index 97ed132c809a9c203567a7e28833757beab65aef..41d0cbda605d1f46ff86908a482537f0d16ccec1 100644 (file)
@@ -4385,7 +4385,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 
 #ifdef CONFIG_SCHED_HMP
        /* always put non-kernel forking tasks on a big domain */
-       if (p->mm && (sd_flag & SD_BALANCE_FORK)) {
+       if (unlikely(sd_flag & SD_BALANCE_FORK) && hmp_task_should_forkboost(p)) {
                new_cpu = hmp_select_faster_cpu(p, prev_cpu);
                if (new_cpu != NR_CPUS) {
                        hmp_next_up_delay(&p->se, new_cpu);
@@ -6537,16 +6537,16 @@ static int nohz_test_cpu(int cpu)
  * Decide if the tasks on the busy CPUs in the
  * littlest domain would benefit from an idle balance
  */
-static int hmp_packing_ilb_needed(int cpu)
+static int hmp_packing_ilb_needed(int cpu, int ilb_needed)
 {
        struct hmp_domain *hmp;
-       /* always allow ilb on non-slowest domain */
+       /* allow previous decision on non-slowest domain */
        if (!hmp_cpu_is_slowest(cpu))
-               return 1;
+               return ilb_needed;
 
        /* if disabled, use normal ILB behaviour */
        if (!hmp_packing_enabled)
-               return 1;
+               return ilb_needed;
 
        hmp = hmp_cpu_domain(cpu);
        for_each_cpu_and(cpu, &hmp->cpus, nohz.idle_cpus_mask) {
@@ -6558,19 +6558,34 @@ static int hmp_packing_ilb_needed(int cpu)
 }
 #endif
 
+DEFINE_PER_CPU(cpumask_var_t, ilb_tmpmask);
+
 static inline int find_new_ilb(int call_cpu)
 {
        int ilb = cpumask_first(nohz.idle_cpus_mask);
 #ifdef CONFIG_SCHED_HMP
-       int ilb_needed = 1;
+       int ilb_needed = 0;
+       int cpu;
+       struct cpumask *tmp = per_cpu(ilb_tmpmask, smp_processor_id());
 
        /* restrict nohz balancing to occur in the same hmp domain */
        ilb = cpumask_first_and(nohz.idle_cpus_mask,
                        &((struct hmp_domain *)hmp_cpu_domain(call_cpu))->cpus);
 
+       /* check to see if it's necessary within this domain */
+       cpumask_andnot(tmp,
+                       &((struct hmp_domain *)hmp_cpu_domain(call_cpu))->cpus,
+                       nohz.idle_cpus_mask);
+       for_each_cpu(cpu, tmp) {
+               if (cpu_rq(cpu)->nr_running > 1) {
+                       ilb_needed = 1;
+                       break;
+               }
+       }
+
 #ifdef CONFIG_SCHED_HMP_LITTLE_PACKING
        if (ilb < nr_cpu_ids)
-               ilb_needed = hmp_packing_ilb_needed(ilb);
+               ilb_needed = hmp_packing_ilb_needed(ilb, ilb_needed);
 #endif
 
        if (ilb_needed && ilb < nr_cpu_ids && idle_cpu(ilb))
index 15334e6de832d09e5da0d98b133a8d721164b06d..2dffc7b5d469fe69c73a278b073b9034c4f77c9f 100644 (file)
@@ -892,7 +892,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 
                        if (!once) {
                                once = true;
-                               printk_sched("sched: RT throttling activated\n");
+                               printk_deferred("sched: RT throttling activated\n");
                        }
                } else {
                        /*
index b7a10048a32c11fb473515d9075ef2b0782a563a..1fbb1a2bc459329c23bdac6fafe82eb67229c470 100644 (file)
 #include <linux/compat.h>
 #include <linux/sched.h>
 #include <linux/seccomp.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
 
 /* #define SECCOMP_DEBUG 1 */
 
 #ifdef CONFIG_SECCOMP_FILTER
 #include <asm/syscall.h>
 #include <linux/filter.h>
+#include <linux/pid.h>
 #include <linux/ptrace.h>
 #include <linux/security.h>
-#include <linux/slab.h>
 #include <linux/tracehook.h>
 #include <linux/uaccess.h>
 
@@ -95,7 +97,7 @@ u32 seccomp_bpf_load(int off)
        if (off == BPF_DATA(nr))
                return syscall_get_nr(current, regs);
        if (off == BPF_DATA(arch))
-               return syscall_get_arch(current, regs);
+               return syscall_get_arch();
        if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
                unsigned long value;
                int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
@@ -201,32 +203,170 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
  */
 static u32 seccomp_run_filters(int syscall)
 {
-       struct seccomp_filter *f;
+       struct seccomp_filter *f = ACCESS_ONCE(current->seccomp.filter);
        u32 ret = SECCOMP_RET_ALLOW;
 
        /* Ensure unexpected behavior doesn't result in failing open. */
-       if (WARN_ON(current->seccomp.filter == NULL))
+       if (unlikely(WARN_ON(f == NULL)))
                return SECCOMP_RET_KILL;
 
+       /* Make sure cross-thread synced filter points somewhere sane. */
+       smp_read_barrier_depends();
+
        /*
         * All filters in the list are evaluated and the lowest BPF return
         * value always takes priority (ignoring the DATA).
         */
-       for (f = current->seccomp.filter; f; f = f->prev) {
+       for (; f; f = f->prev) {
                u32 cur_ret = sk_run_filter(NULL, f->insns);
+
                if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
                        ret = cur_ret;
        }
        return ret;
 }
+#endif /* CONFIG_SECCOMP_FILTER */
+
+static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
+{
+       assert_spin_locked(&current->sighand->siglock);
+
+       if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
+               return false;
+
+       return true;
+}
+
+static inline void seccomp_assign_mode(struct task_struct *task,
+                                      unsigned long seccomp_mode)
+{
+       assert_spin_locked(&task->sighand->siglock);
+
+       task->seccomp.mode = seccomp_mode;
+       /*
+        * Make sure TIF_SECCOMP cannot be set before the mode (and
+        * filter) is set.
+        */
+       smp_mb();
+       set_tsk_thread_flag(task, TIF_SECCOMP);
+}
+
+#ifdef CONFIG_SECCOMP_FILTER
+/* Returns 1 if the parent is an ancestor of the child. */
+static int is_ancestor(struct seccomp_filter *parent,
+                      struct seccomp_filter *child)
+{
+       /* NULL is the root ancestor. */
+       if (parent == NULL)
+               return 1;
+       for (; child; child = child->prev)
+               if (child == parent)
+                       return 1;
+       return 0;
+}
 
 /**
- * seccomp_attach_filter: Attaches a seccomp filter to current.
+ * seccomp_can_sync_threads: checks if all threads can be synchronized
+ *
+ * Expects sighand and cred_guard_mutex locks to be held.
+ *
+ * Returns 0 on success, -ve on error, or the pid of a thread which was
+ * either not in the correct seccomp mode or it did not have an ancestral
+ * seccomp filter.
+ */
+static inline pid_t seccomp_can_sync_threads(void)
+{
+       struct task_struct *thread, *caller;
+
+       BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
+       assert_spin_locked(&current->sighand->siglock);
+
+       /* Validate all threads being eligible for synchronization. */
+       caller = current;
+       for_each_thread(caller, thread) {
+               pid_t failed;
+
+               /* Skip current, since it is initiating the sync. */
+               if (thread == caller)
+                       continue;
+
+               if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
+                   (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
+                    is_ancestor(thread->seccomp.filter,
+                                caller->seccomp.filter)))
+                       continue;
+
+               /* Return the first thread that cannot be synchronized. */
+               failed = task_pid_vnr(thread);
+               /* If the pid cannot be resolved, then return -ESRCH */
+               if (unlikely(WARN_ON(failed == 0)))
+                       failed = -ESRCH;
+               return failed;
+       }
+
+       return 0;
+}
+
+/**
+ * seccomp_sync_threads: sets all threads to use current's filter
+ *
+ * Expects sighand and cred_guard_mutex locks to be held, and for
+ * seccomp_can_sync_threads() to have returned success already
+ * without dropping the locks.
+ *
+ */
+static inline void seccomp_sync_threads(void)
+{
+       struct task_struct *thread, *caller;
+
+       BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
+       assert_spin_locked(&current->sighand->siglock);
+
+       /* Synchronize all threads. */
+       caller = current;
+       for_each_thread(caller, thread) {
+               /* Skip current, since it needs no changes. */
+               if (thread == caller)
+                       continue;
+
+               /* Get a task reference for the new leaf node. */
+               get_seccomp_filter(caller);
+               /*
+                * Drop the task reference to the shared ancestor since
+                * current's path will hold a reference.  (This also
+                * allows a put before the assignment.)
+                */
+               put_seccomp_filter(thread);
+               smp_store_release(&thread->seccomp.filter,
+                                 caller->seccomp.filter);
+               /*
+                * Opt the other thread into seccomp if needed.
+                * As threads are considered to be trust-realm
+                * equivalent (see ptrace_may_access), it is safe to
+                * allow one thread to transition the other.
+                */
+               if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
+                       /*
+                        * Don't let an unprivileged task work around
+                        * the no_new_privs restriction by creating
+                        * a thread that sets it up, enters seccomp,
+                        * then dies.
+                        */
+                       if (task_no_new_privs(caller))
+                               task_set_no_new_privs(thread);
+
+                       seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
+               }
+       }
+}
+
+/**
+ * seccomp_prepare_filter: Prepares a seccomp filter for use.
  * @fprog: BPF program to install
  *
- * Returns 0 on success or an errno on failure.
+ * Returns filter on success or an ERR_PTR on failure.
  */
-static long seccomp_attach_filter(struct sock_fprog *fprog)
+static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
 {
        struct seccomp_filter *filter;
        unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
@@ -234,12 +374,13 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
        long ret;
 
        if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
+       BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));
 
        for (filter = current->seccomp.filter; filter; filter = filter->prev)
                total_insns += filter->len + 4;  /* include a 4 instr penalty */
        if (total_insns > MAX_INSNS_PER_PATH)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        /*
         * Installing a seccomp filter requires that the task have
@@ -247,16 +388,16 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
         * This avoids scenarios where unprivileged tasks can affect the
         * behavior of privileged children.
         */
-       if (!current->no_new_privs &&
+       if (!task_no_new_privs(current) &&
            security_capable_noaudit(current_cred(), current_user_ns(),
                                     CAP_SYS_ADMIN) != 0)
-               return -EACCES;
+               return ERR_PTR(-EACCES);
 
        /* Allocate a new seccomp_filter */
        filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
                         GFP_KERNEL|__GFP_NOWARN);
        if (!filter)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
        atomic_set(&filter->usage, 1);
        filter->len = fprog->len;
 
@@ -275,28 +416,24 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
        if (ret)
                goto fail;
 
-       /*
-        * If there is an existing filter, make it the prev and don't drop its
-        * task reference.
-        */
-       filter->prev = current->seccomp.filter;
-       current->seccomp.filter = filter;
-       return 0;
+       return filter;
+
 fail:
        kfree(filter);
-       return ret;
+       return ERR_PTR(ret);
 }
 
 /**
- * seccomp_attach_user_filter - attaches a user-supplied sock_fprog
+ * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
  * @user_filter: pointer to the user data containing a sock_fprog.
  *
  * Returns 0 on success and non-zero otherwise.
  */
-long seccomp_attach_user_filter(char __user *user_filter)
+static struct seccomp_filter *
+seccomp_prepare_user_filter(const char __user *user_filter)
 {
        struct sock_fprog fprog;
-       long ret = -EFAULT;
+       struct seccomp_filter *filter = ERR_PTR(-EFAULT);
 
 #ifdef CONFIG_COMPAT
        if (is_compat_task()) {
@@ -309,9 +446,56 @@ long seccomp_attach_user_filter(char __user *user_filter)
 #endif
        if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
                goto out;
-       ret = seccomp_attach_filter(&fprog);
+       filter = seccomp_prepare_filter(&fprog);
 out:
-       return ret;
+       return filter;
+}
+
+/**
+ * seccomp_attach_filter: validate and attach filter
+ * @flags:  flags to change filter behavior
+ * @filter: seccomp filter to add to the current process
+ *
+ * Caller must be holding current->sighand->siglock lock.
+ *
+ * Returns 0 on success, -ve on error.
+ */
+static long seccomp_attach_filter(unsigned int flags,
+                                 struct seccomp_filter *filter)
+{
+       unsigned long total_insns;
+       struct seccomp_filter *walker;
+
+       assert_spin_locked(&current->sighand->siglock);
+
+       /* Validate resulting filter length. */
+       total_insns = filter->len;
+       for (walker = current->seccomp.filter; walker; walker = walker->prev)
+               total_insns += walker->len + 4;  /* 4 instr penalty */
+       if (total_insns > MAX_INSNS_PER_PATH)
+               return -ENOMEM;
+
+       /* If thread sync has been requested, check that it is possible. */
+       if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
+               int ret;
+
+               ret = seccomp_can_sync_threads();
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * If there is an existing filter, make it the prev and don't drop its
+        * task reference.
+        */
+       filter->prev = current->seccomp.filter;
+       current->seccomp.filter = filter;
+
+       /* Now that the new filter is in place, synchronize to all threads. */
+       if (flags & SECCOMP_FILTER_FLAG_TSYNC)
+               seccomp_sync_threads();
+
+       return 0;
 }
 
 /* get_seccomp_filter - increments the reference count of the filter on @tsk */
@@ -324,6 +508,13 @@ void get_seccomp_filter(struct task_struct *tsk)
        atomic_inc(&orig->usage);
 }
 
+static inline void seccomp_filter_free(struct seccomp_filter *filter)
+{
+       if (filter) {
+               kfree(filter);
+       }
+}
+
 /* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
 void put_seccomp_filter(struct task_struct *tsk)
 {
@@ -332,7 +523,7 @@ void put_seccomp_filter(struct task_struct *tsk)
        while (orig && atomic_dec_and_test(&orig->usage)) {
                struct seccomp_filter *freeme = orig;
                orig = orig->prev;
-               kfree(freeme);
+               seccomp_filter_free(freeme);
        }
 }
 
@@ -351,7 +542,7 @@ static void seccomp_send_sigsys(int syscall, int reason)
        info.si_code = SYS_SECCOMP;
        info.si_call_addr = (void __user *)KSTK_EIP(current);
        info.si_errno = reason;
-       info.si_arch = syscall_get_arch(current, task_pt_regs(current));
+       info.si_arch = syscall_get_arch();
        info.si_syscall = syscall;
        force_sig_info(SIGSYS, &info, current);
 }
@@ -376,12 +567,17 @@ static int mode1_syscalls_32[] = {
 
 int __secure_computing(int this_syscall)
 {
-       int mode = current->seccomp.mode;
        int exit_sig = 0;
        int *syscall;
        u32 ret;
 
-       switch (mode) {
+       /*
+        * Make sure that any changes to mode from another thread have
+        * been seen after TIF_SECCOMP was seen.
+        */
+       rmb();
+
+       switch (current->seccomp.mode) {
        case SECCOMP_MODE_STRICT:
                syscall = mode1_syscalls;
 #ifdef CONFIG_COMPAT
@@ -467,47 +663,152 @@ long prctl_get_seccomp(void)
 }
 
 /**
- * prctl_set_seccomp: configures current->seccomp.mode
- * @seccomp_mode: requested mode to use
- * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
+ * seccomp_set_mode_strict: internal function for setting strict seccomp
+ *
+ * Once current->seccomp.mode is non-zero, it may not be changed.
+ *
+ * Returns 0 on success or -EINVAL on failure.
+ */
+static long seccomp_set_mode_strict(void)
+{
+       const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
+       long ret = -EINVAL;
+
+       spin_lock_irq(&current->sighand->siglock);
+
+       if (!seccomp_may_assign_mode(seccomp_mode))
+               goto out;
+
+#ifdef TIF_NOTSC
+       disable_TSC();
+#endif
+       seccomp_assign_mode(current, seccomp_mode);
+       ret = 0;
+
+out:
+       spin_unlock_irq(&current->sighand->siglock);
+
+       return ret;
+}
+
+#ifdef CONFIG_SECCOMP_FILTER
+/**
+ * seccomp_set_mode_filter: internal function for setting seccomp filter
+ * @flags:  flags to change filter behavior
+ * @filter: struct sock_fprog containing filter
  *
- * This function may be called repeatedly with a @seccomp_mode of
- * SECCOMP_MODE_FILTER to install additional filters.  Every filter
- * successfully installed will be evaluated (in reverse order) for each system
- * call the task makes.
+ * This function may be called repeatedly to install additional filters.
+ * Every filter successfully installed will be evaluated (in reverse order)
+ * for each system call the task makes.
  *
  * Once current->seccomp.mode is non-zero, it may not be changed.
  *
  * Returns 0 on success or -EINVAL on failure.
  */
-long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
+static long seccomp_set_mode_filter(unsigned int flags,
+                                   const char __user *filter)
 {
+       const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
+       struct seccomp_filter *prepared = NULL;
        long ret = -EINVAL;
 
-       if (current->seccomp.mode &&
-           current->seccomp.mode != seccomp_mode)
+       /* Validate flags. */
+       if (flags & ~SECCOMP_FILTER_FLAG_MASK)
+               return -EINVAL;
+
+       /* Prepare the new filter before holding any locks. */
+       prepared = seccomp_prepare_user_filter(filter);
+       if (IS_ERR(prepared))
+               return PTR_ERR(prepared);
+
+       /*
+        * Make sure we cannot change seccomp or nnp state via TSYNC
+        * while another thread is in the middle of calling exec.
+        */
+       if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
+           mutex_lock_killable(&current->signal->cred_guard_mutex))
+               goto out_free;
+
+       spin_lock_irq(&current->sighand->siglock);
+
+       if (!seccomp_may_assign_mode(seccomp_mode))
+               goto out;
+
+       ret = seccomp_attach_filter(flags, prepared);
+       if (ret)
                goto out;
+       /* Do not free the successfully attached filter. */
+       prepared = NULL;
+
+       seccomp_assign_mode(current, seccomp_mode);
+out:
+       spin_unlock_irq(&current->sighand->siglock);
+       if (flags & SECCOMP_FILTER_FLAG_TSYNC)
+               mutex_unlock(&current->signal->cred_guard_mutex);
+out_free:
+       seccomp_filter_free(prepared);
+       return ret;
+}
+#else
+static inline long seccomp_set_mode_filter(unsigned int flags,
+                                          const char __user *filter)
+{
+       return -EINVAL;
+}
+#endif
+
+/* Common entry point for both prctl and syscall. */
+static long do_seccomp(unsigned int op, unsigned int flags,
+                      const char __user *uargs)
+{
+       switch (op) {
+       case SECCOMP_SET_MODE_STRICT:
+               if (flags != 0 || uargs != NULL)
+                       return -EINVAL;
+               return seccomp_set_mode_strict();
+       case SECCOMP_SET_MODE_FILTER:
+               return seccomp_set_mode_filter(flags, uargs);
+       default:
+               return -EINVAL;
+       }
+}
+
+SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
+                        const char __user *, uargs)
+{
+       return do_seccomp(op, flags, uargs);
+}
+
+/**
+ * prctl_set_seccomp: configures current->seccomp.mode
+ * @seccomp_mode: requested mode to use
+ * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
+ *
+ * Returns 0 on success or -EINVAL on failure.
+ */
+long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
+{
+       unsigned int op;
+       char __user *uargs;
 
        switch (seccomp_mode) {
        case SECCOMP_MODE_STRICT:
-               ret = 0;
-#ifdef TIF_NOTSC
-               disable_TSC();
-#endif
+               op = SECCOMP_SET_MODE_STRICT;
+               /*
+                * Setting strict mode through prctl always ignored filter,
+                * so make sure it is always NULL here to pass the internal
+                * check in do_seccomp().
+                */
+               uargs = NULL;
                break;
-#ifdef CONFIG_SECCOMP_FILTER
        case SECCOMP_MODE_FILTER:
-               ret = seccomp_attach_user_filter(filter);
-               if (ret)
-                       goto out;
+               op = SECCOMP_SET_MODE_FILTER;
+               uargs = filter;
                break;
-#endif
        default:
-               goto out;
+               return -EINVAL;
        }
 
-       current->seccomp.mode = seccomp_mode;
-       set_thread_flag(TIF_SECCOMP);
-out:
-       return ret;
+       /* prctl interface doesn't have flags, so they are always zero. */
+       return do_seccomp(op, 0, uargs);
 }
index 23ccc67dcbb261b6a7dd127c919fe61692ecb97d..74ba5e2c037ca01f149023da7678ab1d964d972e 100644 (file)
@@ -666,7 +666,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
                        if (cond_func(cpu, info)) {
                                ret = smp_call_function_single(cpu, func,
                                                                info, wait);
-                               WARN_ON_ONCE(!ret);
+                               WARN_ON_ONCE(ret);
                        }
                preempt_enable();
        }
index 6af60700eede2beecca24e2b7894e304b66c727f..768ec2301d4f65fd1750744141c3c3d55fbe7c86 100644 (file)
@@ -2193,7 +2193,7 @@ static int prctl_set_vma_anon_name(unsigned long start, unsigned long end,
                        tmp = end;
 
                /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
-               error = prctl_update_vma_anon_name(vma, &prev, start, end,
+               error = prctl_update_vma_anon_name(vma, &prev, start, tmp,
                                (const char __user *)arg);
                if (error)
                        return error;
@@ -2433,12 +2433,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                if (arg2 != 1 || arg3 || arg4 || arg5)
                        return -EINVAL;
 
-               current->no_new_privs = 1;
+               task_set_no_new_privs(current);
                break;
        case PR_GET_NO_NEW_PRIVS:
                if (arg2 || arg3 || arg4 || arg5)
                        return -EINVAL;
-               return current->no_new_privs ? 1 : 0;
+               return task_no_new_privs(current) ? 1 : 0;
        case PR_SET_VMA:
                error = prctl_set_vma(arg2, arg3, arg4, arg5);
                break;
index 7078052284fd9eda7bae8a86805054328af7bb4b..7e7fc0a082c4517b5b604dac039f01e4c2da2a10 100644 (file)
@@ -209,3 +209,6 @@ cond_syscall(compat_sys_open_by_handle_at);
 
 /* compare kernel pointers */
 cond_syscall(sys_kcmp);
+
+/* operate on Secure Computing state */
+cond_syscall(sys_seccomp);
index d3617dbd3dca6b3844e0814e889026f01af0ddd4..d21398e6da87f19d62e3b20fd1d7ded9ffa9cabf 100644 (file)
@@ -496,17 +496,20 @@ EXPORT_SYMBOL(usecs_to_jiffies);
  * that a remainder subtract here would not do the right thing as the
  * resolution values don't fall on second boundries.  I.e. the line:
  * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+ * Note that due to the small error in the multiplier here, this
+ * rounding is incorrect for sufficiently large values of tv_nsec, but
+ * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
+ * OK.
  *
  * Rather, we just shift the bits off the right.
  *
  * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
  * value to a scaled second value.
  */
-unsigned long
-timespec_to_jiffies(const struct timespec *value)
+static unsigned long
+__timespec_to_jiffies(unsigned long sec, long nsec)
 {
-       unsigned long sec = value->tv_sec;
-       long nsec = value->tv_nsec + TICK_NSEC - 1;
+       nsec = nsec + TICK_NSEC - 1;
 
        if (sec >= MAX_SEC_IN_JIFFIES){
                sec = MAX_SEC_IN_JIFFIES;
@@ -517,6 +520,13 @@ timespec_to_jiffies(const struct timespec *value)
                 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
 
 }
+
+unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+       return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
+}
+
 EXPORT_SYMBOL(timespec_to_jiffies);
 
 void
@@ -533,31 +543,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
 }
 EXPORT_SYMBOL(jiffies_to_timespec);
 
-/* Same for "timeval"
+/*
+ * We could use a similar algorithm to timespec_to_jiffies (with a
+ * different multiplier for usec instead of nsec). But this has a
+ * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
+ * usec value, since it's not necessarily integral.
  *
- * Well, almost.  The problem here is that the real system resolution is
- * in nanoseconds and the value being converted is in micro seconds.
- * Also for some machines (those that use HZ = 1024, in-particular),
- * there is a LARGE error in the tick size in microseconds.
-
- * The solution we use is to do the rounding AFTER we convert the
- * microsecond part.  Thus the USEC_ROUND, the bits to be shifted off.
- * Instruction wise, this should cost only an additional add with carry
- * instruction above the way it was done above.
+ * We could instead round in the intermediate scaled representation
+ * (i.e. in units of 1/2^(large scale) jiffies) but that's also
+ * perilous: the scaling introduces a small positive error, which
+ * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
+ * units to the intermediate before shifting) leads to accidental
+ * overflow and overestimates.
+ *
+ * At the cost of one additional multiplication by a constant, just
+ * use the timespec implementation.
  */
 unsigned long
 timeval_to_jiffies(const struct timeval *value)
 {
-       unsigned long sec = value->tv_sec;
-       long usec = value->tv_usec;
-
-       if (sec >= MAX_SEC_IN_JIFFIES){
-               sec = MAX_SEC_IN_JIFFIES;
-               usec = 0;
-       }
-       return (((u64)sec * SEC_CONVERSION) +
-               (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
-                (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+       return __timespec_to_jiffies(value->tv_sec,
+                                    value->tv_usec * NSEC_PER_USEC);
 }
 EXPORT_SYMBOL(timeval_to_jiffies);
 
index d41fcb46a40311f09e68a73f97491e4ba2b462b2..bb08d0961f80fface6213c131c817cc459359fba 100644 (file)
@@ -456,18 +456,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
 static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
                                                        ktime_t now)
 {
+       unsigned long flags;
        struct k_itimer *ptr = container_of(alarm, struct k_itimer,
                                                it.alarm.alarmtimer);
-       if (posix_timer_event(ptr, 0) != 0)
-               ptr->it_overrun++;
+       enum alarmtimer_restart result = ALARMTIMER_NORESTART;
+
+       spin_lock_irqsave(&ptr->it_lock, flags);
+       if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
+               if (posix_timer_event(ptr, 0) != 0)
+                       ptr->it_overrun++;
+       }
 
        /* Re-add periodic timers */
        if (ptr->it.alarm.interval.tv64) {
                ptr->it_overrun += alarm_forward(alarm, now,
                                                ptr->it.alarm.interval);
-               return ALARMTIMER_RESTART;
+               result = ALARMTIMER_RESTART;
        }
-       return ALARMTIMER_NORESTART;
+       spin_unlock_irqrestore(&ptr->it_lock, flags);
+
+       return result;
 }
 
 /**
@@ -577,9 +585,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
                                struct itimerspec *new_setting,
                                struct itimerspec *old_setting)
 {
+       ktime_t exp;
+
        if (!rtcdev)
                return -ENOTSUPP;
 
+       if (flags & ~TIMER_ABSTIME)
+               return -EINVAL;
+
        if (old_setting)
                alarm_timer_get(timr, old_setting);
 
@@ -589,8 +602,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
 
        /* start the timer */
        timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
-       alarm_start(&timr->it.alarm.alarmtimer,
-                       timespec_to_ktime(new_setting->it_value));
+       exp = timespec_to_ktime(new_setting->it_value);
+       /* Convert (if necessary) to absolute time */
+       if (flags != TIMER_ABSTIME) {
+               ktime_t now;
+
+               now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
+               exp = ktime_add(now, exp);
+       }
+
+       alarm_start(&timr->it.alarm.alarmtimer, exp);
        return 0;
 }
 
@@ -722,6 +743,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
        if (!alarmtimer_get_rtcdev())
                return -ENOTSUPP;
 
+       if (flags & ~TIMER_ABSTIME)
+               return -EINVAL;
+
        if (!capable(CAP_WAKE_ALARM))
                return -EPERM;
 
index 9df0e3b19f099d920c0f8c0b0adc611c024a8c76..58e8430165b52d81930c0ab4e32f527783da193b 100644 (file)
@@ -138,7 +138,8 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
 {
        /* Nothing to do if we already reached the limit */
        if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
-               printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
+               printk_deferred(KERN_WARNING
+                               "CE: Reprogramming failure. Giving up\n");
                dev->next_event.tv64 = KTIME_MAX;
                return -ETIME;
        }
@@ -151,9 +152,10 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
        if (dev->min_delta_ns > MIN_DELTA_LIMIT)
                dev->min_delta_ns = MIN_DELTA_LIMIT;
 
-       printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
-              dev->name ? dev->name : "?",
-              (unsigned long long) dev->min_delta_ns);
+       printk_deferred(KERN_WARNING
+                       "CE: %s increased min_delta_ns to %llu nsec\n",
+                       dev->name ? dev->name : "?",
+                       (unsigned long long) dev->min_delta_ns);
        return 0;
 }
 
index 797d3b91a30bf7aaa9bbb9ed4dbd98db5e8374a0..401d9bd1fe42cbf484d933183436d61b4ed00723 100644 (file)
@@ -331,12 +331,12 @@ static void update_ftrace_function(void)
                func = ftrace_ops_list_func;
        }
 
+       update_function_graph_func();
+
        /* If there's no change, then do nothing more here */
        if (ftrace_trace_function == func)
                return;
 
-       update_function_graph_func();
-
        /*
         * If we are using the list function, it doesn't care
         * about the function_trace_ops.
index 8e94c1102636670dabf637e5aff3e2e65fa661bc..3d9fee3a80b39fb4867fcb861298094442fff518 100644 (file)
@@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
        struct ring_buffer_per_cpu *cpu_buffer;
        struct rb_irq_work *work;
 
-       if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
-           (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
-               return POLLIN | POLLRDNORM;
-
        if (cpu == RING_BUFFER_ALL_CPUS)
                work = &buffer->irq_work;
        else {
@@ -630,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
                work = &cpu_buffer->irq_work;
        }
 
-       work->waiters_pending = true;
        poll_wait(filp, &work->waiters, poll_table);
+       work->waiters_pending = true;
+       /*
+        * There's a tight race between setting the waiters_pending and
+        * checking if the ring buffer is empty.  Once the waiters_pending bit
+        * is set, the next event will wake the task up, but we can get stuck
+        * if there's only a single event in.
+        *
+        * FIXME: Ideally, we need a memory barrier on the writer side as well,
+        * but adding a memory barrier to all events will cause too much of a
+        * performance hit in the fast path.  We only need a memory barrier when
+        * the buffer goes from empty to having content.  But as this race is
+        * extremely small, and it's not a problem if another event comes in, we
+        * will fix it later.
+        */
+       smp_mb();
 
        if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
            (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
@@ -1984,7 +1994,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
 
 /**
  * rb_update_event - update event type and data
- * @event: the even to update
+ * @event: the event to update
  * @type: the type of event
  * @length: the size of the event field in the ring buffer
  *
@@ -3357,21 +3367,16 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 
        /* Iterator usage is expected to have record disabled */
-       if (list_empty(&cpu_buffer->reader_page->list)) {
-               iter->head_page = rb_set_head_page(cpu_buffer);
-               if (unlikely(!iter->head_page))
-                       return;
-               iter->head = iter->head_page->read;
-       } else {
-               iter->head_page = cpu_buffer->reader_page;
-               iter->head = cpu_buffer->reader_page->read;
-       }
+       iter->head_page = cpu_buffer->reader_page;
+       iter->head = cpu_buffer->reader_page->read;
+
+       iter->cache_reader_page = iter->head_page;
+       iter->cache_read = cpu_buffer->read;
+
        if (iter->head)
                iter->read_stamp = cpu_buffer->read_stamp;
        else
                iter->read_stamp = iter->head_page->page->time_stamp;
-       iter->cache_reader_page = cpu_buffer->reader_page;
-       iter->cache_read = cpu_buffer->read;
 }
 
 /**
@@ -3764,12 +3769,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
                return NULL;
 
        /*
-        * We repeat when a time extend is encountered.
-        * Since the time extend is always attached to a data event,
-        * we should never loop more than once.
-        * (We never hit the following condition more than twice).
+        * We repeat when a time extend is encountered or we hit
+        * the end of the page. Since the time extend is always attached
+        * to a data event, we should never loop more than three times.
+        * Once for going to next page, once on time extend, and
+        * finally once to get the event.
+        * (We never hit the following condition more than thrice).
         */
-       if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
+       if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
                return NULL;
 
        if (rb_per_cpu_empty(cpu_buffer))
index bf12fbe49dfcd8486f3be6786dac14f3130661e8..3bf9864c313e17c370023547a7cccda69fedcbf2 100644 (file)
@@ -423,6 +423,9 @@ int __trace_puts(unsigned long ip, const char *str, int size)
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;
+       int pc;
+
+       pc = preempt_count();
 
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
@@ -432,7 +435,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
-                                         irq_flags, preempt_count());
+                                         irq_flags, pc);
        if (!event)
                return 0;
 
@@ -449,6 +452,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
                entry->buf[size] = '\0';
 
        __buffer_unlock_commit(buffer, event);
+       ftrace_trace_stack(buffer, irq_flags, 4, pc);
 
        return size;
 }
@@ -466,6 +470,9 @@ int __trace_bputs(unsigned long ip, const char *str)
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);
+       int pc;
+
+       pc = preempt_count();
 
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
@@ -473,7 +480,7 @@ int __trace_bputs(unsigned long ip, const char *str)
        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
-                                         irq_flags, preempt_count());
+                                         irq_flags, pc);
        if (!event)
                return 0;
 
@@ -482,6 +489,7 @@ int __trace_bputs(unsigned long ip, const char *str)
        entry->str                      = str;
 
        __buffer_unlock_commit(buffer, event);
+       ftrace_trace_stack(buffer, irq_flags, 4, pc);
 
        return 1;
 }
@@ -734,7 +742,7 @@ static struct {
        { trace_clock_local,    "local",        1 },
        { trace_clock_global,   "global",       1 },
        { trace_clock_counter,  "counter",      0 },
-       { trace_clock_jiffies,  "uptime",       1 },
+       { trace_clock_jiffies,  "uptime",       0 },
        { trace_clock,          "perf",         1 },
        ARCH_TRACE_CLOCKS
 };
index 26dc348332b798eeb43a77cf2d89357512d9e8c0..57b67b1f24d1a141f88163c385e62be25cd275cf 100644 (file)
@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
 
 /*
  * trace_jiffy_clock(): Simply use jiffies as a clock counter.
+ * Note that this use of jiffies_64 is not completely safe on
+ * 32-bit systems. But the window is tiny, and the effect if
+ * we are affected is that we will have an obviously bogus
+ * timestamp on a trace event - i.e. not life threatening.
  */
 u64 notrace trace_clock_jiffies(void)
 {
-       u64 jiffy = jiffies - INITIAL_JIFFIES;
-
-       /* Return nsecs */
-       return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
+       return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
 }
 
 /*
index 322e16461072575e6d8439208891b5730bd41438..bdb9ee0af99192e74bdd3507b2a2cada010e57de 100644 (file)
@@ -312,7 +312,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
        int size;
 
        syscall_nr = trace_get_syscall_nr(current, regs);
-       if (syscall_nr < 0)
+       if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
        if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
                return;
@@ -354,7 +354,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
        int syscall_nr;
 
        syscall_nr = trace_get_syscall_nr(current, regs);
-       if (syscall_nr < 0)
+       if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
        if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
                return;
@@ -557,7 +557,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
        int size;
 
        syscall_nr = trace_get_syscall_nr(current, regs);
-       if (syscall_nr < 0)
+       if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
        if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
                return;
@@ -633,7 +633,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
        int size;
 
        syscall_nr = trace_get_syscall_nr(current, regs);
-       if (syscall_nr < 0)
+       if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
        if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
                return;
index 06f7e4fe8d2de4046a3139106058b9c831c9b789..e5c4ebe586babfa20ff91f646df8a4009bc98076 100644 (file)
@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
                lower = src[off + k];
                if (left && off + k == lim - 1)
                        lower &= mask;
-               dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
+               dst[k] = lower >> rem;
+               if (rem)
+                       dst[k] |= upper << (BITS_PER_LONG - rem);
                if (left && k == lim - 1)
                        dst[k] &= mask;
        }
@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
                upper = src[k];
                if (left && k == lim - 1)
                        upper &= (1UL << left) - 1;
-               dst[k + off] = lower  >> (BITS_PER_LONG - rem) | upper << rem;
+               dst[k + off] = upper << rem;
+               if (rem)
+                       dst[k + off] |= lower >> (BITS_PER_LONG - rem);
                if (left && k + off == lim - 1)
                        dst[k + off] &= (1UL << left) - 1;
        }
index f9a484676cb6a8e4782333ce164bf0662b583b78..4264871ea1a00194750116a82c48e18d18edbd44 100644 (file)
@@ -198,6 +198,7 @@ EXPORT_SYMBOL_GPL(btree_init);
 
 void btree_destroy(struct btree_head *head)
 {
+       mempool_free(head->node, head->mempool);
        mempool_destroy(head->mempool);
        head->mempool = NULL;
 }
index 8563081e8da38fb81e0335d2589c9fcebcd81266..a1c387f6afba24c3a027456b48b44c603c3c3b96 100644 (file)
 #include <linux/lzo.h>
 #include "lzodefs.h"
 
-#define HAVE_IP(t, x)                                  \
-       (((size_t)(ip_end - ip) >= (size_t)(t + x)) &&  \
-        (((t + x) >= t) && ((t + x) >= x)))
+#define HAVE_IP(x)      ((size_t)(ip_end - ip) >= (size_t)(x))
+#define HAVE_OP(x)      ((size_t)(op_end - op) >= (size_t)(x))
+#define NEED_IP(x)      if (!HAVE_IP(x)) goto input_overrun
+#define NEED_OP(x)      if (!HAVE_OP(x)) goto output_overrun
+#define TEST_LB(m_pos)  if ((m_pos) < out) goto lookbehind_overrun
 
-#define HAVE_OP(t, x)                                  \
-       (((size_t)(op_end - op) >= (size_t)(t + x)) &&  \
-        (((t + x) >= t) && ((t + x) >= x)))
-
-#define NEED_IP(t, x)                                  \
-       do {                                            \
-               if (!HAVE_IP(t, x))                     \
-                       goto input_overrun;             \
-       } while (0)
-
-#define NEED_OP(t, x)                                  \
-       do {                                            \
-               if (!HAVE_OP(t, x))                     \
-                       goto output_overrun;            \
-       } while (0)
-
-#define TEST_LB(m_pos)                                 \
-       do {                                            \
-               if ((m_pos) < out)                      \
-                       goto lookbehind_overrun;        \
-       } while (0)
+/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
+ * count without overflowing an integer. The multiply will overflow when
+ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
+ * depending on the base count. Since the base count is taken from a u8
+ * and a few bits, it is safe to assume that it will always be lower than
+ * or equal to 2*255, thus we can always prevent any overflow by accepting
+ * two less 255 steps. See Documentation/lzo.txt for more information.
+ */
+#define MAX_255_COUNT      ((((size_t)~0) / 255) - 2)
 
 int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
                          unsigned char *out, size_t *out_len)
@@ -75,17 +65,24 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
                if (t < 16) {
                        if (likely(state == 0)) {
                                if (unlikely(t == 0)) {
+                                       size_t offset;
+                                       const unsigned char *ip_last = ip;
+
                                        while (unlikely(*ip == 0)) {
-                                               t += 255;
                                                ip++;
-                                               NEED_IP(1, 0);
+                                               NEED_IP(1);
                                        }
-                                       t += 15 + *ip++;
+                                       offset = ip - ip_last;
+                                       if (unlikely(offset > MAX_255_COUNT))
+                                               return LZO_E_ERROR;
+
+                                       offset = (offset << 8) - offset;
+                                       t += offset + 15 + *ip++;
                                }
                                t += 3;
 copy_literal_run:
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-                               if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
+                               if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
                                        const unsigned char *ie = ip + t;
                                        unsigned char *oe = op + t;
                                        do {
@@ -101,8 +98,8 @@ copy_literal_run:
                                } else
 #endif
                                {
-                                       NEED_OP(t, 0);
-                                       NEED_IP(t, 3);
+                                       NEED_OP(t);
+                                       NEED_IP(t + 3);
                                        do {
                                                *op++ = *ip++;
                                        } while (--t > 0);
@@ -115,7 +112,7 @@ copy_literal_run:
                                m_pos -= t >> 2;
                                m_pos -= *ip++ << 2;
                                TEST_LB(m_pos);
-                               NEED_OP(2, 0);
+                               NEED_OP(2);
                                op[0] = m_pos[0];
                                op[1] = m_pos[1];
                                op += 2;
@@ -136,13 +133,20 @@ copy_literal_run:
                } else if (t >= 32) {
                        t = (t & 31) + (3 - 1);
                        if (unlikely(t == 2)) {
+                               size_t offset;
+                               const unsigned char *ip_last = ip;
+
                                while (unlikely(*ip == 0)) {
-                                       t += 255;
                                        ip++;
-                                       NEED_IP(1, 0);
+                                       NEED_IP(1);
                                }
-                               t += 31 + *ip++;
-                               NEED_IP(2, 0);
+                               offset = ip - ip_last;
+                               if (unlikely(offset > MAX_255_COUNT))
+                                       return LZO_E_ERROR;
+
+                               offset = (offset << 8) - offset;
+                               t += offset + 31 + *ip++;
+                               NEED_IP(2);
                        }
                        m_pos = op - 1;
                        next = get_unaligned_le16(ip);
@@ -154,13 +158,20 @@ copy_literal_run:
                        m_pos -= (t & 8) << 11;
                        t = (t & 7) + (3 - 1);
                        if (unlikely(t == 2)) {
+                               size_t offset;
+                               const unsigned char *ip_last = ip;
+
                                while (unlikely(*ip == 0)) {
-                                       t += 255;
                                        ip++;
-                                       NEED_IP(1, 0);
+                                       NEED_IP(1);
                                }
-                               t += 7 + *ip++;
-                               NEED_IP(2, 0);
+                               offset = ip - ip_last;
+                               if (unlikely(offset > MAX_255_COUNT))
+                                       return LZO_E_ERROR;
+
+                               offset = (offset << 8) - offset;
+                               t += offset + 7 + *ip++;
+                               NEED_IP(2);
                        }
                        next = get_unaligned_le16(ip);
                        ip += 2;
@@ -174,7 +185,7 @@ copy_literal_run:
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
                if (op - m_pos >= 8) {
                        unsigned char *oe = op + t;
-                       if (likely(HAVE_OP(t, 15))) {
+                       if (likely(HAVE_OP(t + 15))) {
                                do {
                                        COPY8(op, m_pos);
                                        op += 8;
@@ -184,7 +195,7 @@ copy_literal_run:
                                        m_pos += 8;
                                } while (op < oe);
                                op = oe;
-                               if (HAVE_IP(6, 0)) {
+                               if (HAVE_IP(6)) {
                                        state = next;
                                        COPY4(op, ip);
                                        op += next;
@@ -192,7 +203,7 @@ copy_literal_run:
                                        continue;
                                }
                        } else {
-                               NEED_OP(t, 0);
+                               NEED_OP(t);
                                do {
                                        *op++ = *m_pos++;
                                } while (op < oe);
@@ -201,7 +212,7 @@ copy_literal_run:
 #endif
                {
                        unsigned char *oe = op + t;
-                       NEED_OP(t, 0);
+                       NEED_OP(t);
                        op[0] = m_pos[0];
                        op[1] = m_pos[1];
                        op += 2;
@@ -214,15 +225,15 @@ match_next:
                state = next;
                t = next;
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-               if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
+               if (likely(HAVE_IP(6) && HAVE_OP(4))) {
                        COPY4(op, ip);
                        op += t;
                        ip += t;
                } else
 #endif
                {
-                       NEED_IP(t, 3);
-                       NEED_OP(t, 0);
+                       NEED_IP(t + 3);
+                       NEED_OP(t);
                        while (t > 0) {
                                *op++ = *ip++;
                                t--;
index e5878de4f1013ddbdd3db07d1fca2bcc3a692a7c..43d0781daf4703d94be9fd988bd6054f49cdeca4 100644 (file)
@@ -586,6 +586,22 @@ void *memset(void *s, int c, size_t count)
 EXPORT_SYMBOL(memset);
 #endif
 
+/**
+ * memzero_explicit - Fill a region of memory (e.g. sensitive
+ *                   keying data) with 0s.
+ * @s: Pointer to the start of the area.
+ * @count: The size of the area.
+ *
+ * memzero_explicit() doesn't need an arch-specific version as
+ * it just invokes memset() internally.
+ */
+void memzero_explicit(void *s, size_t count)
+{
+       memset(s, 0, count);
+       OPTIMIZER_HIDE_VAR(s);
+}
+EXPORT_SYMBOL(memzero_explicit);
+
 #ifndef __HAVE_ARCH_MEMCPY
 /**
  * memcpy - Copy one area of memory to another
index 1cdf0ba23e240a93f965dd3dd11424ea6cc88551..bbd0f160398fed27df3e0ebaae5065c11504f39c 100644 (file)
@@ -113,3 +113,6 @@ CONFIG_SECURITY_NETWORK=y
 CONFIG_LSM_MMAP_MIN_ADDR=4096
 CONFIG_SECURITY_SELINUX=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_DEBUG_INFO=y
+CONFIG_FANOTIFY=y
index eb00e81601a54f2ae190e996d8bd34bbdc801b0d..d21c9ef0943c3b6173c3f71027e0f7ca6db92f0c 100644 (file)
@@ -1733,21 +1733,24 @@ static int __split_huge_page_map(struct page *page,
        if (pmd) {
                pgtable = pgtable_trans_huge_withdraw(mm);
                pmd_populate(mm, &_pmd, pgtable);
+               if (pmd_write(*pmd))
+                       BUG_ON(page_mapcount(page) != 1);
 
                haddr = address;
                for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
                        pte_t *pte, entry;
                        BUG_ON(PageCompound(page+i));
+                       /*
+                        * Note that pmd_numa is not transferred deliberately
+                        * to avoid any possibility that pte_numa leaks to
+                        * a PROT_NONE VMA by accident.
+                        */
                        entry = mk_pte(page + i, vma->vm_page_prot);
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                        if (!pmd_write(*pmd))
                                entry = pte_wrprotect(entry);
-                       else
-                               BUG_ON(page_mapcount(page) != 1);
                        if (!pmd_young(*pmd))
                                entry = pte_mkold(entry);
-                       if (pmd_numa(*pmd))
-                               entry = pte_mknuma(entry);
                        pte = pte_offset_map(&_pmd, haddr);
                        BUG_ON(!pte_none(*pte));
                        set_pte_at(mm, haddr, pte, entry);
index aa55badf57f7e6a3a16f3ee8be8531e455fb0493..ea32a04296f063d3c81ae56218d1984710ab8f89 100644 (file)
@@ -2399,6 +2399,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                } else {
                        if (cow)
                                huge_ptep_set_wrprotect(src, addr, src_pte);
+                       entry = huge_ptep_get(src_pte);
                        ptepage = pte_page(entry);
                        get_page(ptepage);
                        page_dup_rmap(ptepage);
index f45e21ab9cea8531acd4bde77e9b4e3b63866f96..81bf16fd2266d1278d63b7db1b0d1708c03b2eaa 100644 (file)
@@ -302,6 +302,7 @@ struct mem_cgroup {
 
        bool            oom_lock;
        atomic_t        under_oom;
+       atomic_t        oom_wakeups;
 
        atomic_t        refcnt;
 
@@ -2075,15 +2076,18 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
        return total;
 }
 
+static DEFINE_SPINLOCK(memcg_oom_lock);
+
 /*
  * Check OOM-Killer is already running under our hierarchy.
  * If someone is running, return false.
- * Has to be called with memcg_oom_lock
  */
-static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
+static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
 {
        struct mem_cgroup *iter, *failed = NULL;
 
+       spin_lock(&memcg_oom_lock);
+
        for_each_mem_cgroup_tree(iter, memcg) {
                if (iter->oom_lock) {
                        /*
@@ -2097,33 +2101,33 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
                        iter->oom_lock = true;
        }
 
-       if (!failed)
-               return true;
-
-       /*
-        * OK, we failed to lock the whole subtree so we have to clean up
-        * what we set up to the failing subtree
-        */
-       for_each_mem_cgroup_tree(iter, memcg) {
-               if (iter == failed) {
-                       mem_cgroup_iter_break(memcg, iter);
-                       break;
+       if (failed) {
+               /*
+                * OK, we failed to lock the whole subtree so we have
+                * to clean up what we set up to the failing subtree
+                */
+               for_each_mem_cgroup_tree(iter, memcg) {
+                       if (iter == failed) {
+                               mem_cgroup_iter_break(memcg, iter);
+                               break;
+                       }
+                       iter->oom_lock = false;
                }
-               iter->oom_lock = false;
        }
-       return false;
+
+       spin_unlock(&memcg_oom_lock);
+
+       return !failed;
 }
 
-/*
- * Has to be called with memcg_oom_lock
- */
-static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
+static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
 {
        struct mem_cgroup *iter;
 
+       spin_lock(&memcg_oom_lock);
        for_each_mem_cgroup_tree(iter, memcg)
                iter->oom_lock = false;
-       return 0;
+       spin_unlock(&memcg_oom_lock);
 }
 
 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
@@ -2147,7 +2151,6 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
                atomic_add_unless(&iter->under_oom, -1, 0);
 }
 
-static DEFINE_SPINLOCK(memcg_oom_lock);
 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
 
 struct oom_wait_info {
@@ -2177,6 +2180,7 @@ static int memcg_oom_wake_function(wait_queue_t *wait,
 
 static void memcg_wakeup_oom(struct mem_cgroup *memcg)
 {
+       atomic_inc(&memcg->oom_wakeups);
        /* for filtering, pass "memcg" as argument. */
        __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
 }
@@ -2187,57 +2191,97 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
                memcg_wakeup_oom(memcg);
 }
 
-/*
- * try to call OOM killer. returns false if we should exit memory-reclaim loop.
+static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
+{
+       if (!current->memcg_oom.may_oom)
+               return;
+       /*
+        * We are in the middle of the charge context here, so we
+        * don't want to block when potentially sitting on a callstack
+        * that holds all kinds of filesystem and mm locks.
+        *
+        * Also, the caller may handle a failed allocation gracefully
+        * (like optional page cache readahead) and so an OOM killer
+        * invocation might not even be necessary.
+        *
+        * That's why we don't do anything here except remember the
+        * OOM context and then deal with it at the end of the page
+        * fault when the stack is unwound, the locks are released,
+        * and when we know whether the fault was overall successful.
+        */
+       css_get(&memcg->css);
+       current->memcg_oom.memcg = memcg;
+       current->memcg_oom.gfp_mask = mask;
+       current->memcg_oom.order = order;
+}
+
+/**
+ * mem_cgroup_oom_synchronize - complete memcg OOM handling
+ * @handle: actually kill/wait or just clean up the OOM state
+ *
+ * This has to be called at the end of a page fault if the memcg OOM
+ * handler was enabled.
+ *
+ * Memcg supports userspace OOM handling where failed allocations must
+ * sleep on a waitqueue until the userspace task resolves the
+ * situation.  Sleeping directly in the charge context with all kinds
+ * of locks held is not a good idea, instead we remember an OOM state
+ * in the task and mem_cgroup_oom_synchronize() has to be called at
+ * the end of the page fault to complete the OOM handling.
+ *
+ * Returns %true if an ongoing memcg OOM situation was detected and
+ * completed, %false otherwise.
  */
-static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
-                                 int order)
+bool mem_cgroup_oom_synchronize(bool handle)
 {
+       struct mem_cgroup *memcg = current->memcg_oom.memcg;
        struct oom_wait_info owait;
-       bool locked, need_to_kill;
+       bool locked;
+
+       /* OOM is global, do not handle */
+       if (!memcg)
+               return false;
+
+       if (!handle)
+               goto cleanup;
 
        owait.memcg = memcg;
        owait.wait.flags = 0;
        owait.wait.func = memcg_oom_wake_function;
        owait.wait.private = current;
        INIT_LIST_HEAD(&owait.wait.task_list);
-       need_to_kill = true;
-       mem_cgroup_mark_under_oom(memcg);
 
-       /* At first, try to OOM lock hierarchy under memcg.*/
-       spin_lock(&memcg_oom_lock);
-       locked = mem_cgroup_oom_lock(memcg);
-       /*
-        * Even if signal_pending(), we can't quit charge() loop without
-        * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
-        * under OOM is always welcomed, use TASK_KILLABLE here.
-        */
        prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
-       if (!locked || memcg->oom_kill_disable)
-               need_to_kill = false;
+       mem_cgroup_mark_under_oom(memcg);
+
+       locked = mem_cgroup_oom_trylock(memcg);
+
        if (locked)
                mem_cgroup_oom_notify(memcg);
-       spin_unlock(&memcg_oom_lock);
 
-       if (need_to_kill) {
+       if (locked && !memcg->oom_kill_disable) {
+               mem_cgroup_unmark_under_oom(memcg);
                finish_wait(&memcg_oom_waitq, &owait.wait);
-               mem_cgroup_out_of_memory(memcg, mask, order);
+               mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
+                                        current->memcg_oom.order);
        } else {
                schedule();
+               mem_cgroup_unmark_under_oom(memcg);
                finish_wait(&memcg_oom_waitq, &owait.wait);
        }
-       spin_lock(&memcg_oom_lock);
-       if (locked)
-               mem_cgroup_oom_unlock(memcg);
-       memcg_wakeup_oom(memcg);
-       spin_unlock(&memcg_oom_lock);
 
-       mem_cgroup_unmark_under_oom(memcg);
-
-       if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
-               return false;
-       /* Give chance to dying process */
-       schedule_timeout_uninterruptible(1);
+       if (locked) {
+               mem_cgroup_oom_unlock(memcg);
+               /*
+                * There is no guarantee that an OOM-lock contender
+                * sees the wakeups triggered by the OOM kill
+                * uncharges.  Wake any sleepers explicitly.
+                */
+               memcg_oom_recover(memcg);
+       }
+cleanup:
+       current->memcg_oom.memcg = NULL;
+       css_put(&memcg->css);
        return true;
 }
 
@@ -2550,12 +2594,11 @@ enum {
        CHARGE_RETRY,           /* need to retry but retry is not bad */
        CHARGE_NOMEM,           /* we can't do more. return -ENOMEM */
        CHARGE_WOULDBLOCK,      /* GFP_WAIT wasn't set and no enough res. */
-       CHARGE_OOM_DIE,         /* the current is killed because of OOM */
 };
 
 static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
                                unsigned int nr_pages, unsigned int min_pages,
-                               bool oom_check)
+                               bool invoke_oom)
 {
        unsigned long csize = nr_pages * PAGE_SIZE;
        struct mem_cgroup *mem_over_limit;
@@ -2612,14 +2655,10 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
        if (mem_cgroup_wait_acct_move(mem_over_limit))
                return CHARGE_RETRY;
 
-       /* If we don't need to call oom-killer at el, return immediately */
-       if (!oom_check)
-               return CHARGE_NOMEM;
-       /* check OOM */
-       if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
-               return CHARGE_OOM_DIE;
+       if (invoke_oom)
+               mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(csize));
 
-       return CHARGE_RETRY;
+       return CHARGE_NOMEM;
 }
 
 /*
@@ -2663,6 +2702,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                     || fatal_signal_pending(current)))
                goto bypass;
 
+       if (unlikely(task_in_memcg_oom(current)))
+               goto bypass;
+
        /*
         * We always charge the cgroup the mm_struct belongs to.
         * The mm_struct's mem_cgroup changes on task migration if the
@@ -2722,7 +2764,7 @@ again:
        }
 
        do {
-               bool oom_check;
+               bool invoke_oom = oom && !nr_oom_retries;
 
                /* If killed, bypass charge */
                if (fatal_signal_pending(current)) {
@@ -2730,14 +2772,8 @@ again:
                        goto bypass;
                }
 
-               oom_check = false;
-               if (oom && !nr_oom_retries) {
-                       oom_check = true;
-                       nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
-               }
-
-               ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, nr_pages,
-                   oom_check);
+               ret = mem_cgroup_do_charge(memcg, gfp_mask, batch,
+                                          nr_pages, invoke_oom);
                switch (ret) {
                case CHARGE_OK:
                        break;
@@ -2750,16 +2786,12 @@ again:
                        css_put(&memcg->css);
                        goto nomem;
                case CHARGE_NOMEM: /* OOM routine works */
-                       if (!oom) {
+                       if (!oom || invoke_oom) {
                                css_put(&memcg->css);
                                goto nomem;
                        }
-                       /* If oom, we never return -ENOMEM */
                        nr_oom_retries--;
                        break;
-               case CHARGE_OOM_DIE: /* Killed by OOM Killer */
-                       css_put(&memcg->css);
-                       goto bypass;
                }
        } while (ret != CHARGE_OK);
 
@@ -6763,6 +6795,12 @@ static int mem_cgroup_can_attach(struct cgroup *cgroup,
        return ret;
 }
 
+static int mem_cgroup_allow_attach(struct cgroup *cgroup,
+                                  struct cgroup_taskset *tset)
+{
+       return subsys_cgroup_allow_attach(cgroup, tset);
+}
+
 static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
                                     struct cgroup_taskset *tset)
 {
@@ -6931,6 +6969,11 @@ static int mem_cgroup_can_attach(struct cgroup *cgroup,
 {
        return 0;
 }
+static int mem_cgroup_allow_attach(struct cgroup *cgroup,
+                                  struct cgroup_taskset *tset)
+{
+       return 0;
+}
 static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
                                     struct cgroup_taskset *tset)
 {
@@ -6966,6 +7009,7 @@ struct cgroup_subsys mem_cgroup_subsys = {
        .can_attach = mem_cgroup_can_attach,
        .cancel_attach = mem_cgroup_cancel_attach,
        .attach = mem_cgroup_move_task,
+       .allow_attach = mem_cgroup_allow_attach,
        .bind = mem_cgroup_bind,
        .base_cftypes = mem_cgroup_files,
        .early_init = 0,
index ebe0f285c0e7094b431c726f752aa354f61eff80..b5edd6ee71d831fdd869bff1666839838d574443 100644 (file)
@@ -118,6 +118,8 @@ __setup("norandmaps", disable_randmaps);
 unsigned long zero_pfn __read_mostly;
 unsigned long highest_memmap_pfn __read_mostly;
 
+EXPORT_SYMBOL(zero_pfn);
+
 /*
  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
  */
@@ -3754,22 +3756,14 @@ unlock:
 /*
  * By the time we get here, we already hold the mm semaphore
  */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-               unsigned long address, unsigned int flags)
+static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                            unsigned long address, unsigned int flags)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
 
-       __set_current_state(TASK_RUNNING);
-
-       count_vm_event(PGFAULT);
-       mem_cgroup_count_vm_event(mm, PGFAULT);
-
-       /* do counter updates before entering really critical section. */
-       check_sync_rss_stat(current);
-
        if (unlikely(is_vm_hugetlb_page(vma)))
                return hugetlb_fault(mm, vma, address, flags);
 
@@ -3850,6 +3844,43 @@ retry:
        return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
 
+int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                   unsigned long address, unsigned int flags)
+{
+       int ret;
+
+       __set_current_state(TASK_RUNNING);
+
+       count_vm_event(PGFAULT);
+       mem_cgroup_count_vm_event(mm, PGFAULT);
+
+       /* do counter updates before entering really critical section. */
+       check_sync_rss_stat(current);
+
+       /*
+        * Enable the memcg OOM handling for faults triggered in user
+        * space.  Kernel faults are handled more gracefully.
+        */
+       if (flags & FAULT_FLAG_USER)
+               mem_cgroup_oom_enable();
+
+       ret = __handle_mm_fault(mm, vma, address, flags);
+
+       if (flags & FAULT_FLAG_USER) {
+               mem_cgroup_oom_disable();
+                /*
+                 * The task may have entered a memcg OOM situation but
+                 * if the allocation error was handled gracefully (no
+                 * VM_FAULT_OOM), there is no need to kill anything.
+                 * Just clean up the OOM state peacefully.
+                 */
+                if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
+                        mem_cgroup_oom_synchronize(false);
+       }
+
+       return ret;
+}
+
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.
index dfa94ed3c7fa72939c8d0cf15f7b23d3d82f93ef..4d87d7c4ed2e4e0d545f4db22146b6230440064e 100644 (file)
@@ -47,19 +47,21 @@ static DEFINE_SPINLOCK(zone_scan_lock);
 #ifdef CONFIG_NUMA
 /**
  * has_intersects_mems_allowed() - check task eligiblity for kill
- * @tsk: task struct of which task to consider
+ * @start: task struct of which task to consider
  * @mask: nodemask passed to page allocator for mempolicy ooms
  *
  * Task eligibility is determined by whether or not a candidate task, @tsk,
  * shares the same mempolicy nodes as current if it is bound by such a policy
  * and whether or not it has the same set of allowed cpuset nodes.
  */
-static bool has_intersects_mems_allowed(struct task_struct *tsk,
+static bool has_intersects_mems_allowed(struct task_struct *start,
                                        const nodemask_t *mask)
 {
-       struct task_struct *start = tsk;
+       struct task_struct *tsk;
+       bool ret = false;
 
-       do {
+       rcu_read_lock();
+       for_each_thread(start, tsk) {
                if (mask) {
                        /*
                         * If this is a mempolicy constrained oom, tsk's
@@ -67,19 +69,20 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
                         * mempolicy intersects current, otherwise it may be
                         * needlessly killed.
                         */
-                       if (mempolicy_nodemask_intersects(tsk, mask))
-                               return true;
+                       ret = mempolicy_nodemask_intersects(tsk, mask);
                } else {
                        /*
                         * This is not a mempolicy constrained oom, so only
                         * check the mems of tsk's cpuset.
                         */
-                       if (cpuset_mems_allowed_intersects(current, tsk))
-                               return true;
+                       ret = cpuset_mems_allowed_intersects(current, tsk);
                }
-       } while_each_thread(start, tsk);
+               if (ret)
+                       break;
+       }
+       rcu_read_unlock();
 
-       return false;
+       return ret;
 }
 #else
 static bool has_intersects_mems_allowed(struct task_struct *tsk,
@@ -97,16 +100,21 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
  */
 struct task_struct *find_lock_task_mm(struct task_struct *p)
 {
-       struct task_struct *t = p;
+       struct task_struct *t;
 
-       do {
+       rcu_read_lock();
+
+       for_each_thread(p, t) {
                task_lock(t);
                if (likely(t->mm))
-                       return t;
+                       goto found;
                task_unlock(t);
-       } while_each_thread(p, t);
+       }
+       t = NULL;
+found:
+       rcu_read_unlock();
 
-       return NULL;
+       return t;
 }
 
 /* return true if the task is not adequate as candidate victim task. */
@@ -301,7 +309,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
        unsigned long chosen_points = 0;
 
        rcu_read_lock();
-       do_each_thread(g, p) {
+       for_each_process_thread(g, p) {
                unsigned int points;
 
                switch (oom_scan_process_thread(p, totalpages, nodemask,
@@ -323,7 +331,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
                        chosen = p;
                        chosen_points = points;
                }
-       } while_each_thread(g, p);
+       }
        if (chosen)
                get_task_struct(chosen);
        rcu_read_unlock();
@@ -394,6 +402,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
                dump_tasks(memcg, nodemask);
 }
 
+/*
+ * Number of OOM killer invocations (including memcg OOM killer).
+ * Primarily used by PM freezer to check for potential races with
+ * OOM killed frozen task.
+ */
+static atomic_t oom_kills = ATOMIC_INIT(0);
+
+int oom_kills_count(void)
+{
+       return atomic_read(&oom_kills);
+}
+
+void note_oom_kill(void)
+{
+       atomic_inc(&oom_kills);
+}
+
 #define K(x) ((x) << (PAGE_SHIFT-10))
 /*
  * Must be called while holding a reference to p, which will be released upon
@@ -406,7 +431,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 {
        struct task_struct *victim = p;
        struct task_struct *child;
-       struct task_struct *t = p;
+       struct task_struct *t;
        struct mm_struct *mm;
        unsigned int victim_points = 0;
        static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
@@ -437,7 +462,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
         * still freeing memory.
         */
        read_lock(&tasklist_lock);
-       do {
+       for_each_thread(p, t) {
                list_for_each_entry(child, &t->children, sibling) {
                        unsigned int child_points;
 
@@ -455,13 +480,11 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
                                get_task_struct(victim);
                        }
                }
-       } while_each_thread(p, t);
+       }
        read_unlock(&tasklist_lock);
 
-       rcu_read_lock();
        p = find_lock_task_mm(victim);
        if (!p) {
-               rcu_read_unlock();
                put_task_struct(victim);
                return;
        } else if (victim != p) {
@@ -487,6 +510,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
         * That thread will now get access to memory reserves since it has a
         * pending fatal signal.
         */
+       rcu_read_lock();
        for_each_process(p)
                if (p->mm == mm && !same_thread_group(p, victim) &&
                    !(p->flags & PF_KTHREAD)) {
@@ -678,9 +702,12 @@ out:
  */
 void pagefault_out_of_memory(void)
 {
-       struct zonelist *zonelist = node_zonelist(first_online_node,
-                                                 GFP_KERNEL);
+       struct zonelist *zonelist;
+
+       if (mem_cgroup_oom_synchronize(true))
+               return;
 
+       zonelist = node_zonelist(first_online_node, GFP_KERNEL);
        if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) {
                out_of_memory(NULL, 0, 0, NULL, false);
                clear_zonelist_oom(zonelist, GFP_KERNEL);
index bf6c9285013fb0372693ef82f75cdd50007b2620..a13e47eecee8b95cc423799858cb4815517dc870 100644 (file)
@@ -2141,6 +2141,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                return NULL;
        }
 
+       /*
+        * PM-freezer should be notified that there might be an OOM killer on
+        * its way to kill and wake somebody up. This is too early and we might
+        * end up not killing anything but false positives are acceptable.
+        * See freeze_processes.
+        */
+       note_oom_kill();
+
        /*
         * Go through the zonelist yet one more time, keep very high watermark
         * here, this is only to catch a parallel oom killing, we must fail if
@@ -2361,7 +2369,7 @@ static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
        int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
-       const gfp_t wait = gfp_mask & __GFP_WAIT;
+       const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
 
        /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
        BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -2370,20 +2378,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
         * The caller may dip into page reserves a bit more if the caller
         * cannot run direct reclaim, or if the caller has realtime scheduling
         * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
-        * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
+        * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
         */
        alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 
-       if (!wait) {
+       if (atomic) {
                /*
-                * Not worth trying to allocate harder for
-                * __GFP_NOMEMALLOC even if it can't schedule.
+                * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
+                * if it can't schedule.
                 */
-               if  (!(gfp_mask & __GFP_NOMEMALLOC))
+               if (!(gfp_mask & __GFP_NOMEMALLOC))
                        alloc_flags |= ALLOC_HARDER;
                /*
-                * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
-                * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+                * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
+                * comment for __cpuset_node_allowed_softwall().
                 */
                alloc_flags &= ~ALLOC_CPUSET;
        } else if (unlikely(rt_task(current)) && !in_interrupt())
index 6d757e3a872ad52adea55aabecee1ec571c3970d..e007236f345ad812a6913bf7246f9587d2e85a97 100644 (file)
@@ -170,6 +170,7 @@ static void free_page_cgroup(void *addr)
                        sizeof(struct page_cgroup) * PAGES_PER_SECTION;
 
                BUG_ON(PageReserved(page));
+               kmemleak_free(addr);
                free_pages_exact(addr, table_size);
        }
 }
index 3707c71ae4cddbec027eac857291185c662c760a..51108165f829d777e4c69abe6109cb88ca4d7e14 100644 (file)
@@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                            int page_start, int page_end)
 {
        const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
-       unsigned int cpu;
+       unsigned int cpu, tcpu;
        int i;
 
        for_each_possible_cpu(cpu) {
@@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                        struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
 
                        *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
-                       if (!*pagep) {
-                               pcpu_free_pages(chunk, pages, populated,
-                                               page_start, page_end);
-                               return -ENOMEM;
-                       }
+                       if (!*pagep)
+                               goto err;
                }
        }
        return 0;
+
+err:
+       while (--i >= page_start)
+               __free_page(pages[pcpu_page_idx(cpu, i)]);
+
+       for_each_possible_cpu(tcpu) {
+               if (tcpu == cpu)
+                       break;
+               for (i = page_start; i < page_end; i++)
+                       __free_page(pages[pcpu_page_idx(tcpu, i)]);
+       }
+       return -ENOMEM;
 }
 
 /**
@@ -263,6 +272,7 @@ err:
                __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
                                   page_end - page_start);
        }
+       pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
        return err;
 }
 
index 6019778b951bff5d00a47d43e9ab5c18bb483596..5373c7fffd993865e0288b0a851e02b98f8a3a0d 100644 (file)
@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
 #define SHORT_SYMLINK_LEN 128
 
 /*
- * shmem_fallocate and shmem_writepage communicate via inode->i_private
- * (with i_mutex making sure that it has only one user at a time):
- * we would prefer not to enlarge the shmem inode just for that.
+ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+ * inode->i_private (with i_mutex making sure that it has only one user at
+ * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
+       wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
@@ -533,22 +534,19 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                return;
 
        index = start;
-       for ( ; ; ) {
+       while (index < end) {
                cond_resched();
                pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                                        pvec.pages, indices);
                if (!pvec.nr) {
-                       if (index == start || unfalloc)
+                       /* If all gone or hole-punch or unfalloc, we're done */
+                       if (index == start || end != -1)
                                break;
+                       /* But if truncating, restart to make sure all gone */
                        index = start;
                        continue;
                }
-               if ((index == start || unfalloc) && indices[0] >= end) {
-                       shmem_deswap_pagevec(&pvec);
-                       pagevec_release(&pvec);
-                       break;
-               }
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
@@ -560,8 +558,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
-                               nr_swaps_freed += !shmem_free_swap(mapping,
-                                                               index, page);
+                               if (shmem_free_swap(mapping, index, page)) {
+                                       /* Swap was replaced by page: retry */
+                                       index--;
+                                       break;
+                               }
+                               nr_swaps_freed++;
                                continue;
                        }
 
@@ -570,6 +572,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                if (page->mapping == mapping) {
                                        VM_BUG_ON(PageWriteback(page));
                                        truncate_inode_page(mapping, page);
+                               } else {
+                                       /* Page was replaced by swap: retry */
+                                       unlock_page(page);
+                                       index--;
+                                       break;
                                }
                        }
                        unlock_page(page);
@@ -826,6 +833,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                        spin_lock(&inode->i_lock);
                        shmem_falloc = inode->i_private;
                        if (shmem_falloc &&
+                           !shmem_falloc->waitq &&
                            index >= shmem_falloc->start &&
                            index < shmem_falloc->next)
                                shmem_falloc->nr_unswapped++;
@@ -1300,6 +1308,64 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        int error;
        int ret = VM_FAULT_LOCKED;
 
+       /*
+        * Trinity finds that probing a hole which tmpfs is punching can
+        * prevent the hole-punch from ever completing: which in turn
+        * locks writers out with its hold on i_mutex.  So refrain from
+        * faulting pages into the hole while it's being punched.  Although
+        * shmem_undo_range() does remove the additions, it may be unable to
+        * keep up, as each new page needs its own unmap_mapping_range() call,
+        * and the i_mmap tree grows ever slower to scan if new vmas are added.
+        *
+        * It does not matter if we sometimes reach this check just before the
+        * hole-punch begins, so that one fault then races with the punch:
+        * we just need to make racing faults a rare case.
+        *
+        * The implementation below would be much simpler if we just used a
+        * standard mutex or completion: but we cannot take i_mutex in fault,
+        * and bloating every shmem inode for this unlikely case would be sad.
+        */
+       if (unlikely(inode->i_private)) {
+               struct shmem_falloc *shmem_falloc;
+
+               spin_lock(&inode->i_lock);
+               shmem_falloc = inode->i_private;
+               if (shmem_falloc &&
+                   shmem_falloc->waitq &&
+                   vmf->pgoff >= shmem_falloc->start &&
+                   vmf->pgoff < shmem_falloc->next) {
+                       wait_queue_head_t *shmem_falloc_waitq;
+                       DEFINE_WAIT(shmem_fault_wait);
+
+                       ret = VM_FAULT_NOPAGE;
+                       if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+                          !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+                               /* It's polite to up mmap_sem if we can */
+                               up_read(&vma->vm_mm->mmap_sem);
+                               ret = VM_FAULT_RETRY;
+                       }
+
+                       shmem_falloc_waitq = shmem_falloc->waitq;
+                       prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+                                       TASK_UNINTERRUPTIBLE);
+                       spin_unlock(&inode->i_lock);
+                       schedule();
+
+                       /*
+                        * shmem_falloc_waitq points into the shmem_fallocate()
+                        * stack of the hole-punching task: shmem_falloc_waitq
+                        * is usually invalid by the time we reach here, but
+                        * finish_wait() does not dereference it in that case;
+                        * though i_lock needed lest racing with wake_up_all().
+                        */
+                       spin_lock(&inode->i_lock);
+                       finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+                       spin_unlock(&inode->i_lock);
+                       return ret;
+               }
+               spin_unlock(&inode->i_lock);
+       }
+
        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
        if (error)
                return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
@@ -1821,12 +1887,25 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                struct address_space *mapping = file->f_mapping;
                loff_t unmap_start = round_up(offset, PAGE_SIZE);
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+               DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+
+               shmem_falloc.waitq = &shmem_falloc_waitq;
+               shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+               shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+               spin_lock(&inode->i_lock);
+               inode->i_private = &shmem_falloc;
+               spin_unlock(&inode->i_lock);
 
                if ((u64)unmap_end > (u64)unmap_start)
                        unmap_mapping_range(mapping, unmap_start,
                                            1 + unmap_end - unmap_start, 0);
                shmem_truncate_range(inode, offset, offset + len - 1);
                /* No need to unmap again: hole-punching leaves COWed pages */
+
+               spin_lock(&inode->i_lock);
+               inode->i_private = NULL;
+               wake_up_all(&shmem_falloc_waitq);
+               spin_unlock(&inode->i_lock);
                error = 0;
                goto out;
        }
@@ -1844,6 +1923,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                goto out;
        }
 
+       shmem_falloc.waitq = NULL;
        shmem_falloc.start = start;
        shmem_falloc.next  = start;
        shmem_falloc.nr_falloced = 0;
@@ -2048,8 +2128,10 @@ static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct
 
        if (new_dentry->d_inode) {
                (void) shmem_unlink(new_dir, new_dentry);
-               if (they_are_dirs)
+               if (they_are_dirs) {
+                       drop_nlink(new_dentry->d_inode);
                        drop_nlink(old_dir);
+               }
        } else if (they_are_dirs) {
                drop_nlink(old_dir);
                inc_nlink(new_dir);
index 2d414508e9ecb32e64df911c72bf0dd2682b87b8..7d21d3fddbf0427deada294d606de29930ff9694 100644 (file)
@@ -55,6 +55,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
                        continue;
                }
 
+#if !defined(CONFIG_SLUB)
                /*
                 * For simplicity, we won't check this in the list of memcg
                 * caches. We have control over memcg naming, and if there
@@ -68,6 +69,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
                        s = NULL;
                        return -EINVAL;
                }
+#endif
        }
 
        WARN_ON(strchr(name, ' '));     /* It confuses parsers */
index c75b736e54b793f338fce25f35afa60eb0faf5ef..2d6151fc8f083629a8c4e95ddd157afbb76b8772 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/buffer_head.h> /* grr. try_to_release_page,
                                   do_invalidatepage */
 #include <linux/cleancache.h>
+#include <linux/rmap.h>
 #include "internal.h"
 
 
@@ -567,15 +568,66 @@ EXPORT_SYMBOL(truncate_pagecache);
  */
 void truncate_setsize(struct inode *inode, loff_t newsize)
 {
-       loff_t oldsize;
+       loff_t oldsize = inode->i_size;
 
-       oldsize = inode->i_size;
        i_size_write(inode, newsize);
-
+       if (newsize > oldsize)
+               pagecache_isize_extended(inode, oldsize, newsize);
        truncate_pagecache(inode, oldsize, newsize);
 }
 EXPORT_SYMBOL(truncate_setsize);
 
+/**
+ * pagecache_isize_extended - update pagecache after extension of i_size
+ * @inode:     inode for which i_size was extended
+ * @from:      original inode size
+ * @to:                new inode size
+ *
+ * Handle extension of inode size either caused by extending truncate or by
+ * write starting after current i_size. We mark the page straddling current
+ * i_size RO so that page_mkwrite() is called on the nearest write access to
+ * the page.  This way filesystem can be sure that page_mkwrite() is called on
+ * the page before user writes to the page via mmap after the i_size has been
+ * changed.
+ *
+ * The function must be called after i_size is updated so that page fault
+ * coming after we unlock the page will already see the new i_size.
+ * The function must be called while we still hold i_mutex - this not only
+ * makes sure i_size is stable but also that userspace cannot observe new
+ * i_size value before we are prepared to store mmap writes at new inode size.
+ */
+void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
+{
+       int bsize = 1 << inode->i_blkbits;
+       loff_t rounded_from;
+       struct page *page;
+       pgoff_t index;
+
+       WARN_ON(to > inode->i_size);
+
+       if (from >= to || bsize == PAGE_CACHE_SIZE)
+               return;
+       /* Page straddling @from will not have any hole block created? */
+       rounded_from = round_up(from, bsize);
+       if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
+               return;
+
+       index = from >> PAGE_CACHE_SHIFT;
+       page = find_lock_page(inode->i_mapping, index);
+       /* Page not cached? Nothing to do */
+       if (!page)
+               return;
+       /*
+        * See clear_page_dirty_for_io() for details why set_page_dirty()
+        * is needed.
+        */
+       if (page_mkclean(page))
+               set_page_dirty(page);
+       unlock_page(page);
+       page_cache_release(page);
+}
+EXPORT_SYMBOL(pagecache_isize_extended);
+
 /**
  * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
  * @inode: inode
index ab1424dbe2e6c9396ee66ab4446f1bc1cd557382..f68b2db27e2bab4f46a2cd3f82fc9f8445cd61ba 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -7,6 +7,7 @@
 #include <linux/security.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
+#include <linux/vmalloc.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -272,17 +273,14 @@ pid_t vm_is_stack(struct task_struct *task,
 
        if (in_group) {
                struct task_struct *t;
-               rcu_read_lock();
-               if (!pid_alive(task))
-                       goto done;
 
-               t = task;
-               do {
+               rcu_read_lock();
+               for_each_thread(task, t) {
                        if (vm_is_stack_for_task(t, vma)) {
                                ret = t->pid;
                                goto done;
                        }
-               } while_each_thread(task, t);
+               }
 done:
                rcu_read_unlock();
        }
@@ -384,6 +382,15 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_mmap);
 
+void kvfree(const void *addr)
+{
+       if (is_vmalloc_addr(addr))
+               vfree(addr);
+       else
+               kfree(addr);
+}
+EXPORT_SYMBOL(kvfree);
+
 struct address_space *page_mapping(struct page *page)
 {
        struct address_space *mapping = page->mapping;
index 4a78c4de9f200831c75e5c87bad683bbf669bbfb..42ef36a85e69cd04bd00552dab331ecf5a84285f 100644 (file)
@@ -103,8 +103,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_id);
 
 static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
 {
-       if (skb_cow(skb, skb_headroom(skb)) < 0)
+       if (skb_cow(skb, skb_headroom(skb)) < 0) {
+               kfree_skb(skb);
                return NULL;
+       }
+
        memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
        skb->mac_header += VLAN_HLEN;
        return skb;
index 0018daccdea9cc01235beb9b99d7a140299a5151..8799e171addf1826624b337979a5aa44f35d33c4 100644 (file)
@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
                goto drop;
 
        /* Queue packet (standard) */
-       skb->sk = sock;
-
        if (sock_queue_rcv_skb(sock, skb) < 0)
                goto drop;
 
@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
        if (!skb)
                goto out;
 
-       skb->sk = sk;
        skb_reserve(skb, ddp_dl->header_length);
        skb_reserve(skb, dev->hard_header_len);
        skb->dev = dev;
index 302d29b3744d4c5250778ac784a432d369dde977..5f36f70ce44d3f8607735b85c19cfa0655b617e7 100644 (file)
@@ -887,7 +887,8 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
                l2cap_chan_close(chan, 0);
                lock_sock(sk);
 
-               if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+               if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
+                   !(current->flags & PF_EXITING))
                        err = bt_sock_wait_state(sk, BT_CLOSED,
                                                 sk->sk_lingertime);
        }
index 0c77476d33d2043f388da49a7fe8545418760e96..3ca5e40fe3908f5bc0e362ce27d8edf39519790b 100644 (file)
@@ -1856,10 +1856,13 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
        /* Get data directly from socket receive queue without copying it. */
        while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
                skb_orphan(skb);
-               if (!skb_linearize(skb))
+               if (!skb_linearize(skb)) {
                        s = rfcomm_recv_frame(s, skb);
-               else
+                       if (!s)
+                               break;
+               } else {
                        kfree_skb(skb);
+               }
        }
 
        if (s && (sk->sk_state == BT_CLOSED))
index c1c6028e389adc8b7ad0d40ef94120633c033613..7ca014daa5ab815bc129436ef533a9c22fd9218a 100644 (file)
@@ -887,7 +887,8 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)
                sk->sk_shutdown = SHUTDOWN_MASK;
                __rfcomm_sock_close(sk);
 
-               if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+               if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
+                   !(current->flags & PF_EXITING))
                        err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
        }
        release_sock(sk);
index 3178c7b4a17148f30fa811d80053afb8309bdb3a..de9c955b247acb7ccf1b4efc0a67dd9819502643 100644 (file)
@@ -875,7 +875,8 @@ static int sco_sock_shutdown(struct socket *sock, int how)
                sco_sock_clear_timer(sk);
                __sco_sock_close(sk);
 
-               if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+               if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
+                   !(current->flags & PF_EXITING))
                        err = bt_sock_wait_state(sk, BT_CLOSED,
                                                 sk->sk_lingertime);
        }
@@ -895,7 +896,8 @@ static int sco_sock_release(struct socket *sock)
 
        sco_sock_close(sk);
 
-       if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
+       if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
+           !(current->flags & PF_EXITING)) {
                lock_sock(sk);
                err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
                release_sock(sk);
index e696833a31b532dabf1b7d27e2a46c7590ee0170..11ab6628027ab92dc53826957f0c15c0bde158e5 100644 (file)
@@ -429,6 +429,16 @@ extern netdev_features_t br_features_recompute(struct net_bridge *br,
 extern int br_handle_frame_finish(struct sk_buff *skb);
 extern rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
 
+static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
+{
+       return rcu_dereference(dev->rx_handler) == br_handle_frame;
+}
+
+static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev)
+{
+       return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL;
+}
+
 /* br_ioctl.c */
 extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg);
index 8660ea3be7054571defa59bcfbfc0cd0dd7ab99e..bdb459d21ad8e2d5191023c5b096de39b6a78c90 100644 (file)
@@ -153,7 +153,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
        if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
                goto err;
 
-       p = br_port_get_rcu(dev);
+       p = br_port_get_check_rcu(dev);
        if (!p)
                goto err;
 
index 96238ba95f2b6954c598294243b8f65931a82284..de6662b14e1f5d7110ac1316c1362e4db6bffc5f 100644 (file)
@@ -13,8 +13,6 @@
 #include "auth_x.h"
 #include "auth_x_protocol.h"
 
-#define TEMP_TICKET_BUF_LEN    256
-
 static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
 
 static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
@@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret,
 }
 
 static int ceph_x_decrypt(struct ceph_crypto_key *secret,
-                         void **p, void *end, void *obuf, size_t olen)
+                         void **p, void *end, void **obuf, size_t olen)
 {
        struct ceph_x_encrypt_header head;
        size_t head_len = sizeof(head);
@@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
                return -EINVAL;
 
        dout("ceph_x_decrypt len %d\n", len);
-       ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
-                           *p, len);
+       if (*obuf == NULL) {
+               *obuf = kmalloc(len, GFP_NOFS);
+               if (!*obuf)
+                       return -ENOMEM;
+               olen = len;
+       }
+
+       ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
        if (ret)
                return ret;
        if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
@@ -129,139 +133,120 @@ static void remove_ticket_handler(struct ceph_auth_client *ac,
        kfree(th);
 }
 
-static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
-                                   struct ceph_crypto_key *secret,
-                                   void *buf, void *end)
+static int process_one_ticket(struct ceph_auth_client *ac,
+                             struct ceph_crypto_key *secret,
+                             void **p, void *end)
 {
        struct ceph_x_info *xi = ac->private;
-       int num;
-       void *p = buf;
+       int type;
+       u8 tkt_struct_v, blob_struct_v;
+       struct ceph_x_ticket_handler *th;
+       void *dbuf = NULL;
+       void *dp, *dend;
+       int dlen;
+       char is_enc;
+       struct timespec validity;
+       struct ceph_crypto_key old_key;
+       void *ticket_buf = NULL;
+       void *tp, *tpend;
+       struct ceph_timespec new_validity;
+       struct ceph_crypto_key new_session_key;
+       struct ceph_buffer *new_ticket_blob;
+       unsigned long new_expires, new_renew_after;
+       u64 new_secret_id;
        int ret;
-       char *dbuf;
-       char *ticket_buf;
-       u8 reply_struct_v;
 
-       dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
-       if (!dbuf)
-               return -ENOMEM;
+       ceph_decode_need(p, end, sizeof(u32) + 1, bad);
 
-       ret = -ENOMEM;
-       ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
-       if (!ticket_buf)
-               goto out_dbuf;
+       type = ceph_decode_32(p);
+       dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
 
-       ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
-       reply_struct_v = ceph_decode_8(&p);
-       if (reply_struct_v != 1)
+       tkt_struct_v = ceph_decode_8(p);
+       if (tkt_struct_v != 1)
                goto bad;
-       num = ceph_decode_32(&p);
-       dout("%d tickets\n", num);
-       while (num--) {
-               int type;
-               u8 tkt_struct_v, blob_struct_v;
-               struct ceph_x_ticket_handler *th;
-               void *dp, *dend;
-               int dlen;
-               char is_enc;
-               struct timespec validity;
-               struct ceph_crypto_key old_key;
-               void *tp, *tpend;
-               struct ceph_timespec new_validity;
-               struct ceph_crypto_key new_session_key;
-               struct ceph_buffer *new_ticket_blob;
-               unsigned long new_expires, new_renew_after;
-               u64 new_secret_id;
-
-               ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
-
-               type = ceph_decode_32(&p);
-               dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
-
-               tkt_struct_v = ceph_decode_8(&p);
-               if (tkt_struct_v != 1)
-                       goto bad;
-
-               th = get_ticket_handler(ac, type);
-               if (IS_ERR(th)) {
-                       ret = PTR_ERR(th);
-                       goto out;
-               }
 
-               /* blob for me */
-               dlen = ceph_x_decrypt(secret, &p, end, dbuf,
-                                     TEMP_TICKET_BUF_LEN);
-               if (dlen <= 0) {
-                       ret = dlen;
-                       goto out;
-               }
-               dout(" decrypted %d bytes\n", dlen);
-               dend = dbuf + dlen;
-               dp = dbuf;
+       th = get_ticket_handler(ac, type);
+       if (IS_ERR(th)) {
+               ret = PTR_ERR(th);
+               goto out;
+       }
 
-               tkt_struct_v = ceph_decode_8(&dp);
-               if (tkt_struct_v != 1)
-                       goto bad;
+       /* blob for me */
+       dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
+       if (dlen <= 0) {
+               ret = dlen;
+               goto out;
+       }
+       dout(" decrypted %d bytes\n", dlen);
+       dp = dbuf;
+       dend = dp + dlen;
 
-               memcpy(&old_key, &th->session_key, sizeof(old_key));
-               ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
-               if (ret)
-                       goto out;
+       tkt_struct_v = ceph_decode_8(&dp);
+       if (tkt_struct_v != 1)
+               goto bad;
 
-               ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
-               ceph_decode_timespec(&validity, &new_validity);
-               new_expires = get_seconds() + validity.tv_sec;
-               new_renew_after = new_expires - (validity.tv_sec / 4);
-               dout(" expires=%lu renew_after=%lu\n", new_expires,
-                    new_renew_after);
+       memcpy(&old_key, &th->session_key, sizeof(old_key));
+       ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
+       if (ret)
+               goto out;
 
-               /* ticket blob for service */
-               ceph_decode_8_safe(&p, end, is_enc, bad);
-               tp = ticket_buf;
-               if (is_enc) {
-                       /* encrypted */
-                       dout(" encrypted ticket\n");
-                       dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
-                                             TEMP_TICKET_BUF_LEN);
-                       if (dlen < 0) {
-                               ret = dlen;
-                               goto out;
-                       }
-                       dlen = ceph_decode_32(&tp);
-               } else {
-                       /* unencrypted */
-                       ceph_decode_32_safe(&p, end, dlen, bad);
-                       ceph_decode_need(&p, end, dlen, bad);
-                       ceph_decode_copy(&p, ticket_buf, dlen);
+       ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
+       ceph_decode_timespec(&validity, &new_validity);
+       new_expires = get_seconds() + validity.tv_sec;
+       new_renew_after = new_expires - (validity.tv_sec / 4);
+       dout(" expires=%lu renew_after=%lu\n", new_expires,
+            new_renew_after);
+
+       /* ticket blob for service */
+       ceph_decode_8_safe(p, end, is_enc, bad);
+       if (is_enc) {
+               /* encrypted */
+               dout(" encrypted ticket\n");
+               dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
+               if (dlen < 0) {
+                       ret = dlen;
+                       goto out;
                }
-               tpend = tp + dlen;
-               dout(" ticket blob is %d bytes\n", dlen);
-               ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
-               blob_struct_v = ceph_decode_8(&tp);
-               new_secret_id = ceph_decode_64(&tp);
-               ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
-               if (ret)
+               tp = ticket_buf;
+               dlen = ceph_decode_32(&tp);
+       } else {
+               /* unencrypted */
+               ceph_decode_32_safe(p, end, dlen, bad);
+               ticket_buf = kmalloc(dlen, GFP_NOFS);
+               if (!ticket_buf) {
+                       ret = -ENOMEM;
                        goto out;
-
-               /* all is well, update our ticket */
-               ceph_crypto_key_destroy(&th->session_key);
-               if (th->ticket_blob)
-                       ceph_buffer_put(th->ticket_blob);
-               th->session_key = new_session_key;
-               th->ticket_blob = new_ticket_blob;
-               th->validity = new_validity;
-               th->secret_id = new_secret_id;
-               th->expires = new_expires;
-               th->renew_after = new_renew_after;
-               dout(" got ticket service %d (%s) secret_id %lld len %d\n",
-                    type, ceph_entity_type_name(type), th->secret_id,
-                    (int)th->ticket_blob->vec.iov_len);
-               xi->have_keys |= th->service;
+               }
+               tp = ticket_buf;
+               ceph_decode_need(p, end, dlen, bad);
+               ceph_decode_copy(p, ticket_buf, dlen);
        }
+       tpend = tp + dlen;
+       dout(" ticket blob is %d bytes\n", dlen);
+       ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
+       blob_struct_v = ceph_decode_8(&tp);
+       new_secret_id = ceph_decode_64(&tp);
+       ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
+       if (ret)
+               goto out;
+
+       /* all is well, update our ticket */
+       ceph_crypto_key_destroy(&th->session_key);
+       if (th->ticket_blob)
+               ceph_buffer_put(th->ticket_blob);
+       th->session_key = new_session_key;
+       th->ticket_blob = new_ticket_blob;
+       th->validity = new_validity;
+       th->secret_id = new_secret_id;
+       th->expires = new_expires;
+       th->renew_after = new_renew_after;
+       dout(" got ticket service %d (%s) secret_id %lld len %d\n",
+            type, ceph_entity_type_name(type), th->secret_id,
+            (int)th->ticket_blob->vec.iov_len);
+       xi->have_keys |= th->service;
 
-       ret = 0;
 out:
        kfree(ticket_buf);
-out_dbuf:
        kfree(dbuf);
        return ret;
 
@@ -270,6 +255,34 @@ bad:
        goto out;
 }
 
+static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
+                                   struct ceph_crypto_key *secret,
+                                   void *buf, void *end)
+{
+       void *p = buf;
+       u8 reply_struct_v;
+       u32 num;
+       int ret;
+
+       ceph_decode_8_safe(&p, end, reply_struct_v, bad);
+       if (reply_struct_v != 1)
+               return -EINVAL;
+
+       ceph_decode_32_safe(&p, end, num, bad);
+       dout("%d tickets\n", num);
+
+       while (num--) {
+               ret = process_one_ticket(ac, secret, &p, end);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+
+bad:
+       return -EINVAL;
+}
+
 static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
                                   struct ceph_x_ticket_handler *th,
                                   struct ceph_x_authorizer *au)
@@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
        struct ceph_x_ticket_handler *th;
        int ret = 0;
        struct ceph_x_authorize_reply reply;
+       void *preply = &reply;
        void *p = au->reply_buf;
        void *end = p + sizeof(au->reply_buf);
 
        th = get_ticket_handler(ac, au->service);
        if (IS_ERR(th))
                return PTR_ERR(th);
-       ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
+       ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
        if (ret < 0)
                return ret;
        if (ret != sizeof(reply))
index 6e7a236525b6ff92d9cd2e44770cb6f7334b0b02..06f19b9e159a3c30f180cca3e5b405207519f36e 100644 (file)
@@ -89,11 +89,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
 
 static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
 
+/*
+ * Should be used for buffers allocated with ceph_kvmalloc().
+ * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
+ * in-buffer (msg front).
+ *
+ * Dispose of @sgt with teardown_sgtable().
+ *
+ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
+ * in cases where a single sg is sufficient.  No attempt to reduce the
+ * number of sgs by squeezing physically contiguous pages together is
+ * made though, for simplicity.
+ */
+static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
+                        const void *buf, unsigned int buf_len)
+{
+       struct scatterlist *sg;
+       const bool is_vmalloc = is_vmalloc_addr(buf);
+       unsigned int off = offset_in_page(buf);
+       unsigned int chunk_cnt = 1;
+       unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
+       int i;
+       int ret;
+
+       if (buf_len == 0) {
+               memset(sgt, 0, sizeof(*sgt));
+               return -EINVAL;
+       }
+
+       if (is_vmalloc) {
+               chunk_cnt = chunk_len >> PAGE_SHIFT;
+               chunk_len = PAGE_SIZE;
+       }
+
+       if (chunk_cnt > 1) {
+               ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
+               if (ret)
+                       return ret;
+       } else {
+               WARN_ON(chunk_cnt != 1);
+               sg_init_table(prealloc_sg, 1);
+               sgt->sgl = prealloc_sg;
+               sgt->nents = sgt->orig_nents = 1;
+       }
+
+       for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+               struct page *page;
+               unsigned int len = min(chunk_len - off, buf_len);
+
+               if (is_vmalloc)
+                       page = vmalloc_to_page(buf);
+               else
+                       page = virt_to_page(buf);
+
+               sg_set_page(sg, page, len, off);
+
+               off = 0;
+               buf += len;
+               buf_len -= len;
+       }
+       WARN_ON(buf_len != 0);
+
+       return 0;
+}
+
+static void teardown_sgtable(struct sg_table *sgt)
+{
+       if (sgt->orig_nents > 1)
+               sg_free_table(sgt);
+}
+
 static int ceph_aes_encrypt(const void *key, int key_len,
                            void *dst, size_t *dst_len,
                            const void *src, size_t src_len)
 {
-       struct scatterlist sg_in[2], sg_out[1];
+       struct scatterlist sg_in[2], prealloc_sg;
+       struct sg_table sg_out;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
        int ret;
@@ -109,16 +180,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,
 
        *dst_len = src_len + zero_padding;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        sg_init_table(sg_in, 2);
        sg_set_buf(&sg_in[0], src, src_len);
        sg_set_buf(&sg_in[1], pad, zero_padding);
-       sg_init_table(sg_out, 1);
-       sg_set_buf(sg_out, dst, *dst_len);
+       ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+       if (ret)
+               goto out_tfm;
+
+       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
+
        /*
        print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
                       key, key_len, 1);
@@ -127,16 +200,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
                        pad, zero_padding, 1);
        */
-       ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+       ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
                                     src_len + zero_padding);
-       crypto_free_blkcipher(tfm);
-       if (ret < 0)
+       if (ret < 0) {
                pr_err("ceph_aes_crypt failed %d\n", ret);
+               goto out_sg;
+       }
        /*
        print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
                       dst, *dst_len, 1);
        */
-       return 0;
+
+out_sg:
+       teardown_sgtable(&sg_out);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
@@ -144,7 +223,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
                             const void *src1, size_t src1_len,
                             const void *src2, size_t src2_len)
 {
-       struct scatterlist sg_in[3], sg_out[1];
+       struct scatterlist sg_in[3], prealloc_sg;
+       struct sg_table sg_out;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
        int ret;
@@ -160,17 +240,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
 
        *dst_len = src1_len + src2_len + zero_padding;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        sg_init_table(sg_in, 3);
        sg_set_buf(&sg_in[0], src1, src1_len);
        sg_set_buf(&sg_in[1], src2, src2_len);
        sg_set_buf(&sg_in[2], pad, zero_padding);
-       sg_init_table(sg_out, 1);
-       sg_set_buf(sg_out, dst, *dst_len);
+       ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+       if (ret)
+               goto out_tfm;
+
+       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
+
        /*
        print_hex_dump(KERN_ERR, "enc  key: ", DUMP_PREFIX_NONE, 16, 1,
                       key, key_len, 1);
@@ -181,23 +263,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
        print_hex_dump(KERN_ERR, "enc  pad: ", DUMP_PREFIX_NONE, 16, 1,
                        pad, zero_padding, 1);
        */
-       ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+       ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
                                     src1_len + src2_len + zero_padding);
-       crypto_free_blkcipher(tfm);
-       if (ret < 0)
+       if (ret < 0) {
                pr_err("ceph_aes_crypt2 failed %d\n", ret);
+               goto out_sg;
+       }
        /*
        print_hex_dump(KERN_ERR, "enc  out: ", DUMP_PREFIX_NONE, 16, 1,
                       dst, *dst_len, 1);
        */
-       return 0;
+
+out_sg:
+       teardown_sgtable(&sg_out);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 static int ceph_aes_decrypt(const void *key, int key_len,
                            void *dst, size_t *dst_len,
                            const void *src, size_t src_len)
 {
-       struct scatterlist sg_in[1], sg_out[2];
+       struct sg_table sg_in;
+       struct scatterlist sg_out[2], prealloc_sg;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm };
        char pad[16];
@@ -209,16 +298,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
-       sg_init_table(sg_in, 1);
        sg_init_table(sg_out, 2);
-       sg_set_buf(sg_in, src, src_len);
        sg_set_buf(&sg_out[0], dst, *dst_len);
        sg_set_buf(&sg_out[1], pad, sizeof(pad));
+       ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+       if (ret)
+               goto out_tfm;
 
+       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
 
        /*
@@ -227,12 +316,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec  in: ", DUMP_PREFIX_NONE, 16, 1,
                       src, src_len, 1);
        */
-
-       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
-       crypto_free_blkcipher(tfm);
+       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
        if (ret < 0) {
                pr_err("ceph_aes_decrypt failed %d\n", ret);
-               return ret;
+               goto out_sg;
        }
 
        if (src_len <= *dst_len)
@@ -250,7 +337,12 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
                       dst, *dst_len, 1);
        */
-       return 0;
+
+out_sg:
+       teardown_sgtable(&sg_in);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 static int ceph_aes_decrypt2(const void *key, int key_len,
@@ -258,7 +350,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
                             void *dst2, size_t *dst2_len,
                             const void *src, size_t src_len)
 {
-       struct scatterlist sg_in[1], sg_out[3];
+       struct sg_table sg_in;
+       struct scatterlist sg_out[3], prealloc_sg;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm };
        char pad[16];
@@ -270,17 +363,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
-       sg_init_table(sg_in, 1);
-       sg_set_buf(sg_in, src, src_len);
        sg_init_table(sg_out, 3);
        sg_set_buf(&sg_out[0], dst1, *dst1_len);
        sg_set_buf(&sg_out[1], dst2, *dst2_len);
        sg_set_buf(&sg_out[2], pad, sizeof(pad));
+       ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+       if (ret)
+               goto out_tfm;
 
        crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
 
        /*
@@ -289,12 +382,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec   in: ", DUMP_PREFIX_NONE, 16, 1,
                       src, src_len, 1);
        */
-
-       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
-       crypto_free_blkcipher(tfm);
+       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
        if (ret < 0) {
                pr_err("ceph_aes_decrypt failed %d\n", ret);
-               return ret;
+               goto out_sg;
        }
 
        if (src_len <= *dst1_len)
@@ -324,7 +415,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
                       dst2, *dst2_len, 1);
        */
 
-       return 0;
+out_sg:
+       teardown_sgtable(&sg_in);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 
index b9d7df17570075d3a865ce6f232e36aeacd3acbd..e3bea2e0821a4e64591e0cd6cc4b9dd3c364b35c 100644 (file)
@@ -290,7 +290,8 @@ int ceph_msgr_init(void)
        if (ceph_msgr_slab_init())
                return -ENOMEM;
 
-       ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
+       ceph_msgr_wq = alloc_workqueue("ceph-msgr",
+                                      WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
        if (ceph_msgr_wq)
                return 0;
 
@@ -904,7 +905,7 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
        BUG_ON(page_count > (int)USHRT_MAX);
        cursor->page_count = (unsigned short)page_count;
        BUG_ON(length > SIZE_MAX - cursor->page_offset);
-       cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE;
+       cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
 }
 
 static struct page *
@@ -3144,7 +3145,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
        INIT_LIST_HEAD(&m->data);
 
        /* front */
-       m->front_max = front_len;
+       m->front_alloc_len = front_len;
        if (front_len) {
                if (front_len > PAGE_CACHE_SIZE) {
                        m->front.iov_base = __vmalloc(front_len, flags,
@@ -3319,8 +3320,8 @@ EXPORT_SYMBOL(ceph_msg_last_put);
 
 void ceph_msg_dump(struct ceph_msg *msg)
 {
-       pr_debug("msg_dump %p (front_max %d length %zd)\n", msg,
-                msg->front_max, msg->data_length);
+       pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
+                msg->front_alloc_len, msg->data_length);
        print_hex_dump(KERN_DEBUG, "header: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       &msg->hdr, sizeof(msg->hdr), true);
index 1fe25cd29d0eceb66bb4924ee85e5a62b598c48a..dbcbf5a4707fa8d93f4f5d0810d60e29989c667d 100644 (file)
@@ -152,7 +152,7 @@ static int __open_session(struct ceph_mon_client *monc)
                /* initiatiate authentication handshake */
                ret = ceph_auth_build_hello(monc->auth,
                                            monc->m_auth->front.iov_base,
-                                           monc->m_auth->front_max);
+                                           monc->m_auth->front_alloc_len);
                __send_prepared_auth_request(monc, ret);
        } else {
                dout("open_session mon%d already open\n", monc->cur_mon);
@@ -196,7 +196,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
                int num;
 
                p = msg->front.iov_base;
-               end = p + msg->front_max;
+               end = p + msg->front_alloc_len;
 
                num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap;
                ceph_encode_32(&p, num);
@@ -897,7 +897,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
        ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
                                     msg->front.iov_len,
                                     monc->m_auth->front.iov_base,
-                                    monc->m_auth->front_max);
+                                    monc->m_auth->front_alloc_len);
        if (ret < 0) {
                monc->client->auth_err = ret;
                wake_up_all(&monc->client->auth_wq);
@@ -939,7 +939,7 @@ static int __validate_auth(struct ceph_mon_client *monc)
                return 0;
 
        ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
-                             monc->m_auth->front_max);
+                             monc->m_auth->front_alloc_len);
        if (ret <= 0)
                return ret; /* either an error, or no need to authenticate */
        __send_prepared_auth_request(monc, ret);
@@ -1041,7 +1041,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
        if (!m) {
                pr_info("alloc_msg unknown type %d\n", type);
                *skip = 1;
+       } else if (front_len > m->front_alloc_len) {
+               pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
+                          front_len, m->front_alloc_len,
+                          (unsigned int)con->peer_name.type,
+                          le64_to_cpu(con->peer_name.num));
+               ceph_msg_put(m);
+               m = ceph_msg_new(type, front_len, GFP_NOFS, false);
        }
+
        return m;
 }
 
index f50161fb812eace2eb659ae78bf12062c608b5b6..cbc1a2a265876277848f0a697040cea3db75b7d7 100644 (file)
@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
 {
        int tot_len;
 
-       if (kern_msg->msg_namelen) {
+       if (kern_msg->msg_name && kern_msg->msg_namelen) {
                if (mode == VERIFY_READ) {
                        int err = move_addr_to_kernel(kern_msg->msg_name,
                                                      kern_msg->msg_namelen,
@@ -93,10 +93,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
                        if (err < 0)
                                return err;
                }
-               if (kern_msg->msg_name)
-                       kern_msg->msg_name = kern_address;
-       } else
+               kern_msg->msg_name = kern_address;
+       } else {
                kern_msg->msg_name = NULL;
+               kern_msg->msg_namelen = 0;
+       }
 
        tot_len = iov_from_user_compat_to_kern(kern_iov,
                                          (struct compat_iovec __user *)kern_msg->msg_iov,
index df9cc810ec8e3a78bbdb3b4480deeb8fd35df721..c0e021871df899079a834fdda0d1fbfdecfc252f 100644 (file)
@@ -267,6 +267,15 @@ again:
 }
 EXPORT_SYMBOL(dst_destroy);
 
+static void dst_destroy_rcu(struct rcu_head *head)
+{
+       struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+
+       dst = dst_destroy(dst);
+       if (dst)
+               __dst_free(dst);
+}
+
 void dst_release(struct dst_entry *dst)
 {
        if (dst) {
@@ -274,11 +283,8 @@ void dst_release(struct dst_entry *dst)
 
                newrefcnt = atomic_dec_return(&dst->__refcnt);
                WARN_ON(newrefcnt < 0);
-               if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
-                       dst = dst_destroy(dst);
-                       if (dst)
-                               __dst_free(dst);
-               }
+               if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
+                       call_rcu(&dst->rcu_head, dst_destroy_rcu);
        }
 }
 EXPORT_SYMBOL(dst_release);
index 55e08e2de3a1c2b163b917bcae03ed7fabc95710..da78f5c6d295d11b0557ed0e9fe33880a0a8395d 100644 (file)
@@ -31,6 +31,8 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
        r->pref = pref;
        r->table = table;
        r->flags = flags;
+       r->uid_start = INVALID_UID;
+       r->uid_end = INVALID_UID;
        r->fr_net = hold_net(ops->fro_net);
 
        /* The lock is not required here, the list in unreacheable
@@ -179,6 +181,23 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
 }
 EXPORT_SYMBOL_GPL(fib_rules_unregister);
 
+static inline kuid_t fib_nl_uid(struct nlattr *nla)
+{
+       return make_kuid(current_user_ns(), nla_get_u32(nla));
+}
+
+static int nla_put_uid(struct sk_buff *skb, int idx, kuid_t uid)
+{
+       return nla_put_u32(skb, idx, from_kuid_munged(current_user_ns(), uid));
+}
+
+static int fib_uid_range_match(struct flowi *fl, struct fib_rule *rule)
+{
+       return (!uid_valid(rule->uid_start) && !uid_valid(rule->uid_end)) ||
+              (uid_gte(fl->flowi_uid, rule->uid_start) &&
+               uid_lte(fl->flowi_uid, rule->uid_end));
+}
+
 static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
                          struct flowi *fl, int flags)
 {
@@ -193,6 +212,9 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
        if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
                goto out;
 
+       if (!fib_uid_range_match(fl, rule))
+               goto out;
+
        ret = ops->match(rule, fl, flags);
 out:
        return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
@@ -363,6 +385,19 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
        } else if (rule->action == FR_ACT_GOTO)
                goto errout_free;
 
+       /* UID start and end must either both be valid or both unspecified. */
+       rule->uid_start = rule->uid_end = INVALID_UID;
+       if (tb[FRA_UID_START] || tb[FRA_UID_END]) {
+               if (tb[FRA_UID_START] && tb[FRA_UID_END]) {
+                       rule->uid_start = fib_nl_uid(tb[FRA_UID_START]);
+                       rule->uid_end = fib_nl_uid(tb[FRA_UID_END]);
+               }
+               if (!uid_valid(rule->uid_start) ||
+                   !uid_valid(rule->uid_end) ||
+                   !uid_lte(rule->uid_start, rule->uid_end))
+               goto errout_free;
+       }
+
        err = ops->configure(rule, skb, frh, tb);
        if (err < 0)
                goto errout_free;
@@ -469,6 +504,14 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
                    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
                        continue;
 
+               if (tb[FRA_UID_START] &&
+                   !uid_eq(rule->uid_start, fib_nl_uid(tb[FRA_UID_START])))
+                       continue;
+
+               if (tb[FRA_UID_END] &&
+                   !uid_eq(rule->uid_end, fib_nl_uid(tb[FRA_UID_END])))
+                       continue;
+
                if (!ops->compare(rule, frh, tb))
                        continue;
 
@@ -525,7 +568,9 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
                         + nla_total_size(4) /* FRA_PRIORITY */
                         + nla_total_size(4) /* FRA_TABLE */
                         + nla_total_size(4) /* FRA_FWMARK */
-                        + nla_total_size(4); /* FRA_FWMASK */
+                        + nla_total_size(4) /* FRA_FWMASK */
+                        + nla_total_size(4) /* FRA_UID_START */
+                        + nla_total_size(4); /* FRA_UID_END */
 
        if (ops->nlmsg_payload)
                payload += ops->nlmsg_payload(rule);
@@ -579,7 +624,11 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
            ((rule->mark_mask || rule->mark) &&
             nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
            (rule->target &&
-            nla_put_u32(skb, FRA_GOTO, rule->target)))
+            nla_put_u32(skb, FRA_GOTO, rule->target)) ||
+           (uid_valid(rule->uid_start) &&
+            nla_put_uid(skb, FRA_UID_START, rule->uid_start)) ||
+           (uid_valid(rule->uid_end) &&
+            nla_put_uid(skb, FRA_UID_END, rule->uid_end)))
                goto nla_put_failure;
        if (ops->fill(rule, skb, frh) < 0)
                goto nla_put_failure;
index 9a31515fb8e33e77dddfcbd53df106c774d6eeec..1117a26a854809b6c89eff531b7784f25a353191 100644 (file)
@@ -39,7 +39,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
 {
        int size, ct, err;
 
-       if (m->msg_namelen) {
+       if (m->msg_name && m->msg_namelen) {
                if (mode == VERIFY_READ) {
                        void __user *namep;
                        namep = (void __user __force *) m->msg_name;
@@ -48,10 +48,10 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
                        if (err < 0)
                                return err;
                }
-               if (m->msg_name)
-                       m->msg_name = address;
+               m->msg_name = address;
        } else {
                m->msg_name = NULL;
+               m->msg_namelen = 0;
        }
 
        size = m->msg_iovlen * sizeof(struct iovec);
@@ -107,6 +107,10 @@ EXPORT_SYMBOL(memcpy_toiovecend);
 int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
                        int offset, int len)
 {
+       /* No data? Done! */
+       if (len == 0)
+               return 0;
+
        /* Skip over the finished iovecs */
        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
index 8d9d05edd2eb1e66024311c62c6b7679c52bdc7d..d0afc322b961f37be72a908c4e3fb66b40ff761a 100644 (file)
@@ -95,31 +95,6 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
 #endif
 
 #ifdef CONFIG_INET
-__u32 secure_ip_id(__be32 daddr)
-{
-       u32 hash[MD5_DIGEST_WORDS];
-
-       net_secret_init();
-       hash[0] = (__force __u32) daddr;
-       hash[1] = net_secret[13];
-       hash[2] = net_secret[14];
-       hash[3] = net_secret[15];
-
-       md5_transform(hash, net_secret);
-
-       return hash[0];
-}
-
-__u32 secure_ipv6_id(const __be32 daddr[4])
-{
-       __u32 hash[4];
-
-       net_secret_init();
-       memcpy(hash, daddr, 16);
-       md5_transform(hash, net_secret);
-
-       return hash[0];
-}
 
 __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
                                 __be16 sport, __be16 dport)
index 9f84a5f7404d4b2ae38f78a94152ef72010c50ef..6148716884ae85e6919ec6e8e93885f1fb8c2e9c 100644 (file)
@@ -2810,7 +2810,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
                tail = nskb;
 
                __copy_skb_header(nskb, skb);
-               nskb->mac_len = skb->mac_len;
 
                /* nskb and skb might have different headroom */
                if (nskb->ip_summed == CHECKSUM_PARTIAL)
@@ -2820,6 +2819,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
                skb_set_network_header(nskb, skb->mac_len);
                nskb->transport_header = (nskb->network_header +
                                          skb_network_header_len(skb));
+               skb_reset_mac_len(nskb);
 
                skb_copy_from_linear_data_offset(skb, -tnl_hlen,
                                                 nskb->data - tnl_hlen,
index c32be292c7e382c4a2c600e9e3c559906ff9784b..2022b46ab38fdf464650052923261ea03eddfd99 100644 (file)
@@ -150,7 +150,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
        if (!*_result)
                goto put;
 
-       memcpy(*_result, upayload->data, len + 1);
+       memcpy(*_result, upayload->data, len);
+       (*_result)[len] = '\0';
+
        if (_expiry)
                *_expiry = rkey->expiry;
 
index 4556cd25acde2fe11986dbfe2498c83674612726..ea47f2fc3ea47cf5057805db9f5f0d25543f553b 100644 (file)
@@ -531,6 +531,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
        [RTA_METRICS]           = { .type = NLA_NESTED },
        [RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
        [RTA_FLOW]              = { .type = NLA_U32 },
+       [RTA_UID]               = { .type = NLA_U32 },
 };
 
 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
index 9c3979a50804a0133a6898ed2017a5027922c058..bc773a10dca63bf8ee83f60ed9b36ff8987683b3 100644 (file)
@@ -533,7 +533,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
                        return 1;
 
                attrlen = rtnh_attrlen(rtnh);
-               if (attrlen < 0) {
+               if (attrlen > 0) {
                        struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
 
                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
index cc38f44306ed29bca916f2428332bebee6573a8e..5af8781b65e13f49c0f3ce90bcfa695cf83756a6 100644 (file)
@@ -704,8 +704,6 @@ static void icmp_unreach(struct sk_buff *skb)
                                               &iph->daddr);
                        } else {
                                info = ntohs(icmph->un.frag.mtu);
-                               if (!info)
-                                       goto out;
                        }
                        break;
                case ICMP_SR_FAILED:
index 089b4af4fecc3fa32efa19a33372b535d05bdf1b..155adf8729c2360915584773ed96d12c2bac0590 100644 (file)
@@ -343,7 +343,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
        pip->saddr    = fl4.saddr;
        pip->protocol = IPPROTO_IGMP;
        pip->tot_len  = 0;      /* filled in later */
-       ip_select_ident(skb, &rt->dst, NULL);
+       ip_select_ident(skb, NULL);
        ((u8 *)&pip[1])[0] = IPOPT_RA;
        ((u8 *)&pip[1])[1] = 4;
        ((u8 *)&pip[1])[2] = 0;
@@ -687,7 +687,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
        iph->daddr    = dst;
        iph->saddr    = fl4.saddr;
        iph->protocol = IPPROTO_IGMP;
-       ip_select_ident(skb, &rt->dst, NULL);
+       ip_select_ident(skb, NULL);
        ((u8 *)&iph[1])[0] = IPOPT_RA;
        ((u8 *)&iph[1])[1] = 4;
        ((u8 *)&iph[1])[2] = 0;
@@ -1874,6 +1874,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
        rtnl_lock();
        in_dev = ip_mc_find_dev(net, imr);
+       if (!in_dev) {
+               ret = -ENODEV;
+               goto out;
+       }
        ifindex = imr->imr_ifindex;
        for (imlp = &inet->mc_list;
             (iml = rtnl_dereference(*imlp)) != NULL;
@@ -1891,16 +1895,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
                *imlp = iml->next_rcu;
 
-               if (in_dev)
-                       ip_mc_dec_group(in_dev, group);
+               ip_mc_dec_group(in_dev, group);
                rtnl_unlock();
                /* decrease mem now to avoid the memleak warning */
                atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
                kfree_rcu(iml, rcu);
                return 0;
        }
-       if (!in_dev)
-               ret = -ENODEV;
+out:
        rtnl_unlock();
        return ret;
 }
index 442087d371f69e637a85682378381fbd0927e812..6dfec2f1821457df65d9f17cdba6748d063e8ae2 100644 (file)
@@ -422,7 +422,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
                           sk->sk_protocol,
                           flags,
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
-                          ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+                          ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport,
+                          sock_i_uid(sk));
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
@@ -458,7 +459,8 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
-                          ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+                          ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport,
+                          sock_i_uid(sk));
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
index 33d5537881ed7b39e33199dfe978bbe5b912d706..67140efc15fdfad8acf23cf7ba3ea767a0e81de0 100644 (file)
  *  Theory of operations.
  *  We keep one entry for each peer IP address.  The nodes contains long-living
  *  information about the peer which doesn't depend on routes.
- *  At this moment this information consists only of ID field for the next
- *  outgoing IP packet.  This field is incremented with each packet as encoded
- *  in inet_getid() function (include/net/inetpeer.h).
- *  At the moment of writing this notes identifier of IP packets is generated
- *  to be unpredictable using this code only for packets subjected
- *  (actually or potentially) to defragmentation.  I.e. DF packets less than
- *  PMTU in size when local fragmentation is disabled use a constant ID and do
- *  not use this code (see ip_select_ident() in include/net/ip.h).
  *
- *  Route cache entries hold references to our nodes.
- *  New cache entries get references via lookup by destination IP address in
- *  the avl tree.  The reference is grabbed only when it's needed i.e. only
- *  when we try to output IP packet which needs an unpredictable ID (see
- *  __ip_select_ident() in net/ipv4/route.c).
  *  Nodes are removed only when reference counter goes to 0.
  *  When it's happened the node may be removed when a sufficient amount of
  *  time has been passed since its last use.  The less-recently-used entry can
@@ -62,7 +49,6 @@
  *             refcnt: atomically against modifications on other CPU;
  *                usually under some other lock to prevent node disappearing
  *             daddr: unchangeable
- *             ip_id_count: atomic value (no lock needed)
  */
 
 static struct kmem_cache *peer_cachep __read_mostly;
@@ -504,10 +490,6 @@ relookup:
                p->daddr = *daddr;
                atomic_set(&p->refcnt, 1);
                atomic_set(&p->rid, 0);
-               atomic_set(&p->ip_id_count,
-                               (daddr->family == AF_INET) ?
-                                       secure_ip_id(daddr->addr.a4) :
-                                       secure_ipv6_id(daddr->addr.a6));
                p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
                p->rate_tokens = 0;
                /* 60*HZ is arbitrary, but chosen enough high so that the first
index ec7264514a82e0a130bf15ee01a9030596880921..089ed81d18785c232c10a544899a9bbbfd77f307 100644 (file)
@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
                        optptr++;
                        continue;
                }
+               if (unlikely(l < 2)) {
+                       pp_ptr = optptr;
+                       goto error;
+               }
                optlen = optptr[1];
                if (optlen<2 || optlen>l) {
                        pp_ptr = optptr;
index 7e94d6da35f0ce3fd8280101180ba042291d90ee..1bb117130e3ed599f319a6b986295d97fafcfbf7 100644 (file)
@@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
        iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
        iph->saddr    = saddr;
        iph->protocol = sk->sk_protocol;
-       ip_select_ident(skb, &rt->dst, sk);
+       ip_select_ident(skb, sk);
 
        if (opt && opt->opt.optlen) {
                iph->ihl += opt->opt.optlen>>2;
@@ -394,8 +394,7 @@ packet_routed:
                ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
        }
 
-       ip_select_ident_more(skb, &rt->dst, sk,
-                            (skb_shinfo(skb)->gso_segs ?: 1) - 1);
+       ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1);
 
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
@@ -1332,7 +1331,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
        ip_copy_addrs(iph, fl4);
-       ip_select_ident(skb, &rt->dst, sk);
+       ip_select_ident(skb, sk);
 
        if (opt) {
                iph->ihl += opt->optlen>>2;
@@ -1482,6 +1481,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
        struct sk_buff *nskb;
        struct sock *sk;
        struct inet_sock *inet;
+       int err;
 
        if (ip_options_echo(&replyopts.opt.opt, skb))
                return;
@@ -1503,7 +1503,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
                           RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
                           ip_reply_arg_flowi_flags(arg),
                           daddr, saddr,
-                          tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
+                          tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
+                          arg->uid);
        security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
        rt = ip_route_output_key(net, &fl4);
        if (IS_ERR(rt))
@@ -1519,8 +1520,13 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
        sock_net_set(sk, net);
        __skb_queue_head_init(&sk->sk_write_queue);
        sk->sk_sndbuf = sysctl_wmem_default;
-       ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
-                      &ipc, &rt, MSG_DONTWAIT);
+       err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
+                            len, 0, &ipc, &rt, MSG_DONTWAIT);
+       if (unlikely(err)) {
+               ip_flush_pending_frames(sk);
+               goto out;
+       }
+
        nskb = skb_peek(&sk->sk_write_queue);
        if (nskb) {
                if (arg->csumoffset >= 0)
@@ -1532,7 +1538,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
                skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
                ip_push_pending_frames(sk, &fl4);
        }
-
+out:
        put_cpu_var(unicast_sock);
 
        ip_rt_put(rt);
index fa6573264c8a3bf94f81e97997209284e2dddf13..84aa69caee59486e3dffa0e0c8447a0c278ab0ed 100644 (file)
@@ -166,6 +166,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 
        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (remote != t->parms.iph.daddr ||
+                   t->parms.iph.saddr != 0 ||
                    !(t->dev->flags & IFF_UP))
                        continue;
 
@@ -182,10 +183,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
        head = &itn->tunnels[hash];
 
        hlist_for_each_entry_rcu(t, head, hash_node) {
-               if ((local != t->parms.iph.saddr &&
-                    (local != t->parms.iph.daddr ||
-                     !ipv4_is_multicast(local))) ||
-                   !(t->dev->flags & IFF_UP))
+               if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
+                   (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
+                       continue;
+
+               if (!(t->dev->flags & IFF_UP))
                        continue;
 
                if (!ip_tunnel_key_match(&t->parms, flags, key))
@@ -202,6 +204,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 
        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (t->parms.i_key != key ||
+                   t->parms.iph.saddr != 0 ||
+                   t->parms.iph.daddr != 0 ||
                    !(t->dev->flags & IFF_UP))
                        continue;
 
@@ -687,7 +691,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        iph->daddr      =       fl4.daddr;
        iph->saddr      =       fl4.saddr;
        iph->ttl        =       ttl;
-       __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
+       __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
 
        iptunnel_xmit(skb, dev);
        return;
index 49797ed0917c6ff644e471b3d28293d0459b5ef4..56d079b63ad3fe552db8dab66e9e18ae9edf2b3c 100644 (file)
@@ -1661,7 +1661,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
        iph->protocol   =       IPPROTO_IPIP;
        iph->ihl        =       5;
        iph->tot_len    =       htons(skb->len);
-       ip_select_ident(skb, skb_dst(skb), NULL);
+       ip_select_ident(skb, NULL);
        ip_send_check(iph);
 
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
index b61b28a2121e712243f6a2f6b02277bd6180eba9..64d9d4345cb76ee5cfeb511b1fdb8ab536972a26 100644 (file)
@@ -768,7 +768,8 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
                           RT_SCOPE_UNIVERSE, sk->sk_protocol,
-                          inet_sk_flowi_flags(sk), faddr, saddr, 0, 0);
+                          inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
+                          sock_i_uid(sk));
 
        security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
        rt = ip_route_output_flow(net, &fl4, sk);
index 402870fdfa0ee5dc121742c8feb4c56363e27624..2dfe804fd663822e7ebe7f3c1b41b1804b1a9015 100644 (file)
@@ -387,7 +387,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
                iph->check   = 0;
                iph->tot_len = htons(length);
                if (!iph->id)
-                       ip_select_ident(skb, &rt->dst, NULL);
+                       ip_select_ident(skb, NULL);
 
                iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
        }
@@ -571,9 +571,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
                           RT_SCOPE_UNIVERSE,
                           inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
-                          inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
-                           (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
-                          daddr, saddr, 0, 0);
+                          inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP,
+                          daddr, saddr, 0, 0,
+                          sock_i_uid(sk));
 
        if (!inet->hdrincl) {
                err = raw_probe_proto_opt(&fl4, msg);
index bea0e1f2df0e5f2d9e4e0edbbc3ec129cee69e05..271732355ee98538ec40e96f8f4962b93c65c4b8 100644 (file)
@@ -89,6 +89,7 @@
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/jhash.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
@@ -464,43 +465,57 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
        return neigh_create(&arp_tbl, pkey, dev);
 }
 
-/*
- * Peer allocation may fail only in serious out-of-memory conditions.  However
- * we still can generate some output.
- * Random ID selection looks a bit dangerous because we have no chances to
- * select ID being unique in a reasonable period of time.
- * But broken packet identifier may be better than no packet at all.
+#define IP_IDENTS_SZ 2048u
+struct ip_ident_bucket {
+       atomic_t        id;
+       u32             stamp32;
+};
+
+static struct ip_ident_bucket *ip_idents __read_mostly;
+
+/* In order to protect privacy, we add a perturbation to identifiers
+ * if one generator is seldom used. This makes hard for an attacker
+ * to infer how many packets were sent between two points in time.
  */
-static void ip_select_fb_ident(struct iphdr *iph)
+u32 ip_idents_reserve(u32 hash, int segs)
 {
-       static DEFINE_SPINLOCK(ip_fb_id_lock);
-       static u32 ip_fallback_id;
-       u32 salt;
+       struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
+       u32 old = ACCESS_ONCE(bucket->stamp32);
+       u32 now = (u32)jiffies;
+       u32 delta = 0;
 
-       spin_lock_bh(&ip_fb_id_lock);
-       salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
-       iph->id = htons(salt & 0xFFFF);
-       ip_fallback_id = salt;
-       spin_unlock_bh(&ip_fb_id_lock);
+       if (old != now && cmpxchg(&bucket->stamp32, old, now) == old) {
+               u64 x = prandom_u32();
+
+               x *= (now - old);
+               delta = (u32)(x >> 32);
+       }
+
+       return atomic_add_return(segs + delta, &bucket->id) - segs;
 }
+EXPORT_SYMBOL(ip_idents_reserve);
 
-void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
+void __ip_select_ident(struct iphdr *iph, int segs)
 {
-       struct net *net = dev_net(dst->dev);
-       struct inet_peer *peer;
+       static u32 ip_idents_hashrnd __read_mostly;
+       static bool hashrnd_initialized = false;
+       u32 hash, id;
 
-       peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
-       if (peer) {
-               iph->id = htons(inet_getid(peer, more));
-               inet_putpeer(peer);
-               return;
+       if (unlikely(!hashrnd_initialized)) {
+               hashrnd_initialized = true;
+               get_random_bytes(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
        }
 
-       ip_select_fb_ident(iph);
+       hash = jhash_3words((__force u32)iph->daddr,
+                           (__force u32)iph->saddr,
+                           iph->protocol,
+                           ip_idents_hashrnd);
+       id = ip_idents_reserve(hash, segs);
+       iph->id = htons(id);
 }
 EXPORT_SYMBOL(__ip_select_ident);
 
-static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+static void __build_flow_key(struct flowi4 *fl4, struct sock *sk,
                             const struct iphdr *iph,
                             int oif, u8 tos,
                             u8 prot, u32 mark, int flow_flags)
@@ -516,11 +531,12 @@ static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
        flowi4_init_output(fl4, oif, mark, tos,
                           RT_SCOPE_UNIVERSE, prot,
                           flow_flags,
-                          iph->daddr, iph->saddr, 0, 0);
+                          iph->daddr, iph->saddr, 0, 0,
+                          sk ? sock_i_uid(sk) : 0);
 }
 
 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
-                              const struct sock *sk)
+                              struct sock *sk)
 {
        const struct iphdr *iph = ip_hdr(skb);
        int oif = skb->dev->ifindex;
@@ -531,7 +547,7 @@ static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
        __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
 }
 
-static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
+static void build_sk_flow_key(struct flowi4 *fl4, struct sock *sk)
 {
        const struct inet_sock *inet = inet_sk(sk);
        const struct ip_options_rcu *inet_opt;
@@ -545,11 +561,12 @@ static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
                           inet_sk_flowi_flags(sk),
-                          daddr, inet->inet_saddr, 0, 0);
+                          daddr, inet->inet_saddr, 0, 0,
+                          sock_i_uid(sk));
        rcu_read_unlock();
 }
 
-static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+static void ip_rt_build_flow_key(struct flowi4 *fl4, struct sock *sk,
                                 const struct sk_buff *skb)
 {
        if (skb)
@@ -992,20 +1009,21 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct flowi4 fl4;
        struct rtable *rt;
-       struct dst_entry *dst;
+       struct dst_entry *odst = NULL;
        bool new = false;
 
        bh_lock_sock(sk);
-       rt = (struct rtable *) __sk_dst_get(sk);
+       odst = sk_dst_get(sk);
 
-       if (sock_owned_by_user(sk) || !rt) {
+       if (sock_owned_by_user(sk) || !odst) {
                __ipv4_sk_update_pmtu(skb, sk, mtu);
                goto out;
        }
 
        __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
 
-       if (!__sk_dst_check(sk, 0)) {
+       rt = (struct rtable *)odst;
+       if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
                rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
                if (IS_ERR(rt))
                        goto out;
@@ -1015,8 +1033,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 
        __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
 
-       dst = dst_check(&rt->dst, 0);
-       if (!dst) {
+       if (!dst_check(&rt->dst, 0)) {
                if (new)
                        dst_release(&rt->dst);
 
@@ -1028,10 +1045,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        }
 
        if (new)
-               __sk_dst_set(sk, &rt->dst);
+               sk_dst_set(sk, &rt->dst);
 
 out:
        bh_unlock_sock(sk);
+       dst_release(odst);
 }
 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
 
@@ -2291,6 +2309,11 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
            nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
                goto nla_put_failure;
 
+       if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
+           nla_put_u32(skb, RTA_UID,
+                       from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
+               goto nla_put_failure;
+
        error = rt->dst.error;
 
        if (rt_is_input_route(rt)) {
@@ -2340,6 +2363,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        int err;
        int mark;
        struct sk_buff *skb;
+       kuid_t uid;
 
        err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
        if (err < 0)
@@ -2367,6 +2391,10 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
        iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
        mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
+       if (tb[RTA_UID])
+               uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
+       else
+               uid = (iif ? INVALID_UID : current_uid());
 
        memset(&fl4, 0, sizeof(fl4));
        fl4.daddr = dst;
@@ -2374,6 +2402,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        fl4.flowi4_tos = rtm->rtm_tos;
        fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
        fl4.flowi4_mark = mark;
+       fl4.flowi4_uid = uid;
 
        if (iif) {
                struct net_device *dev;
@@ -2662,6 +2691,12 @@ int __init ip_rt_init(void)
 {
        int rc = 0;
 
+       ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
+       if (!ip_idents)
+               panic("IP: failed to allocate ip_idents\n");
+
+       prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
+
 #ifdef CONFIG_IP_ROUTE_CLASSID
        ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
        if (!ip_rt_acct)
index 5abb45e281bea0ffdcfd14d9e017742d7b19c71a..c94032b95c60142255c41cb911aca47aae9d5ec3 100644 (file)
@@ -353,7 +353,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
                           inet_sk_flowi_flags(sk),
                           (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
-                          ireq->loc_addr, th->source, th->dest);
+                          ireq->loc_addr, th->source, th->dest,
+                          sock_i_uid(sk));
        security_req_classify_flow(req, flowi4_to_flowi(&fl4));
        rt = ip_route_output_key(sock_net(sk), &fl4);
        if (IS_ERR(rt)) {
index 59e8bbc04d95ac24c0c0331c128700bfa78f4003..aabb52855662fbe141ded363b0e5f1eff23b135c 100644 (file)
@@ -1069,7 +1069,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (unlikely(tp->repair)) {
                if (tp->repair_queue == TCP_RECV_QUEUE) {
                        copied = tcp_send_rcvq(sk, msg, size);
-                       goto out;
+                       goto out_nopush;
                }
 
                err = -EINVAL;
@@ -1242,6 +1242,7 @@ wait_for_memory:
 out:
        if (copied)
                tcp_push(sk, flags, mss_now, tp->nonagle);
+out_nopush:
        release_sock(sk);
 
        if (copied + copied_syn)
@@ -3584,3 +3585,4 @@ restart:
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(tcp_nuke_addr);
index f8b30fb54a275b44010a5c05f5543bdc5fd79940..7aa7faa7c3dde7911c3b10ed4760aa93d7e7a4a4 100644 (file)
@@ -1076,7 +1076,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
        }
 
        /* D-SACK for already forgotten data... Do dumb counting. */
-       if (dup_sack && tp->undo_marker && tp->undo_retrans &&
+       if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
            !after(end_seq_0, prior_snd_una) &&
            after(end_seq_0, tp->undo_marker))
                tp->undo_retrans--;
@@ -1131,7 +1131,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
                        unsigned int new_len = (pkt_len / mss) * mss;
                        if (!in_sack && new_len < pkt_len) {
                                new_len += mss;
-                               if (new_len > skb->len)
+                               if (new_len >= skb->len)
                                        return 0;
                        }
                        pkt_len = new_len;
@@ -1155,7 +1155,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
 
        /* Account D-SACK for retransmitted packet. */
        if (dup_sack && (sacked & TCPCB_RETRANS)) {
-               if (tp->undo_marker && tp->undo_retrans &&
+               if (tp->undo_marker && tp->undo_retrans > 0 &&
                    after(end_seq, tp->undo_marker))
                        tp->undo_retrans--;
                if (sacked & TCPCB_SACKED_ACKED)
@@ -1851,7 +1851,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
        tp->lost_out = 0;
 
        tp->undo_marker = 0;
-       tp->undo_retrans = 0;
+       tp->undo_retrans = -1;
 }
 
 void tcp_clear_retrans(struct tcp_sock *tp)
@@ -2701,7 +2701,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 
        tp->prior_ssthresh = 0;
        tp->undo_marker = tp->snd_una;
-       tp->undo_retrans = tp->retrans_out;
+       tp->undo_retrans = tp->retrans_out ? : -1;
 
        if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
                if (!ece_ack)
index 1d62d94ac7e1b8963e5b5b5788c020fbc7ce371e..ef47406debbc9a9bb50d8c51f1321da0420e3430 100644 (file)
@@ -268,7 +268,7 @@ EXPORT_SYMBOL(tcp_v4_connect);
  * It can be called through tcp_release_cb() if socket was owned by user
  * at the time tcp_v4_err() was called to handle ICMP message.
  */
-static void tcp_v4_mtu_reduced(struct sock *sk)
+void tcp_v4_mtu_reduced(struct sock *sk)
 {
        struct dst_entry *dst;
        struct inet_sock *inet = inet_sk(sk);
@@ -298,6 +298,7 @@ static void tcp_v4_mtu_reduced(struct sock *sk)
                tcp_simple_retransmit(sk);
        } /* else let the usual retransmit timer handle it */
 }
+EXPORT_SYMBOL(tcp_v4_mtu_reduced);
 
 static void do_redirect(struct sk_buff *skb, struct sock *sk)
 {
@@ -2143,6 +2144,7 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
        .compat_setsockopt = compat_ip_setsockopt,
        .compat_getsockopt = compat_ip_getsockopt,
 #endif
+       .mtu_reduced       = tcp_v4_mtu_reduced,
 };
 EXPORT_SYMBOL(ipv4_specific);
 
@@ -2868,7 +2870,6 @@ struct proto tcp_prot = {
        .sendpage               = tcp_sendpage,
        .backlog_rcv            = tcp_v4_do_rcv,
        .release_cb             = tcp_release_cb,
-       .mtu_reduced            = tcp_v4_mtu_reduced,
        .hash                   = inet_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
index 9e171160f4857073332fcee815390e858077c7a0..1fd846463d333d4422d9d7c8a1274abe6e779b48 100644 (file)
@@ -774,7 +774,7 @@ void tcp_release_cb(struct sock *sk)
                __sock_put(sk);
        }
        if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
-               sk->sk_prot->mtu_reduced(sk);
+               inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
                __sock_put(sk);
        }
 }
@@ -2035,9 +2035,7 @@ void tcp_send_loss_probe(struct sock *sk)
        if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
                goto rearm_timer;
 
-       /* Probe with zero data doesn't trigger fast recovery. */
-       if (skb->len > 0)
-               err = __tcp_retransmit_skb(sk, skb);
+       err = __tcp_retransmit_skb(sk, skb);
 
        /* Record snd_nxt for loss detection. */
        if (likely(!err))
@@ -2427,13 +2425,15 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                if (!tp->retrans_stamp)
                        tp->retrans_stamp = TCP_SKB_CB(skb)->when;
 
-               tp->undo_retrans += tcp_skb_pcount(skb);
-
                /* snd_nxt is stored to detect loss of retransmitted segment,
                 * see tcp_input.c tcp_sacktag_write_queue().
                 */
                TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
        }
+
+       if (tp->undo_retrans < 0)
+               tp->undo_retrans = 0;
+       tp->undo_retrans += tcp_skb_pcount(skb);
        return err;
 }
 
index 80fa2bfd7edef91e309119d2b5bda953342e1e1b..c042e529a11e1251f0c5f48af2a303ba2bd67cb5 100644 (file)
@@ -218,7 +218,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                         * This is:
                         *     (actual rate in segments) * baseRTT
                         */
-                       target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
+                       target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;
+                       do_div(target_cwnd, rtt);
 
                        /* Calculate the difference between the window we had,
                         * and the window we would like to have. This quantity
index ac43cd747bcebdd83ce98fb26c3f7b1496ea3c69..b4d1858be55031183a7555591f43ed4dbcb4bfbe 100644 (file)
@@ -144,7 +144,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 
                rtt = veno->minrtt;
 
-               target_cwnd = (tp->snd_cwnd * veno->basertt);
+               target_cwnd = (u64)tp->snd_cwnd * veno->basertt;
                target_cwnd <<= V_PARAM_SHIFT;
                do_div(target_cwnd, rtt);
 
index c3075b552248b5853f642aed19f0a89d199cb7b0..58c5dbd5fb945aa66720746ebcd4437855fa7dff 100644 (file)
@@ -963,7 +963,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
                                   RT_SCOPE_UNIVERSE, sk->sk_protocol,
                                   inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
-                                  faddr, saddr, dport, inet->inet_sport);
+                                  faddr, saddr, dport, inet->inet_sport,
+                                  sock_i_uid(sk));
 
                security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
                rt = ip_route_output_flow(net, fl4, sk);
index b5663c37f089ed0afe33115bcbfad2555b8d0f48..e3f64831bc369fd2217b2ad4858ec64877b45f0a 100644 (file)
@@ -117,12 +117,12 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 
        top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
                0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
-       ip_select_ident(skb, dst->child, NULL);
 
        top_iph->ttl = ip4_dst_hoplimit(dst->child);
 
        top_iph->saddr = x->props.saddr.a4;
        top_iph->daddr = x->id.daddr.a4;
+       ip_select_ident(skb, NULL);
 
        return 0;
 }
index 263bd9ec652d0905c4f6eefb0bb59c4add8738b1..08b13803d617d0e241f0395a58e6e16fa7c79cec 100644 (file)
@@ -1175,6 +1175,9 @@ enum {
 #endif
        IPV6_SADDR_RULE_ORCHID,
        IPV6_SADDR_RULE_PREFIX,
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+       IPV6_SADDR_RULE_NOT_OPTIMISTIC,
+#endif
        IPV6_SADDR_RULE_MAX
 };
 
@@ -1202,6 +1205,15 @@ static inline int ipv6_saddr_preferred(int type)
        return 0;
 }
 
+static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev)
+{
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+       return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic;
+#else
+       return false;
+#endif
+}
+
 static int ipv6_get_saddr_eval(struct net *net,
                               struct ipv6_saddr_score *score,
                               struct ipv6_saddr_dst *dst,
@@ -1262,10 +1274,16 @@ static int ipv6_get_saddr_eval(struct net *net,
                score->scopedist = ret;
                break;
        case IPV6_SADDR_RULE_PREFERRED:
+           {
                /* Rule 3: Avoid deprecated and optimistic addresses */
+               u8 avoid = IFA_F_DEPRECATED;
+
+               if (!ipv6_use_optimistic_addr(score->ifa->idev))
+                       avoid |= IFA_F_OPTIMISTIC;
                ret = ipv6_saddr_preferred(score->addr_type) ||
-                     !(score->ifa->flags & (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC));
+                     !(score->ifa->flags & avoid);
                break;
+           }
 #ifdef CONFIG_IPV6_MIP6
        case IPV6_SADDR_RULE_HOA:
            {
@@ -1313,6 +1331,14 @@ static int ipv6_get_saddr_eval(struct net *net,
                        ret = score->ifa->prefix_len;
                score->matchlen = ret;
                break;
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+       case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
+               /* Optimistic addresses still have lower precedence than other
+                * preferred addresses.
+                */
+               ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
+               break;
+#endif
        default:
                ret = 0;
        }
@@ -2719,8 +2745,18 @@ static void init_loopback(struct net_device *dev)
                        if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
                                continue;
 
-                       if (sp_ifa->rt)
-                               continue;
+                       if (sp_ifa->rt) {
+                               /* This dst has been added to garbage list when
+                                * lo device down, release this obsolete dst and
+                                * reallocate a new router for ifa.
+                                */
+                               if (sp_ifa->rt->dst.obsolete > 0) {
+                                       ip6_rt_put(sp_ifa->rt);
+                                       sp_ifa->rt = NULL;
+                               } else {
+                                       continue;
+                               }
+                       }
 
                        sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
 
@@ -3281,8 +3317,15 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp)
         * Optimistic nodes can start receiving
         * Frames right away
         */
-       if (ifp->flags & IFA_F_OPTIMISTIC)
+       if (ifp->flags & IFA_F_OPTIMISTIC) {
                ip6_ins_rt(ifp->rt);
+               if (ipv6_use_optimistic_addr(idev)) {
+                       /* Because optimistic nodes can use this address,
+                        * notify listeners. If DAD fails, RTM_DELADDR is sent.
+                        */
+                       ipv6_ifa_notify(RTM_NEWADDR, ifp);
+               }
+       }
 
        addrconf_dad_kick(ifp);
 out:
@@ -4228,6 +4271,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
+       array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
 #endif
 #ifdef CONFIG_IPV6_MROUTE
        array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
@@ -4962,6 +5006,14 @@ static struct addrconf_sysctl_table
                        .proc_handler   = proc_dointvec,
 
                },
+               {
+                       .procname       = "use_optimistic",
+                       .data           = &ipv6_devconf.use_optimistic,
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
+                       .proc_handler   = proc_dointvec,
+
+               },
 #endif
 #ifdef CONFIG_IPV6_MROUTE
                {
index a4cfde67fcb7dd664bb9cde4f41fab2f406b30d6..d29ae19ae698f18b8c5c676dd6c3727035ffebcc 100644 (file)
@@ -694,6 +694,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
                fl6.flowi6_mark = sk->sk_mark;
                fl6.fl6_dport = inet->inet_dport;
                fl6.fl6_sport = inet->inet_sport;
+               fl6.flowi6_uid = sock_i_uid(sk);
                security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
                final_p = fl6_update_dst(&fl6, np->opt, &final);
index bb02e176cb70537ac7cdc01a03c0c641ab672741..b903e19463c949b6e0dda0dfaae5fe44246b9d66 100644 (file)
@@ -630,7 +630,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, 0, 0);
        else
-               ip6_update_pmtu(skb, net, info, 0, 0);
+               ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
        xfrm_state_put(x);
 }
 
index 1aef8b22ba73494a7edc0f7edba0a2541d057420..b58e3f246438ba413d71c0bdd8710a2db3d99de0 100644 (file)
@@ -162,6 +162,7 @@ ipv4_connected:
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = inet->inet_dport;
        fl6.fl6_sport = inet->inet_sport;
+       fl6.flowi6_uid = sock_i_uid(sk);
 
        if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
                fl6.flowi6_oif = np->mcast_oif;
index 40ffd72243a4f2d1ec9e5da494b3f2650dd027a6..fdc81cb29e80c875154f48257ce45c5393b7f8de 100644 (file)
@@ -449,7 +449,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, 0, 0);
        else
-               ip6_update_pmtu(skb, net, info, 0, 0);
+               ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
        xfrm_state_put(x);
 }
 
index 84bdcd06dd34bcb89bc2d13a1750c7d0f19dec40..c1b611cc55ae8acde4196b3dca82d6006f61944a 100644 (file)
@@ -90,7 +90,7 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct net *net = dev_net(skb->dev);
 
        if (type == ICMPV6_PKT_TOOBIG)
-               ip6_update_pmtu(skb, net, info, 0, 0);
+               ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
        else if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, 0, 0);
 
index f1493138d21e237de7fdeb150b3053962690f71a..65a46058c85448b00901e26e9f49d286c7f96558 100644 (file)
@@ -84,6 +84,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
        fl6->flowi6_mark = inet_rsk(req)->ir_mark;
        fl6->fl6_dport = inet_rsk(req)->rmt_port;
        fl6->fl6_sport = inet_rsk(req)->loc_port;
+       fl6->flowi6_uid = sock_i_uid(sk);
        security_req_classify_flow(req, flowi6_to_flowi(fl6));
 
        dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
@@ -211,6 +212,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
        fl6->flowi6_mark = sk->sk_mark;
        fl6->fl6_sport = inet->inet_sport;
        fl6->fl6_dport = inet->inet_dport;
+       fl6->flowi6_uid = sock_i_uid(sk);
        security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
 
        final_p = fl6_update_dst(fl6, np->opt, &final);
index 7dca7c43fdf14f8a5d61afd2beced09fe8e2c211..6c20f4731f1a0322e9dd1416e670b2d3e1bcd515 100644 (file)
@@ -787,7 +787,7 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
                encap_limit = t->parms.encap_limit;
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-       fl6.flowi6_proto = IPPROTO_IPIP;
+       fl6.flowi6_proto = IPPROTO_GRE;
 
        dsfield = ipv4_get_dsfield(iph);
 
@@ -837,7 +837,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
                encap_limit = t->parms.encap_limit;
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-       fl6.flowi6_proto = IPPROTO_IPV6;
+       fl6.flowi6_proto = IPPROTO_GRE;
 
        dsfield = ipv6_get_dsfield(ipv6h);
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
@@ -962,8 +962,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
        else
                dev->flags &= ~IFF_POINTOPOINT;
 
-       dev->iflink = p->link;
-
        /* Precalculate GRE options length */
        if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
                if (t->parms.o_flags&GRE_CSUM)
@@ -1267,6 +1265,8 @@ static int ip6gre_tunnel_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
 
+       dev->iflink = tunnel->parms.link;
+
        return 0;
 }
 
@@ -1282,7 +1282,6 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
        dev_hold(dev);
 }
 
-
 static struct inet6_protocol ip6gre_protocol __read_mostly = {
        .handler     = ip6gre_rcv,
        .err_handler = ip6gre_err,
@@ -1458,6 +1457,8 @@ static int ip6gre_tap_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
 
+       dev->iflink = tunnel->parms.link;
+
        return 0;
 }
 
index ffa8d295c56c8294a42cdc090c1903c59646c47b..071edcba4158ede71b98c6c3380e85a210b8b240 100644 (file)
@@ -540,6 +540,23 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        skb_copy_secmark(to, from);
 }
 
+static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+{
+       static u32 ip6_idents_hashrnd __read_mostly;
+       static bool hashrnd_initialized = false;
+       u32 hash, id;
+
+       if (unlikely(!hashrnd_initialized)) {
+               hashrnd_initialized = true;
+               get_random_bytes(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
+       }
+       hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
+       hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
+
+       id = ip_idents_reserve(hash, 1);
+       fhdr->identification = htonl(id);
+}
+
 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 {
        struct sk_buff *frag;
index a0ecdf596f2fbf0ccec406ea092ee045458df5e5..14f46af17704f813a5c86b36e8c6521e23698b81 100644 (file)
@@ -265,9 +265,6 @@ static int ip6_tnl_create2(struct net_device *dev)
        int err;
 
        t = netdev_priv(dev);
-       err = ip6_tnl_dev_init(dev);
-       if (err < 0)
-               goto out;
 
        err = register_netdevice(dev);
        if (err < 0)
@@ -1433,6 +1430,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 
 
 static const struct net_device_ops ip6_tnl_netdev_ops = {
+       .ndo_init       = ip6_tnl_dev_init,
        .ndo_uninit     = ip6_tnl_dev_uninit,
        .ndo_start_xmit = ip6_tnl_xmit,
        .ndo_do_ioctl   = ip6_tnl_ioctl,
@@ -1514,16 +1512,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
        struct ip6_tnl *t = netdev_priv(dev);
        struct net *net = dev_net(dev);
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
-       int err = ip6_tnl_dev_init_gen(dev);
-
-       if (err)
-               return err;
 
        t->parms.proto = IPPROTO_IPV6;
        dev_hold(dev);
 
-       ip6_tnl_link_config(t);
-
        rcu_assign_pointer(ip6n->tnls_wc[0], t);
        return 0;
 }
index 7af5aee75d982327e7b258f72091780e83a01cc4..a1beb59a841eb1c03778df0cba1bf376c3bbd64c 100644 (file)
@@ -78,7 +78,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, 0, 0);
        else
-               ip6_update_pmtu(skb, net, info, 0, 0);
+               ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
        xfrm_state_put(x);
 }
 
index 3d2c81a66d6a10e150ff0c2b0190d1f08b073a6d..a5d465105b69375cab96217d0afd2485da303261 100644 (file)
@@ -6,29 +6,6 @@
 #include <net/ipv6.h>
 #include <net/ip6_fib.h>
 
-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
-{
-       static atomic_t ipv6_fragmentation_id;
-       int ident;
-
-#if IS_ENABLED(CONFIG_IPV6)
-       if (rt && !(rt->dst.flags & DST_NOPEER)) {
-               struct inet_peer *peer;
-               struct net *net;
-
-               net = dev_net(rt->dst.dev);
-               peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
-               if (peer) {
-                       fhdr->identification = htonl(inet_getid(peer, 0));
-                       inet_putpeer(peer);
-                       return;
-               }
-       }
-#endif
-       ident = atomic_inc_return(&ipv6_fragmentation_id);
-       fhdr->identification = htonl(ident);
-}
-EXPORT_SYMBOL(ipv6_select_ident);
 
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
index 8d1c2064056365e64775f2a8557cd00669293a99..5f0d294b36cd206549792ac8c8f88d2735fb8a14 100644 (file)
@@ -159,6 +159,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        fl6.saddr = np->saddr;
        fl6.daddr = *daddr;
        fl6.flowi6_mark = sk->sk_mark;
+       fl6.flowi6_uid = sock_i_uid(sk);
        fl6.fl6_icmp_type = user_icmph.icmp6_type;
        fl6.fl6_icmp_code = user_icmph.icmp6_code;
        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
index dff1f4b2c668f94fcb48582ce77975e9b214091b..a4bf16d0d3d0912580979ff11e7448bfe896ef5d 100644 (file)
@@ -761,6 +761,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
        memset(&fl6, 0, sizeof(fl6));
 
        fl6.flowi6_mark = sk->sk_mark;
+       fl6.flowi6_uid = sock_i_uid(sk);
 
        if (sin6) {
                if (addr_len < SIN6_LEN_RFC2133)
index 18e27efa1bc31db0ea47eef6d109c971ede82a69..03b1e6fde278509719cdde2a993ef80f7e598ca1 100644 (file)
@@ -1149,7 +1149,7 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
 }
 
 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
-                    int oif, u32 mark)
+                    int oif, u32 mark, kuid_t uid)
 {
        const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
        struct dst_entry *dst;
@@ -1162,6 +1162,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
+       fl6.flowi6_uid = uid;
 
        dst = ip6_route_output(net, NULL, &fl6);
        if (!dst->error)
@@ -1173,7 +1174,7 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
        ip6_update_pmtu(skb, sock_net(sk), mtu,
-                       sk->sk_bound_dev_if, sk->sk_mark);
+                       sk->sk_bound_dev_if, sk->sk_mark, sock_i_uid(sk));
 }
 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
 
@@ -2250,6 +2251,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
        [RTA_PRIORITY]          = { .type = NLA_U32 },
        [RTA_METRICS]           = { .type = NLA_NESTED },
        [RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
+       [RTA_UID]               = { .type = NLA_U32 },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2636,6 +2638,12 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
        if (tb[RTA_OIF])
                oif = nla_get_u32(tb[RTA_OIF]);
 
+       if (tb[RTA_UID])
+               fl6.flowi6_uid = make_kuid(current_user_ns(),
+                                          nla_get_u32(tb[RTA_UID]));
+       else
+               fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
+
        if (iif) {
                struct net_device *dev;
                int flags = 0;
index 540d58921007bdcc31900ac4dc4722e9eb5351d4..4ddf67c6355bc1662016caba331b785d7cb8e3b4 100644 (file)
@@ -101,19 +101,19 @@ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
        for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) {
                if (local == t->parms.iph.saddr &&
                    remote == t->parms.iph.daddr &&
-                   (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
+                   (!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
                    (t->dev->flags & IFF_UP))
                        return t;
        }
        for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) {
                if (remote == t->parms.iph.daddr &&
-                   (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
+                   (!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
                    (t->dev->flags & IFF_UP))
                        return t;
        }
        for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) {
                if (local == t->parms.iph.saddr &&
-                   (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
+                   (!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
                    (t->dev->flags & IFF_UP))
                        return t;
        }
@@ -919,7 +919,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                iph->ttl        =       iph6->hop_limit;
 
        skb->ip_summed = CHECKSUM_NONE;
-       ip_select_ident(skb, skb_dst(skb), NULL);
+       ip_select_ident(skb, NULL);
        iptunnel_xmit(skb, dev);
        return NETDEV_TX_OK;
 
index 1efbc6f44a6a73d727ffdb0c1a2e1d71051bcaf5..ba8622daffd7eec4de498f55c4c242af39be0e99 100644 (file)
@@ -243,6 +243,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                fl6.flowi6_mark = ireq->ir_mark;
                fl6.fl6_dport = inet_rsk(req)->rmt_port;
                fl6.fl6_sport = inet_sk(sk)->inet_sport;
+               fl6.flowi6_uid = sock_i_uid(sk);
                security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 
                dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
index 747374c9887639a931c3969c0feebfa85cbdd330..0acad490d9d77c2152db760e7f5b490d6456e115 100644 (file)
@@ -252,6 +252,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = usin->sin6_port;
        fl6.fl6_sport = inet->inet_sport;
+       fl6.flowi6_uid = sock_i_uid(sk);
 
        final_p = fl6_update_dst(&fl6, np->opt, &final);
 
@@ -1653,6 +1654,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
 #endif
+       .mtu_reduced       = tcp_v6_mtu_reduced,
 };
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -1684,6 +1686,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
 #endif
+       .mtu_reduced       = tcp_v4_mtu_reduced,
 };
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -1921,7 +1924,6 @@ struct proto tcpv6_prot = {
        .sendpage               = tcp_sendpage,
        .backlog_rcv            = tcp_v6_do_rcv,
        .release_cb             = tcp_release_cb,
-       .mtu_reduced            = tcp_v6_mtu_reduced,
        .hash                   = tcp_v6_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
index c46539a1df565c3abf4dd87926f21b58e371e567..e06772c4bccbefc2e9396e1b10171aef9debfe46 100644 (file)
@@ -1151,6 +1151,7 @@ do_udp_sendmsg:
                fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
 
        fl6.flowi6_mark = sk->sk_mark;
+       fl6.flowi6_uid = sock_i_uid(sk);
 
        if (msg->msg_controllen) {
                opt = &opt_space;
index 9a0e5874e73e97f328f9f69a554e2e4e92c068d8..c3ae2411650c703171e61fda811add6ad281c614 100644 (file)
@@ -756,7 +756,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
        /* If PMTU discovery was enabled, use the MTU that was discovered */
        dst = sk_dst_get(tunnel->sock);
        if (dst != NULL) {
-               u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
+               u32 pmtu = dst_mtu(dst);
+
                if (pmtu != 0)
                        session->mtu = session->mru = pmtu -
                                PPPOL2TP_HEADER_OVERHEAD;
@@ -1365,7 +1366,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
        int err;
 
        if (level != SOL_PPPOL2TP)
-               return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+               return -EINVAL;
 
        if (optlen < sizeof(int))
                return -EINVAL;
@@ -1491,7 +1492,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
        struct pppol2tp_session *ps;
 
        if (level != SOL_PPPOL2TP)
-               return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+               return -EINVAL;
 
        if (get_user(len, optlen))
                return -EFAULT;
index fc94937cd7b356a87ff91b0114f0663ac5105143..e606e4a113e127e571c095f8071e7ac1ddc2fbbc 100644 (file)
@@ -4395,8 +4395,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        rcu_read_unlock();
 
        if (bss->wmm_used && bss->uapsd_supported &&
-           (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD) &&
-           sdata->wmm_acm != 0xff) {
+           (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
                assoc_data->uapsd = true;
                ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
        } else {
index a02bef35b134e28e6d5ad8d6da38c505c309e756..d68d6cfac3b5a98385789cf81907740ab3a47477 100644 (file)
@@ -448,7 +448,7 @@ static void rate_fixup_ratelist(struct ieee80211_vif *vif,
         */
        if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) {
                u32 basic_rates = vif->bss_conf.basic_rates;
-               s8 baserate = basic_rates ? ffs(basic_rates - 1) : 0;
+               s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0;
 
                rate = &sband->bitrates[rates[0].idx];
 
index fae73b0ef14b9b40be4a7de5b44e8ccabb0a7e96..85bc6d498b46f59b76658881cf8e55aac2df6e34 100644 (file)
@@ -1585,11 +1585,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
        sc = le16_to_cpu(hdr->seq_ctrl);
        frag = sc & IEEE80211_SCTL_FRAG;
 
-       if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
-                  is_multicast_ether_addr(hdr->addr1))) {
-               /* not fragmented */
+       if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+               goto out;
+
+       if (is_multicast_ether_addr(hdr->addr1)) {
+               rx->local->dot11MulticastReceivedFrameCount++;
                goto out;
        }
+
        I802_DEBUG_INC(rx->local->rx_handlers_fragments);
 
        if (skb_linearize(rx->skb))
@@ -1682,10 +1685,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
  out:
        if (rx->sta)
                rx->sta->rx_packets++;
-       if (is_multicast_ether_addr(hdr->addr1))
-               rx->local->dot11MulticastReceivedFrameCount++;
-       else
-               ieee80211_led_rx(rx->local);
+       ieee80211_led_rx(rx->local);
        return RX_CONTINUE;
 }
 
index d566cdba24ecf3ac3dddc2d02d2bfe6cd4e03ec4..10eea23260225368b7319f579382e65946043261 100644 (file)
@@ -398,6 +398,9 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
        if (ieee80211_has_order(hdr->frame_control))
                return TX_CONTINUE;
 
+       if (ieee80211_is_probe_req(hdr->frame_control))
+               return TX_CONTINUE;
+
        /* no stations in PS mode */
        if (!atomic_read(&ps->num_sta_ps))
                return TX_CONTINUE;
@@ -447,6 +450,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 {
        struct sta_info *sta = tx->sta;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
        struct ieee80211_local *local = tx->local;
 
        if (unlikely(!sta))
@@ -457,6 +461,15 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
                     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
                int ac = skb_get_queue_mapping(tx->skb);
 
+               /* only deauth, disassoc and action are bufferable MMPDUs */
+               if (ieee80211_is_mgmt(hdr->frame_control) &&
+                   !ieee80211_is_deauth(hdr->frame_control) &&
+                   !ieee80211_is_disassoc(hdr->frame_control) &&
+                   !ieee80211_is_action(hdr->frame_control)) {
+                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+                       return TX_CONTINUE;
+               }
+
                ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
                       sta->sta.addr, sta->sta.aid, ac);
                if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -514,22 +527,8 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
 {
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
-
        if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
                return TX_CONTINUE;
-
-       /* only deauth, disassoc and action are bufferable MMPDUs */
-       if (ieee80211_is_mgmt(hdr->frame_control) &&
-           !ieee80211_is_deauth(hdr->frame_control) &&
-           !ieee80211_is_disassoc(hdr->frame_control) &&
-           !ieee80211_is_action(hdr->frame_control)) {
-               if (tx->flags & IEEE80211_TX_UNICAST)
-                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
-               return TX_CONTINUE;
-       }
-
        if (tx->flags & IEEE80211_TX_UNICAST)
                return ieee80211_tx_h_unicast_ps_buf(tx);
        else
index a083bda322b6058cf0ba6b65b604bb046c8a79fd..90e756cf6e524eebb996bbf6118265c007389f11 100644 (file)
@@ -797,7 +797,6 @@ static void ip_vs_conn_expire(unsigned long data)
                        ip_vs_control_del(cp);
 
                if (cp->flags & IP_VS_CONN_F_NFCT) {
-                       ip_vs_conn_drop_conntrack(cp);
                        /* Do not access conntracks during subsys cleanup
                         * because nf_conntrack_find_get can not be used after
                         * conntrack cleanup for the net.
index 663042e84e81ac7f89ffcbc6e458c9b475aacf8d..26b9a986a87f4fc23491b287a17d07fd847a3b01 100644 (file)
@@ -1898,7 +1898,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
        {
                .hook           = ip_vs_local_reply6,
                .owner          = THIS_MODULE,
-               .pf             = NFPROTO_IPV4,
+               .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_LOCAL_OUT,
                .priority       = NF_IP6_PRI_NAT_DST + 1,
        },
index c47444e4cf8ccc9977fa0622689b4fb55799ff4b..1692e75347593c04bd8cd93aa6d0e6887437eaf2 100644 (file)
@@ -883,7 +883,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        iph->daddr              =       cp->daddr.ip;
        iph->saddr              =       saddr;
        iph->ttl                =       old_iph->ttl;
-       ip_select_ident(skb, &rt->dst, NULL);
+       ip_select_ident(skb, NULL);
 
        /* Another hack: avoid icmp_send in ip_fragment */
        skb->local_df = 1;
@@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        iph->nexthdr            =       IPPROTO_IPV6;
        iph->payload_len        =       old_iph->payload_len;
        be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
-       iph->priority           =       old_iph->priority;
        memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
+       ipv6_change_dsfield(iph, 0, ipv6_get_dsfield(old_iph));
        iph->daddr = cp->daddr.in6;
        iph->saddr = saddr;
        iph->hop_limit          =       old_iph->hop_limit;
index 4d4d8f1d01fcbfb3e42c9e2cf7432578df4df9da..7dcc376eea5f9205d1abf76f62a8d58eb54bc788 100644 (file)
@@ -1043,6 +1043,12 @@ static int tcp_packet(struct nf_conn *ct,
                        nf_ct_kill_acct(ct, ctinfo, skb);
                        return NF_ACCEPT;
                }
+               /* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
+                * pickup with loose=1. Avoid large ESTABLISHED timeout.
+                */
+               if (new_state == TCP_CONNTRACK_ESTABLISHED &&
+                   timeout > timeouts[TCP_CONNTRACK_UNACK])
+                       timeout = timeouts[TCP_CONNTRACK_UNACK];
        } else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
                   && (old_state == TCP_CONNTRACK_SYN_RECV
                       || old_state == TCP_CONNTRACK_ESTABLISHED)
index 038eee5c8f8548787bff468c40256d52bb6655fd..2bb801e3ee8c488243e7d50cf47057c94fc394d3 100644 (file)
@@ -487,6 +487,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
        return i->status & IPS_NAT_MASK ? 1 : 0;
 }
 
+static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
+{
+       struct nf_conn_nat *nat = nfct_nat(ct);
+
+       if (nf_nat_proto_remove(ct, data))
+               return 1;
+
+       if (!nat || !nat->ct)
+               return 0;
+
+       /* This netns is being destroyed, and conntrack has nat null binding.
+        * Remove it from bysource hash, as the table will be freed soon.
+        *
+        * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
+        * will delete entry from already-freed table.
+        */
+       if (!del_timer(&ct->timeout))
+               return 1;
+
+       spin_lock_bh(&nf_nat_lock);
+       hlist_del_rcu(&nat->bysource);
+       ct->status &= ~IPS_NAT_DONE_MASK;
+       nat->ct = NULL;
+       spin_unlock_bh(&nf_nat_lock);
+
+       add_timer(&ct->timeout);
+
+       /* don't delete conntrack.  Although that would make things a lot
+        * simpler, we'd end up flushing all conntracks on nat rmmod.
+        */
+       return 0;
+}
+
 static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
 {
        struct nf_nat_proto_clean clean = {
@@ -749,7 +782,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
        struct nf_nat_proto_clean clean = {};
 
-       nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
+       nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
        synchronize_rcu();
        nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
index 962e9792e3179997db98a448a76fc909432d841f..216261dd32aeca3f9ec9b7027484e7f2bcabeba1 100644 (file)
@@ -45,7 +45,8 @@
 #define NFULNL_NLBUFSIZ_DEFAULT        NLMSG_GOODSIZE
 #define NFULNL_TIMEOUT_DEFAULT         100     /* every second */
 #define NFULNL_QTHRESH_DEFAULT         100     /* 100 packets */
-#define NFULNL_COPY_RANGE_MAX  0xFFFF  /* max packet size is limited by 16-bit struct nfattr nfa_len field */
+/* max packet size is limited by 16-bit struct nfattr nfa_len field */
+#define NFULNL_COPY_RANGE_MAX  (0xFFFF - NLA_HDRLEN)
 
 #define PRINTR(x, args...)     do { if (net_ratelimit()) \
                                     printk(x, ## args); } while (0);
@@ -255,6 +256,8 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
 
        case NFULNL_COPY_PACKET:
                inst->copy_mode = mode;
+               if (range == 0)
+                       range = NFULNL_COPY_RANGE_MAX;
                inst->copy_range = min_t(unsigned int,
                                         range, NFULNL_COPY_RANGE_MAX);
                break;
@@ -345,26 +348,25 @@ nfulnl_alloc_skb(u32 peer_portid, unsigned int inst_size, unsigned int pkt_size)
        return skb;
 }
 
-static int
+static void
 __nfulnl_send(struct nfulnl_instance *inst)
 {
-       int status = -1;
-
        if (inst->qlen > 1) {
                struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
                                                 NLMSG_DONE,
                                                 sizeof(struct nfgenmsg),
                                                 0);
-               if (!nlh)
+               if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n",
+                             inst->skb->len, skb_tailroom(inst->skb))) {
+                       kfree_skb(inst->skb);
                        goto out;
+               }
        }
-       status = nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
-                                  MSG_DONTWAIT);
-
+       nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
+                         MSG_DONTWAIT);
+out:
        inst->qlen = 0;
        inst->skb = NULL;
-out:
-       return status;
 }
 
 static void
@@ -647,7 +649,8 @@ nfulnl_log_packet(struct net *net,
                + nla_total_size(sizeof(u_int32_t))     /* gid */
                + nla_total_size(plen)                  /* prefix */
                + nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
-               + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));
+               + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp))
+               + nla_total_size(sizeof(struct nfgenmsg));      /* NLMSG_DONE */
 
        if (in && skb_mac_header_was_set(skb)) {
                size +=   nla_total_size(skb->dev->hard_header_len)
@@ -676,8 +679,7 @@ nfulnl_log_packet(struct net *net,
                break;
 
        case NFULNL_COPY_PACKET:
-               if (inst->copy_range == 0
-                   || inst->copy_range > skb->len)
+               if (inst->copy_range > skb->len)
                        data_len = skb->len;
                else
                        data_len = inst->copy_range;
@@ -690,8 +692,7 @@ nfulnl_log_packet(struct net *net,
                goto unlock_and_release;
        }
 
-       if (inst->skb &&
-           size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) {
+       if (inst->skb && size > skb_tailroom(inst->skb)) {
                /* either the queue len is too high or we don't have
                 * enough room in the skb left. flush to userspace. */
                __nfulnl_flush(inst);
index 5352b2d2d5bf644cffd04ed5a571924f623f65d5..2b8199f68785d1a8f17cc65049826976d921f7c6 100644 (file)
@@ -227,22 +227,23 @@ nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
        spin_unlock_bh(&queue->lock);
 }
 
-static void
+static int
 nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
 {
        int i, j = 0;
        int plen = 0; /* length of skb->head fragment */
+       int ret;
        struct page *page;
        unsigned int offset;
 
        /* dont bother with small payloads */
-       if (len <= skb_tailroom(to)) {
-               skb_copy_bits(from, 0, skb_put(to, len), len);
-               return;
-       }
+       if (len <= skb_tailroom(to))
+               return skb_copy_bits(from, 0, skb_put(to, len), len);
 
        if (hlen) {
-               skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+               ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+               if (unlikely(ret))
+                       return ret;
                len -= hlen;
        } else {
                plen = min_t(int, skb_headlen(from), len);
@@ -260,6 +261,11 @@ nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
        to->len += len + plen;
        to->data_len += len + plen;
 
+       if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
+               skb_tx_error(from);
+               return -ENOMEM;
+       }
+
        for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
                if (!len)
                        break;
@@ -270,6 +276,8 @@ nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
                j++;
        }
        skb_shinfo(to)->nr_frags = j;
+
+       return 0;
 }
 
 static int nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet)
@@ -355,13 +363,16 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 
        skb = nfnetlink_alloc_skb(&init_net, size, queue->peer_portid,
                                  GFP_ATOMIC);
-       if (!skb)
+       if (!skb) {
+               skb_tx_error(entskb);
                return NULL;
+       }
 
        nlh = nlmsg_put(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg), 0);
        if (!nlh) {
+               skb_tx_error(entskb);
                kfree_skb(skb);
                return NULL;
        }
@@ -481,13 +492,15 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
                nla->nla_type = NFQA_PAYLOAD;
                nla->nla_len = nla_attr_size(data_len);
 
-               nfqnl_zcopy(skb, entskb, data_len, hlen);
+               if (nfqnl_zcopy(skb, entskb, data_len, hlen))
+                       goto nla_put_failure;
        }
 
        nlh->nlmsg_len = skb->len;
        return skb;
 
 nla_put_failure:
+       skb_tx_error(entskb);
        kfree_skb(skb);
        net_err_ratelimited("nf_queue: error creating packet message\n");
        return NULL;
index be34adde692fb4918268d23715913c0273157ec9..afe41178c9fb7f8ff00533c5b148a6b0ff068346 100644 (file)
@@ -500,7 +500,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
                while (nlk->cb != NULL && netlink_dump_space(nlk)) {
                        err = netlink_dump(sk);
                        if (err < 0) {
-                               sk->sk_err = err;
+                               sk->sk_err = -err;
                                sk->sk_error_report(sk);
                                break;
                        }
@@ -571,7 +571,7 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
         * after validation, the socket and the ring may only be used by a
         * single process, otherwise we fall back to copying.
         */
-       if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
+       if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
            atomic_read(&nlk->mapped) > 1)
                excl = false;
 
@@ -2272,7 +2272,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
                ret = netlink_dump(sk);
                if (ret) {
-                       sk->sk_err = ret;
+                       sk->sk_err = -ret;
                        sk->sk_error_report(sk);
                }
        }
index 894b6cbdd9295841e6782268b8743fcb63358391..c4779ca590322115ae3eadc3daa7ef7583b02997 100644 (file)
@@ -40,6 +40,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 
 static int make_writable(struct sk_buff *skb, int write_len)
 {
+       if (!pskb_may_pull(skb, write_len))
+               return -ENOMEM;
+
        if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
                return 0;
 
@@ -68,6 +71,8 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
 
        vlan_set_encap_proto(skb, vhdr);
        skb->mac_header += VLAN_HLEN;
+       if (skb_network_offset(skb) < ETH_HLEN)
+               skb_set_network_header(skb, ETH_HLEN);
        skb_reset_mac_len(skb);
 
        return 0;
index e8b5a0dfca21bc4920d9272db57c95174dc153e3..81b4b816f13132b8bddc1cd3ca28a7bdad835066 100644 (file)
@@ -565,6 +565,7 @@ static void init_prb_bdqc(struct packet_sock *po,
        p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
        p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 
+       p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
        prb_init_ft_ops(p1, req_u);
        prb_setup_retire_blk_timer(po, tx_ring);
        prb_open_block(p1, pbd);
@@ -1803,6 +1804,18 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                        if ((int)snaplen < 0)
                                snaplen = 0;
                }
+       } else if (unlikely(macoff + snaplen >
+                           GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
+               u32 nval;
+
+               nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
+               pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
+                           snaplen, nval, macoff);
+               snaplen = nval;
+               if (unlikely((int)snaplen < 0)) {
+                       snaplen = 0;
+                       macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
+               }
        }
        spin_lock(&sk->sk_receive_queue.lock);
        h.raw = packet_current_rx_frame(po, skb,
@@ -3642,6 +3655,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                        goto out;
                if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
                        goto out;
+               if (po->tp_version >= TPACKET_V3 &&
+                   (int)(req->tp_block_size -
+                         BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
+                       goto out;
                if (unlikely(req->tp_frame_size < po->tp_hdrlen +
                                        po->tp_reserve))
                        goto out;
index 1035fa2d909c7f18c100266c85ec07bce4a73a97..ca086c0c2c085cb3f3ff00834644d05d8769e7e3 100644 (file)
@@ -29,6 +29,7 @@ struct tpacket_kbdq_core {
        char            *pkblk_start;
        char            *pkblk_end;
        int             kblk_size;
+       unsigned int    max_frame_len;
        unsigned int    knum_blocks;
        uint64_t        knxt_seq_num;
        char            *prev;
index 28a7e225d8cc00909cc5a27c708e8215d1321ecf..c4c257979eee5a5bd7826be6d53e8d1e340115a2 100755 (executable)
@@ -21,7 +21,6 @@
 #include <linux/rfkill.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <asm/gpio.h>
 #include <linux/delay.h>
 #include <linux/rfkill-bt.h>
 #include <linux/rfkill-wlan.h>
index 9e9c5e716af7ab168a27c77ebc7b9d77aa74014b..986643d32b6ff377cc3c5b2d4f9cc42ac9c324a3 100755 (executable)
@@ -20,7 +20,6 @@
 #include <linux/rfkill.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <asm/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/delay.h>
 #include <linux/rfkill-wlan.h>
index 229b3c3fb6c98d7bc63404bcfa8e4c0dd43f396c..ca4a1a1b8e693b0e743e64a5d3410d133ff7ae17 100644 (file)
@@ -1213,6 +1213,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
        asoc->c = new->c;
        asoc->peer.rwnd = new->peer.rwnd;
        asoc->peer.sack_needed = new->peer.sack_needed;
+       asoc->peer.auth_capable = new->peer.auth_capable;
        asoc->peer.i = new->peer.i;
        sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
                         asoc->peer.i.initial_tsn, GFP_ATOMIC);
@@ -1658,6 +1659,8 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
         * ack chunk whose serial number matches that of the request.
         */
        list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
+               if (sctp_chunk_pending(ack))
+                       continue;
                if (ack->subh.addip_hdr->serial == serial) {
                        sctp_chunk_hold(ack);
                        return ack;
index 7a19117254db8e26fb2d7d5e1f21ac4c58a81266..bc2fae7e67be6d0625058a80190ba1b737fffe97 100644 (file)
@@ -874,8 +874,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
                list_add(&cur_key->key_list, sh_keys);
 
        cur_key->key = key;
-       sctp_auth_key_hold(key);
-
        return 0;
 nomem:
        if (!replace)
index 3221d073448ce0356ac24078a3f69c6501e91371..49c58eadbfa2af0d05de9b024bbab711e845639c 100644 (file)
@@ -147,18 +147,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
                } else {
                        /* Nothing to do. Next chunk in the packet, please. */
                        ch = (sctp_chunkhdr_t *) chunk->chunk_end;
-
                        /* Force chunk->skb->data to chunk->chunk_end.  */
-                       skb_pull(chunk->skb,
-                                chunk->chunk_end - chunk->skb->data);
-
-                       /* Verify that we have at least chunk headers
-                        * worth of buffer left.
-                        */
-                       if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
-                               sctp_chunk_free(chunk);
-                               chunk = queue->in_progress = NULL;
-                       }
+                       skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
+                       /* We are guaranteed to pull a SCTP header. */
                }
        }
 
@@ -194,24 +185,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
        skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
        chunk->subh.v = NULL; /* Subheader is no longer valid.  */
 
-       if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
+       if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
+           skb_tail_pointer(chunk->skb)) {
                /* This is not a singleton */
                chunk->singleton = 0;
        } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
-               /* RFC 2960, Section 6.10  Bundling
-                *
-                * Partial chunks MUST NOT be placed in an SCTP packet.
-                * If the receiver detects a partial chunk, it MUST drop
-                * the chunk.
-                *
-                * Since the end of the chunk is past the end of our buffer
-                * (which contains the whole packet, we can freely discard
-                * the whole packet.
-                */
-               sctp_chunk_free(chunk);
-               chunk = queue->in_progress = NULL;
-
-               return NULL;
+               /* Discard inside state machine. */
+               chunk->pdiscard = 1;
+               chunk->chunk_end = skb_tail_pointer(chunk->skb);
        } else {
                /* We are at the end of the packet, so mark the chunk
                 * in case we need to send a SACK.
index 0beb2f9c8a7c6f816a6161d66e44cbd3087dd624..b6f5fc3127b944bcc68766f815e36f4af0e34a3e 100644 (file)
@@ -618,7 +618,7 @@ out:
        return err;
 no_route:
        kfree_skb(nskb);
-       IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+       IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
 
        /* FIXME: Returning the 'err' will effect all the associations
         * associated with a socket, although only one of the paths of the
index 87e244be899a78dd076cf1b4e7b1301c748df8d9..29fc16f3633f211bd4ec658f2dd7db4333a3c211 100644 (file)
@@ -2596,6 +2596,9 @@ do_addr_param:
                addr_param = param.v + sizeof(sctp_addip_param_t);
 
                af = sctp_get_af_specific(param_type2af(param.p->type));
+               if (af == NULL)
+                       break;
+
                af->from_addr_param(&addr, addr_param,
                                    htons(asoc->peer.port), 0);
 
@@ -3094,50 +3097,63 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
        return SCTP_ERROR_NO_ERROR;
 }
 
-/* Verify the ASCONF packet before we process it.  */
-int sctp_verify_asconf(const struct sctp_association *asoc,
-                      struct sctp_paramhdr *param_hdr, void *chunk_end,
-                      struct sctp_paramhdr **errp) {
-       sctp_addip_param_t *asconf_param;
+/* Verify the ASCONF packet before we process it. */
+bool sctp_verify_asconf(const struct sctp_association *asoc,
+                       struct sctp_chunk *chunk, bool addr_param_needed,
+                       struct sctp_paramhdr **errp)
+{
+       sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr;
        union sctp_params param;
-       int length, plen;
+       bool addr_param_seen = false;
 
-       param.v = (sctp_paramhdr_t *) param_hdr;
-       while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
-               length = ntohs(param.p->length);
-               *errp = param.p;
-
-               if (param.v > chunk_end - length ||
-                   length < sizeof(sctp_paramhdr_t))
-                       return 0;
+       sctp_walk_params(param, addip, addip_hdr.params) {
+               size_t length = ntohs(param.p->length);
 
+               *errp = param.p;
                switch (param.p->type) {
+               case SCTP_PARAM_ERR_CAUSE:
+                       break;
+               case SCTP_PARAM_IPV4_ADDRESS:
+                       if (length != sizeof(sctp_ipv4addr_param_t))
+                               return false;
+                       addr_param_seen = true;
+                       break;
+               case SCTP_PARAM_IPV6_ADDRESS:
+                       if (length != sizeof(sctp_ipv6addr_param_t))
+                               return false;
+                       addr_param_seen = true;
+                       break;
                case SCTP_PARAM_ADD_IP:
                case SCTP_PARAM_DEL_IP:
                case SCTP_PARAM_SET_PRIMARY:
-                       asconf_param = (sctp_addip_param_t *)param.v;
-                       plen = ntohs(asconf_param->param_hdr.length);
-                       if (plen < sizeof(sctp_addip_param_t) +
-                           sizeof(sctp_paramhdr_t))
-                               return 0;
+                       /* In ASCONF chunks, these need to be first. */
+                       if (addr_param_needed && !addr_param_seen)
+                               return false;
+                       length = ntohs(param.addip->param_hdr.length);
+                       if (length < sizeof(sctp_addip_param_t) +
+                                    sizeof(sctp_paramhdr_t))
+                               return false;
                        break;
                case SCTP_PARAM_SUCCESS_REPORT:
                case SCTP_PARAM_ADAPTATION_LAYER_IND:
                        if (length != sizeof(sctp_addip_param_t))
-                               return 0;
-
+                               return false;
                        break;
                default:
-                       break;
+                       /* This is unknown to us, reject! */
+                       return false;
                }
-
-               param.v += WORD_ROUND(length);
        }
 
-       if (param.v != chunk_end)
-               return 0;
+       /* Remaining sanity checks. */
+       if (addr_param_needed && !addr_param_seen)
+               return false;
+       if (!addr_param_needed && addr_param_seen)
+               return false;
+       if (param.v != chunk->chunk_end)
+               return false;
 
-       return 1;
+       return true;
 }
 
 /* Process an incoming ASCONF chunk with the next expected serial no. and
@@ -3146,16 +3162,17 @@ int sctp_verify_asconf(const struct sctp_association *asoc,
 struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
                                       struct sctp_chunk *asconf)
 {
+       sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;
+       bool all_param_pass = true;
+       union sctp_params param;
        sctp_addiphdr_t         *hdr;
        union sctp_addr_param   *addr_param;
        sctp_addip_param_t      *asconf_param;
        struct sctp_chunk       *asconf_ack;
-
        __be16  err_code;
        int     length = 0;
        int     chunk_len;
        __u32   serial;
-       int     all_param_pass = 1;
 
        chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
        hdr = (sctp_addiphdr_t *)asconf->skb->data;
@@ -3183,9 +3200,14 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
                goto done;
 
        /* Process the TLVs contained within the ASCONF chunk. */
-       while (chunk_len > 0) {
+       sctp_walk_params(param, addip, addip_hdr.params) {
+               /* Skip preceding address parameters. */
+               if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
+                   param.p->type == SCTP_PARAM_IPV6_ADDRESS)
+                       continue;
+
                err_code = sctp_process_asconf_param(asoc, asconf,
-                                                    asconf_param);
+                                                    param.addip);
                /* ADDIP 4.1 A7)
                 * If an error response is received for a TLV parameter,
                 * all TLVs with no response before the failed TLV are
@@ -3193,28 +3215,20 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
                 * the failed response are considered unsuccessful unless
                 * a specific success indication is present for the parameter.
                 */
-               if (SCTP_ERROR_NO_ERROR != err_code)
-                       all_param_pass = 0;
-
+               if (err_code != SCTP_ERROR_NO_ERROR)
+                       all_param_pass = false;
                if (!all_param_pass)
-                       sctp_add_asconf_response(asconf_ack,
-                                                asconf_param->crr_id, err_code,
-                                                asconf_param);
+                       sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
+                                                err_code, param.addip);
 
                /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
                 * an IP address sends an 'Out of Resource' in its response, it
                 * MUST also fail any subsequent add or delete requests bundled
                 * in the ASCONF.
                 */
-               if (SCTP_ERROR_RSRC_LOW == err_code)
+               if (err_code == SCTP_ERROR_RSRC_LOW)
                        goto done;
-
-               /* Move to the next ASCONF param. */
-               length = ntohs(asconf_param->param_hdr.length);
-               asconf_param = (void *)asconf_param + length;
-               chunk_len -= length;
        }
-
 done:
        asoc->peer.addip_serial++;
 
index 6eb26403de6a7d9b9b0103ed8fc58a9534dfec04..c52763a2629719d38afa624f85cd83756d51969f 100644 (file)
@@ -177,6 +177,9 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
 {
        __u16 chunk_length = ntohs(chunk->chunk_hdr->length);
 
+       /* Previously already marked? */
+       if (unlikely(chunk->pdiscard))
+               return 0;
        if (unlikely(chunk_length < required_length))
                return 0;
 
@@ -1782,9 +1785,22 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
        /* Update the content of current association. */
        sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
        sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
-       sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
-                       SCTP_STATE(SCTP_STATE_ESTABLISHED));
-       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+       if (sctp_state(asoc, SHUTDOWN_PENDING) &&
+           (sctp_sstate(asoc->base.sk, CLOSING) ||
+            sock_flag(asoc->base.sk, SOCK_DEAD))) {
+               /* if we're currently in SHUTDOWN_PENDING, but the socket
+                * has been closed by user, don't transition to ESTABLISHED.
+                * Instead trigger SHUTDOWN bundled with COOKIE_ACK.
+                */
+               sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+               return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
+                                                    SCTP_ST_CHUNK(0), NULL,
+                                                    commands);
+       } else {
+               sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+                               SCTP_STATE(SCTP_STATE_ESTABLISHED));
+               sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+       }
        return SCTP_DISPOSITION_CONSUME;
 
 nomem_ev:
@@ -3580,9 +3596,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
        struct sctp_chunk       *asconf_ack = NULL;
        struct sctp_paramhdr    *err_param = NULL;
        sctp_addiphdr_t         *hdr;
-       union sctp_addr_param   *addr_param;
        __u32                   serial;
-       int                     length;
 
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
@@ -3607,17 +3621,8 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
        hdr = (sctp_addiphdr_t *)chunk->skb->data;
        serial = ntohl(hdr->serial);
 
-       addr_param = (union sctp_addr_param *)hdr->params;
-       length = ntohs(addr_param->p.length);
-       if (length < sizeof(sctp_paramhdr_t))
-               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
-                          (void *)addr_param, commands);
-
        /* Verify the ASCONF chunk before processing it. */
-       if (!sctp_verify_asconf(asoc,
-                           (sctp_paramhdr_t *)((void *)addr_param + length),
-                           (void *)chunk->chunk_end,
-                           &err_param))
+       if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
                return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                                                  (void *)err_param, commands);
 
@@ -3735,10 +3740,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
        rcvd_serial = ntohl(addip_hdr->serial);
 
        /* Verify the ASCONF-ACK chunk before processing it. */
-       if (!sctp_verify_asconf(asoc,
-           (sctp_paramhdr_t *)addip_hdr->params,
-           (void *)asconf_ack->chunk_end,
-           &err_param))
+       if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
                return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                           (void *)err_param, commands);
 
index fe0ba7488bdf5f55a51392e28611ec96cd596c2b..29299dcabfbb77be13856478fb7feb667efcf854 100644 (file)
@@ -368,8 +368,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
                tbl.data = &net->sctp.auth_enable;
 
        ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-
-       if (write) {
+       if (write && ret == 0) {
                struct sock *sk = net->sctp.ctl_sock;
 
                net->sctp.auth_enable = new_value;
index 10c018a5b9fee066c35c364c79cd6fef77e31580..ca907f2f5e5ae1311fde4d8e936096c05235b31e 100644 (file)
@@ -373,9 +373,10 @@ fail:
  * specification [SCTP] and any extensions for a list of possible
  * error formats.
  */
-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
-       const struct sctp_association *asoc, struct sctp_chunk *chunk,
-       __u16 flags, gfp_t gfp)
+struct sctp_ulpevent *
+sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
+                               struct sctp_chunk *chunk, __u16 flags,
+                               gfp_t gfp)
 {
        struct sctp_ulpevent *event;
        struct sctp_remote_error *sre;
@@ -394,8 +395,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
        /* Copy the skb to a new skb with room for us to prepend
         * notification with.
         */
-       skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
-                             0, gfp);
+       skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
 
        /* Pull off the rest of the cause TLV from the chunk.  */
        skb_pull(chunk->skb, elen);
@@ -406,62 +406,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
        event = sctp_skb2event(skb);
        sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
 
-       sre = (struct sctp_remote_error *)
-               skb_push(skb, sizeof(struct sctp_remote_error));
+       sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
 
        /* Trim the buffer to the right length.  */
-       skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
+       skb_trim(skb, sizeof(*sre) + elen);
 
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_type:
-        *   It should be SCTP_REMOTE_ERROR.
-        */
+       /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
+       memset(sre, 0, sizeof(*sre));
        sre->sre_type = SCTP_REMOTE_ERROR;
-
-       /*
-        * Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_flags: 16 bits (unsigned integer)
-        *   Currently unused.
-        */
        sre->sre_flags = 0;
-
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_length: sizeof (__u32)
-        *
-        * This field is the total length of the notification data,
-        * including the notification header.
-        */
        sre->sre_length = skb->len;
-
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_error: 16 bits (unsigned integer)
-        * This value represents one of the Operational Error causes defined in
-        * the SCTP specification, in network byte order.
-        */
        sre->sre_error = cause;
-
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_assoc_id: sizeof (sctp_assoc_t)
-        *
-        * The association id field, holds the identifier for the association.
-        * All notifications for a given association have the same association
-        * identifier.  For TCP style socket, this field is ignored.
-        */
        sctp_ulpevent_set_owner(event, asoc);
        sre->sre_assoc_id = sctp_assoc2id(asoc);
 
        return event;
-
 fail:
        return NULL;
 }
@@ -906,7 +865,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
        return notification->sn_header.sn_type;
 }
 
-/* Copy out the sndrcvinfo into a msghdr.  */
+/* RFC6458, Section 5.3.2. SCTP Header Information Structure
+ * (SCTP_SNDRCV, DEPRECATED)
+ */
 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
                                   struct msghdr *msghdr)
 {
@@ -915,74 +876,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
        if (sctp_ulpevent_is_notification(event))
                return;
 
-       /* Sockets API Extensions for SCTP
-        * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
-        *
-        * sinfo_stream: 16 bits (unsigned integer)
-        *
-        * For recvmsg() the SCTP stack places the message's stream number in
-        * this value.
-       */
+       memset(&sinfo, 0, sizeof(sinfo));
        sinfo.sinfo_stream = event->stream;
-       /* sinfo_ssn: 16 bits (unsigned integer)
-        *
-        * For recvmsg() this value contains the stream sequence number that
-        * the remote endpoint placed in the DATA chunk.  For fragmented
-        * messages this is the same number for all deliveries of the message
-        * (if more than one recvmsg() is needed to read the message).
-        */
        sinfo.sinfo_ssn = event->ssn;
-       /* sinfo_ppid: 32 bits (unsigned integer)
-        *
-        * In recvmsg() this value is
-        * the same information that was passed by the upper layer in the peer
-        * application.  Please note that byte order issues are NOT accounted
-        * for and this information is passed opaquely by the SCTP stack from
-        * one end to the other.
-        */
        sinfo.sinfo_ppid = event->ppid;
-       /* sinfo_flags: 16 bits (unsigned integer)
-        *
-        * This field may contain any of the following flags and is composed of
-        * a bitwise OR of these values.
-        *
-        * recvmsg() flags:
-        *
-        * SCTP_UNORDERED - This flag is present when the message was sent
-        *                 non-ordered.
-        */
        sinfo.sinfo_flags = event->flags;
-       /* sinfo_tsn: 32 bit (unsigned integer)
-        *
-        * For the receiving side, this field holds a TSN that was
-        * assigned to one of the SCTP Data Chunks.
-        */
        sinfo.sinfo_tsn = event->tsn;
-       /* sinfo_cumtsn: 32 bit (unsigned integer)
-        *
-        * This field will hold the current cumulative TSN as
-        * known by the underlying SCTP layer.  Note this field is
-        * ignored when sending and only valid for a receive
-        * operation when sinfo_flags are set to SCTP_UNORDERED.
-        */
        sinfo.sinfo_cumtsn = event->cumtsn;
-       /* sinfo_assoc_id: sizeof (sctp_assoc_t)
-        *
-        * The association handle field, sinfo_assoc_id, holds the identifier
-        * for the association announced in the COMMUNICATION_UP notification.
-        * All notifications for a given association have the same identifier.
-        * Ignored for one-to-one style sockets.
-        */
        sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
-
-       /* context value that is set via SCTP_CONTEXT socket option. */
+       /* Context value that is set via SCTP_CONTEXT socket option. */
        sinfo.sinfo_context = event->asoc->default_rcv_context;
-
        /* These fields are not used while receiving. */
        sinfo.sinfo_timetolive = 0;
 
        put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
-                sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
+                sizeof(sinfo), &sinfo);
 }
 
 /* Do accounting for bytes received and hold a reference to the association
index 422759bae60d64f42a3bb488bca993d6fca22733..5c62c5e89b4633524b173edcbe7f442597e6e9bb 100644 (file)
@@ -683,6 +683,7 @@ static struct svc_xprt_class svc_udp_class = {
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_udp_ops,
        .xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
+       .xcl_ident = XPRT_TRANSPORT_UDP,
 };
 
 static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
@@ -1275,6 +1276,7 @@ static struct svc_xprt_class svc_tcp_class = {
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_tcp_ops,
        .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
+       .xcl_ident = XPRT_TRANSPORT_TCP,
 };
 
 void svc_init_xprt_sock(void)
index 095363eee764b3ff496a745f56e4fa646f5f02c0..42ce6bfc729d4f775741b5170dc99aa8a50102cb 100644 (file)
@@ -1290,7 +1290,7 @@ struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
                }
        }
        spin_unlock(&xprt_list_lock);
-       printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
+       dprintk("RPC: transport (%d) not supported\n", args->ident);
        return ERR_PTR(-EIO);
 
 found:
index 62e4f9bcc387182f829cc5c7d7559b0c16cfee34..ed36cb52cd8678422e45a773ecdafac6c3a1447e 100644 (file)
@@ -89,6 +89,7 @@ struct svc_xprt_class svc_rdma_class = {
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_rdma_ops,
        .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
+       .xcl_ident = XPRT_TRANSPORT_RDMA,
 };
 
 struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
index e5f3da507823678240df70fa26404b8d7ae00d36..bf2755419ec6fe1da9ecb993460cba495d804e75 100644 (file)
@@ -531,6 +531,7 @@ receive:
 
                buf = node->bclink.deferred_head;
                node->bclink.deferred_head = buf->next;
+               buf->next = NULL;
                node->bclink.deferred_size--;
                goto receive;
        }
index 95d93678bf55458ad194f998245dbd7987dfa004..43ab4b03d306119b5546fe3c8cdae4432cee28b1 100644 (file)
@@ -6642,6 +6642,9 @@ void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
        void *hdr = ((void **)skb->cb)[1];
        struct nlattr *data = ((void **)skb->cb)[2];
 
+       /* clear CB data for netlink core to own from now on */
+       memset(skb->cb, 0, sizeof(skb->cb));
+
        nla_nest_end(skb, data);
        genlmsg_end(skb, hdr);
 
@@ -9153,7 +9156,8 @@ static struct genl_ops nl80211_ops[] = {
                .doit = nl80211_vendor_cmd,
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = NL80211_FLAG_NEED_RTNL,
+               .internal_flags = NL80211_FLAG_NEED_WIPHY |
+                                 NL80211_FLAG_NEED_RTNL,
        },
 };
 
@@ -10973,11 +10977,11 @@ int nl80211_init(void)
        err = genl_register_mc_group(&nl80211_fam, &nl80211_testmode_mcgrp);
        if (err)
                goto err_out;
-#endif
 
        err = genl_register_mc_group(&nl80211_fam, &nl80211_vendor_mcgrp);
        if (err)
                goto err_out;
+#endif
 
        err = netlink_register_notifier(&nl80211_netlink_notifier);
        if (err)
index 4db2177a69ea1f684d537b54256b8bf17b8a1707..ab406d0462b7985703106edd9dfcb41536fc1767 100644 (file)
@@ -55,7 +55,7 @@
  * also linked into the probe response struct.
  */
 
-#define IEEE80211_SCAN_RESULT_EXPIRE   (3 * HZ)
+#define IEEE80211_SCAN_RESULT_EXPIRE   (7 * HZ)
 
 static void bss_free(struct cfg80211_internal_bss *bss)
 {
index 5755bc14abbd8220ff39a5f271043b5d4165d947..bc5a75b1aef803cdee7f728e28eb8b64f859f3da 100644 (file)
@@ -1972,7 +1972,8 @@ TRACE_EVENT(cfg80211_michael_mic_failure,
                MAC_ASSIGN(addr, addr);
                __entry->key_type = key_type;
                __entry->key_id = key_id;
-               memcpy(__entry->tsc, tsc, 6);
+               if (tsc)
+                       memcpy(__entry->tsc, tsc, 6);
        ),
        TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
                  NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,
index e9c6ac724fef153efb0c8ae3ceea6da99407ae7c..beb86b500adffd406e65b26d0cf9abeab3891703 100644 (file)
@@ -103,7 +103,7 @@ config INTEL_TXT
 config LSM_MMAP_MIN_ADDR
        int "Low address space for LSM to protect from user allocation"
        depends on SECURITY && SECURITY_SELINUX
-       default 32768 if ARM
+       default 32768 if ARM || (ARM64 && COMPAT)
        default 65536
        help
          This is the portion of low virtual memory which should be protected
index 859abdaac1eafb62fddb202eb6fcdf3777e9daa2..9aaa4e72cc1fc602afada0596a0f69c6c4d08eee 100644 (file)
@@ -629,7 +629,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
         * There is no exception for unconfined as change_hat is not
         * available.
         */
-       if (current->no_new_privs)
+       if (task_no_new_privs(current))
                return -EPERM;
 
        /* released below */
@@ -780,7 +780,7 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec,
         * no_new_privs is set because this aways results in a reduction
         * of permissions.
         */
-       if (current->no_new_privs && !unconfined(profile)) {
+       if (task_no_new_privs(current) && !unconfined(profile)) {
                put_cred(cred);
                return -EPERM;
        }
index 40aedd9f73eaf76c7ccab0374d5ae48db3ee2a31..4a8cbfeef8b3ab41837beaad3ad3d34bc1487ad3 100644 (file)
@@ -65,7 +65,6 @@ extern int apparmor_initialized __initdata;
 char *aa_split_fqname(char *args, char **ns_name);
 void aa_info_message(const char *str);
 void *kvmalloc(size_t size);
-void kvfree(void *buffer);
 
 
 /**
index 7430298116d6b2b71b9e6c0c55d6b2ff4867ee96..ce8d9a84ab2d1f3f8906b72324e688536fef7b1b 100644 (file)
@@ -103,35 +103,3 @@ void *kvmalloc(size_t size)
        }
        return buffer;
 }
-
-/**
- * do_vfree - workqueue routine for freeing vmalloced memory
- * @work: data to be freed
- *
- * The work_struct is overlaid to the data being freed, as at the point
- * the work is scheduled the data is no longer valid, be its freeing
- * needs to be delayed until safe.
- */
-static void do_vfree(struct work_struct *work)
-{
-       vfree(work);
-}
-
-/**
- * kvfree - free an allocation do by kvmalloc
- * @buffer: buffer to free (MAYBE_NULL)
- *
- * Free a buffer allocated by kvmalloc
- */
-void kvfree(void *buffer)
-{
-       if (is_vmalloc_addr(buffer)) {
-               /* Data is no longer valid so just use the allocated space
-                * as the work_struct
-                */
-               struct work_struct *work = (struct work_struct *) buffer;
-               INIT_WORK(work, do_vfree);
-               schedule_work(work);
-       } else
-               kfree(buffer);
-}
index 5870fdc224b436d954f240a546b5398bbc3ce028..0405522995c5742c29d7c792bd9c17cefd40b795 100644 (file)
@@ -432,6 +432,9 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
                cpu_caps->inheritable.cap[i] = le32_to_cpu(caps.data[i].inheritable);
        }
 
+       cpu_caps->permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+       cpu_caps->inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+
        return 0;
 }
 
index b9b2bebeb3505596041d11ac47468f523def36c4..b980a6ce5c79769989eab48021d6baae586669b1 100644 (file)
@@ -286,9 +286,12 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
 {
        const struct evm_ima_xattr_data *xattr_data = xattr_value;
 
-       if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
-           && (xattr_data->type == EVM_XATTR_HMAC))
-               return -EPERM;
+       if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
+               if (!xattr_value_len)
+                       return -EINVAL;
+               if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
+                       return -EPERM;
+       }
        return evm_protect_xattr(dentry, xattr_name, xattr_value,
                                 xattr_value_len);
 }
index e00585266536f5dbb35583d03f9b9ec272692ec5..2eca0e3f7b868a7fb3103b0f5fadd0c0c137d78b 100644 (file)
@@ -423,6 +423,13 @@ static int sb_finish_set_opts(struct super_block *sb)
        if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
                sbsec->flags |= SE_SBLABELSUPP;
 
+       /*
+        * Special handling for rootfs. Is genfs but supports
+        * setting SELinux context on in-core inodes.
+        */
+       if (strncmp(sb->s_type->name, "rootfs", sizeof("rootfs")) == 0)
+               sbsec->flags |= SE_SBLABELSUPP;
+
        /* Initialize the root inode. */
        rc = inode_doinit_with_dentry(root_inode, root);
 
@@ -437,6 +444,7 @@ next_inode:
                                list_entry(sbsec->isec_head.next,
                                           struct inode_security_struct, list);
                struct inode *inode = isec->inode;
+               list_del_init(&isec->list);
                spin_unlock(&sbsec->isec_lock);
                inode = igrab(inode);
                if (inode) {
@@ -445,7 +453,6 @@ next_inode:
                        iput(inode);
                }
                spin_lock(&sbsec->isec_lock);
-               list_del_init(&isec->list);
                goto next_inode;
        }
        spin_unlock(&sbsec->isec_lock);
index 3fdf998ad0576d864ffca38691832aec3ff7e659..0d430adee88d6a142917fa77d720b962cd44f91b 100644 (file)
@@ -490,9 +490,6 @@ static int snd_compress_check_input(struct snd_compr_params *params)
        if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
                return -EINVAL;
 
-       if (!(params->codec.sample_rate & SNDRV_PCM_RATE_8000_192000))
-               return -EINVAL;
-
        return 0;
 }
 
index e79baa11b60eb15526ba3679537720b2f29d0de6..08070e1eefeb256541595cb8ac4ea1e227ed4089 100644 (file)
@@ -679,7 +679,7 @@ int snd_info_card_free(struct snd_card *card)
  * snd_info_get_line - read one line from the procfs buffer
  * @buffer: the procfs buffer
  * @line: the buffer to store
- * @len: the max. buffer size - 1
+ * @len: the max. buffer size
  *
  * Reads one line from the buffer and stores the string.
  *
@@ -699,7 +699,7 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len)
                        buffer->stop = 1;
                if (c == '\n')
                        break;
-               if (len) {
+               if (len > 1) {
                        len--;
                        *line++ = c;
                }
index af49721ba0e38310183adbe01dcb1f8acaa3d4ab..c4ac3c1e19af9a7b8966271671802b9c8565542e 100644 (file)
@@ -206,6 +206,8 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
        if (err < 0)
                return err;
 
+       if (clear_user(src, sizeof(*src)))
+               return -EFAULT;
        if (put_user(status.state, &src->state) ||
            compat_put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
            compat_put_timespec(&status.tstamp, &src->tstamp) ||
index 3284940a4af2e319d37634edf7579e29606230c7..8eddece217bb8bb59950bdc793bb1b2224877362 100644 (file)
@@ -1782,14 +1782,16 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
 {
        struct snd_pcm_hw_params *params = arg;
        snd_pcm_format_t format;
-       int channels, width;
+       int channels;
+       ssize_t frame_size;
 
        params->fifo_size = substream->runtime->hw.fifo_size;
        if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
                format = params_format(params);
                channels = params_channels(params);
-               width = snd_pcm_format_physical_width(format);
-               params->fifo_size /= width * channels;
+               frame_size = snd_pcm_format_size(format, channels);
+               if (frame_size > 0)
+                       params->fifo_size /= (unsigned)frame_size;
        }
        return 0;
 }
index f92818155958d2a166d3a90f1f2ed2670e9217d2..175dca44c97e9eb72d552063adcc49bf6b1bcc14 100644 (file)
@@ -3197,7 +3197,7 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
 
 #ifndef ARCH_HAS_DMA_MMAP_COHERENT
 /* This should be defined / handled globally! */
-#ifdef CONFIG_ARM
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
 #define ARCH_HAS_DMA_MMAP_COHERENT
 #endif
 #endif
index daac7c7ebe9e0837b17558b704ba75c6aed2b306..3397ddbdfc0c7e4ab6ba445047522420535dc8d7 100644 (file)
@@ -856,8 +856,8 @@ config SND_VIRTUOSO
        select SND_JACK if INPUT=y || INPUT=SND
        help
          Say Y here to include support for sound cards based on the
-         Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS,
-         Essence ST (Deluxe), and Essence STX.
+         Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS, DSX,
+         Essence ST (Deluxe), and Essence STX (II).
          Support for the HDAV1.3 (Deluxe) and HDAV1.3 Slim is experimental;
          for the Xense, missing.
 
index cae36597aa718c3fc13abe95221a2308627c2c8c..0a34b5f1c47571b6c30ae5c04f53180b1e76e0ef 100644 (file)
@@ -85,6 +85,8 @@ snd_emu10k1_ops_setup(struct snd_emux *emux)
  * get more voice for pcm
  *
  * terminate most inactive voice and give it as a pcm voice.
+ *
+ * voice_lock is already held.
  */
 int
 snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
@@ -92,12 +94,10 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
        struct snd_emux *emu;
        struct snd_emux_voice *vp;
        struct best_voice best[V_END];
-       unsigned long flags;
        int i;
 
        emu = hw->synth;
 
-       spin_lock_irqsave(&emu->voice_lock, flags);
        lookup_voices(emu, hw, best, 1); /* no OFF voices */
        for (i = 0; i < V_END; i++) {
                if (best[i].voice >= 0) {
@@ -113,11 +113,9 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
                        vp->emu->num_voices--;
                        vp->ch = -1;
                        vp->state = SNDRV_EMUX_ST_OFF;
-                       spin_unlock_irqrestore(&emu->voice_lock, flags);
                        return ch;
                }
        }
-       spin_unlock_irqrestore(&emu->voice_lock, flags);
 
        /* not found */
        return -ENOMEM;
index 01fefbe29e4a47eba2ddc9b86c25da45ac2aca18..4126f3d9edb633443c0ecd486fc78ac7c151a865 100644 (file)
@@ -4379,6 +4379,9 @@ static void ca0132_download_dsp(struct hda_codec *codec)
        return; /* NOP */
 #endif
 
+       if (spec->dsp_state == DSP_DOWNLOAD_FAILED)
+               return; /* don't retry failures */
+
        chipio_enable_clocks(codec);
        spec->dsp_state = DSP_DOWNLOADING;
        if (!ca0132_download_dsp_images(codec))
@@ -4555,7 +4558,8 @@ static int ca0132_init(struct hda_codec *codec)
        struct auto_pin_cfg *cfg = &spec->autocfg;
        int i;
 
-       spec->dsp_state = DSP_DOWNLOAD_INIT;
+       if (spec->dsp_state != DSP_DOWNLOAD_FAILED)
+               spec->dsp_state = DSP_DOWNLOAD_INIT;
        spec->curr_chip_addx = INVALID_CHIP_ADDRESS;
 
        snd_hda_power_up(codec);
@@ -4666,6 +4670,7 @@ static int patch_ca0132(struct hda_codec *codec)
        codec->spec = spec;
        spec->codec = codec;
 
+       spec->dsp_state = DSP_DOWNLOAD_INIT;
        spec->num_mixers = 1;
        spec->mixers[0] = ca0132_mixer;
 
index 0b85e857f1c7299e519e8fb27fdfe194b2c83a04..4008034b6ebed9449e038e189b37e82f4c56d093 100644 (file)
@@ -175,6 +175,8 @@ static void alc_fix_pll(struct hda_codec *codec)
                            spec->pll_coef_idx);
        val = snd_hda_codec_read(codec, spec->pll_nid, 0,
                                 AC_VERB_GET_PROC_COEF, 0);
+       if (val == -1)
+               return;
        snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_COEF_INDEX,
                            spec->pll_coef_idx);
        snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_PROC_COEF,
@@ -316,6 +318,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
                case 0x10ec0885:
                case 0x10ec0887:
                /*case 0x10ec0889:*/ /* this causes an SPDIF problem */
+               case 0x10ec0900:
                        alc889_coef_init(codec);
                        break;
                case 0x10ec0888:
@@ -2250,6 +2253,7 @@ static int patch_alc882(struct hda_codec *codec)
        switch (codec->vendor_id) {
        case 0x10ec0882:
        case 0x10ec0885:
+       case 0x10ec0900:
                break;
        default:
                /* ALC883 and variants */
@@ -2677,6 +2681,8 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
 static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
 {
        int val = alc_read_coef_idx(codec, 0x04);
+       if (val == -1)
+               return;
        if (power_up)
                val |= 1 << 11;
        else
@@ -3822,27 +3828,30 @@ static void alc269_fill_coef(struct hda_codec *codec)
        if ((alc_get_coef0(codec) & 0x00ff) == 0x017) {
                val = alc_read_coef_idx(codec, 0x04);
                /* Power up output pin */
-               alc_write_coef_idx(codec, 0x04, val | (1<<11));
+               if (val != -1)
+                       alc_write_coef_idx(codec, 0x04, val | (1<<11));
        }
 
        if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
                val = alc_read_coef_idx(codec, 0xd);
-               if ((val & 0x0c00) >> 10 != 0x1) {
+               if (val != -1 && (val & 0x0c00) >> 10 != 0x1) {
                        /* Capless ramp up clock control */
                        alc_write_coef_idx(codec, 0xd, val | (1<<10));
                }
                val = alc_read_coef_idx(codec, 0x17);
-               if ((val & 0x01c0) >> 6 != 0x4) {
+               if (val != -1 && (val & 0x01c0) >> 6 != 0x4) {
                        /* Class D power on reset */
                        alc_write_coef_idx(codec, 0x17, val | (1<<7));
                }
        }
 
        val = alc_read_coef_idx(codec, 0xd); /* Class D */
-       alc_write_coef_idx(codec, 0xd, val | (1<<14));
+       if (val != -1)
+               alc_write_coef_idx(codec, 0xd, val | (1<<14));
 
        val = alc_read_coef_idx(codec, 0x4); /* HP */
-       alc_write_coef_idx(codec, 0x4, val | (1<<11));
+       if (val != -1)
+               alc_write_coef_idx(codec, 0x4, val | (1<<11));
 }
 
 /*
index 0c521b7752b2ef708384c985a0286927c8f3c468..5dd4c4af9c9f900955c16901c1a11b69d6a9e857 100644 (file)
@@ -84,6 +84,7 @@ enum {
        STAC_DELL_EQ,
        STAC_ALIENWARE_M17X,
        STAC_92HD89XX_HP_FRONT_JACK,
+       STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
        STAC_92HD73XX_MODELS
 };
 
@@ -538,8 +539,8 @@ static void stac_init_power_map(struct hda_codec *codec)
                if (snd_hda_jack_tbl_get(codec, nid))
                        continue;
                if (def_conf == AC_JACK_PORT_COMPLEX &&
-                   !(spec->vref_mute_led_nid == nid ||
-                     is_jack_detectable(codec, nid))) {
+                   spec->vref_mute_led_nid != nid &&
+                   is_jack_detectable(codec, nid)) {
                        snd_hda_jack_detect_enable_callback(codec, nid,
                                                            STAC_PWR_EVENT,
                                                            jack_update_power);
@@ -1783,6 +1784,11 @@ static const struct hda_pintbl stac92hd89xx_hp_front_jack_pin_configs[] = {
        {}
 };
 
+static const struct hda_pintbl stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs[] = {
+       { 0x0e, 0x400000f0 },
+       {}
+};
+
 static void stac92hd73xx_fixup_ref(struct hda_codec *codec,
                                   const struct hda_fixup *fix, int action)
 {
@@ -1905,6 +1911,10 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
        [STAC_92HD89XX_HP_FRONT_JACK] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = stac92hd89xx_hp_front_jack_pin_configs,
+       },
+       [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs,
        }
 };
 
@@ -1965,6 +1975,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
                      "Alienware M17x", STAC_ALIENWARE_M17X),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
                      "Alienware M17x R3", STAC_DELL_EQ),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1927,
+                               "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
        SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
                                "unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
        {} /* terminator */
@@ -3635,11 +3647,18 @@ static int stac_parse_auto_config(struct hda_codec *codec)
                        return err;
        }
 
-       stac_init_power_map(codec);
-
        return 0;
 }
 
+static int stac_build_controls(struct hda_codec *codec)
+{
+       int err = snd_hda_gen_build_controls(codec);
+
+       if (err < 0)
+               return err;
+       stac_init_power_map(codec);
+       return 0;
+}
 
 static int stac_init(struct hda_codec *codec)
 {
@@ -3782,7 +3801,7 @@ static void stac_set_power_state(struct hda_codec *codec, hda_nid_t fg,
 #endif /* CONFIG_PM */
 
 static const struct hda_codec_ops stac_patch_ops = {
-       .build_controls = snd_hda_gen_build_controls,
+       .build_controls = stac_build_controls,
        .build_pcms = snd_hda_gen_build_pcms,
        .init = stac_init,
        .free = stac_free,
index 64b9fda5f04a71f4d0898d083468a6ed94437d8f..dbbbacfd535e2439a294a26744986a3cb8357232 100644 (file)
@@ -53,6 +53,7 @@ static DEFINE_PCI_DEVICE_TABLE(xonar_ids) = {
        { OXYGEN_PCI_SUBID(0x1043, 0x835e) },
        { OXYGEN_PCI_SUBID(0x1043, 0x838e) },
        { OXYGEN_PCI_SUBID(0x1043, 0x8522) },
+       { OXYGEN_PCI_SUBID(0x1043, 0x85f4) },
        { OXYGEN_PCI_SUBID_BROKEN_EEPROM },
        { }
 };
index c8c7f2c9b355ae8f4dde5d8deb4b68a11a64f296..e0260593166936a70c15f5e12cd0829e519fbbb1 100644 (file)
  */
 
 /*
- * Xonar Essence ST (Deluxe)/STX
- * -----------------------------
+ * Xonar Essence ST (Deluxe)/STX (II)
+ * ----------------------------------
  *
  * CMI8788:
  *
@@ -1138,6 +1138,14 @@ int get_xonar_pcm179x_model(struct oxygen *chip,
                chip->model.resume = xonar_stx_resume;
                chip->model.set_dac_params = set_pcm1796_params;
                break;
+       case 0x85f4:
+               chip->model = model_xonar_st;
+               /* TODO: daughterboard support */
+               chip->model.shortname = "Xonar STX II";
+               chip->model.init = xonar_stx_init;
+               chip->model.resume = xonar_stx_resume;
+               chip->model.set_dac_params = set_pcm1796_params;
+               break;
        default:
                return -EINVAL;
        }
index 9b7746c9546f03180a9e15f988e87071eaa15a4a..76bfeb3c3e30cb56b040270567835d432667f3b7 100644 (file)
@@ -2234,7 +2234,7 @@ static int max98090_probe(struct snd_soc_codec *codec)
        /* Register for interrupts */
        dev_dbg(codec->dev, "irq = %d\n", max98090->irq);
 
-       ret = request_threaded_irq(max98090->irq, NULL,
+       ret = devm_request_threaded_irq(codec->dev, max98090->irq, NULL,
                max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                "max98090_interrupt", codec);
        if (ret < 0) {
index 875c402be3cecab696d556726a13ad172f63e1e4..e4468bd211d9727fa81c2636a556e5b157eef280 100644 (file)
@@ -1075,7 +1075,7 @@ static ssize_t dsp_reg_store(struct device *dev,
        unsigned int val=0,addr=0;
        int i;
 
-       printk("register \"%s\" count=%d\n",buf,count);
+       dev_dbg(codec->dev, "register \"%s\" count=%zu\n", buf, count);
 
        for(i=0;i<count;i++) //address
        {
@@ -1117,10 +1117,11 @@ static ssize_t dsp_reg_store(struct device *dev,
                        break;
                }
        }
-       printk("addr=0x%x val=0x%x\n",addr,val);
+       dev_dbg(codec->dev, "addr=0x%x val=0x%x\n", addr, val);
        if(i==count)
        {
-               printk("0x%04x = 0x%04x\n",addr,rt3261_dsp_read(codec, addr));
+               dev_dbg(codec->dev, "0x%04x = 0x%04x\n",
+                       addr, rt3261_dsp_read(codec, addr));
        }
        else
        {
@@ -1223,7 +1224,7 @@ int rt_codec_dsp_ioctl_common(struct snd_hwdep *hw, struct file *file, unsigned
                dev_err(codec->dev, "copy_from_user faild\n");
                return -EFAULT;
        }
-       dev_dbg(codec->dev, "rt_codec.number=%d\n",rt_codec.number);
+       dev_dbg(codec->dev, "rt_codec.number=%zu\n", rt_codec.number);
        buf = kmalloc(sizeof(*buf) * rt_codec.number, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
index e2865dcad602c1aef5401f25b6ce0d868941044c..1350dadb900da92a2886f175f4345676c3562aef 100755 (executable)
@@ -197,8 +197,8 @@ int rt3261_ioctl_common(struct snd_hwdep *hw, struct file *file,
                dev_err(codec->dev,"copy_from_user faild\n");
                return -EFAULT;
        }
-       dev_dbg(codec->dev, "%s(): rt_codec.number=%d, cmd=%d\n",
-                       __func__, rt_codec.number, cmd);
+       dev_dbg(codec->dev, "%s(): rt_codec.number=%zu, cmd=%d\n",
+               __func__, rt_codec.number, cmd);
        buf = kmalloc(sizeof(*buf) * rt_codec.number, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
index 62e7efcf2d0667a1f205f5f4706f8c6f21f6b223..d74aec90f324ab55c8d4096e7f55d358f2ef9fc9 100755 (executable)
@@ -198,8 +198,8 @@ int rt5640_ioctl_common(struct snd_hwdep *hw, struct file *file,
                dev_err(codec->dev,"copy_from_user faild\n");
                return -EFAULT;
        }
-       dev_dbg(codec->dev, "%s(): rt56xx.number=%d, cmd=%d\n",
-                       __func__, rt56xx.number, cmd);
+       dev_dbg(codec->dev, "%s(): rt56xx.number=%zu, cmd=%d\n",
+               __func__, rt56xx.number, cmd);
        buf = kmalloc(sizeof(*buf) * rt56xx.number, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
index afc6dcd59846ace5ca204ca99db80298d3549d60..e9067232f9da872f66c9578541b85cb22c7eeca4 100755 (executable)
@@ -43,8 +43,8 @@ static int rt56xx_hwdep_ioctl_common(struct snd_hwdep *hw,
                dev_err(codec->dev,"copy_from_user faild\n");
                return -EFAULT;
        }
-       dev_dbg(codec->dev, "%s(): rt56xx.number=%d, cmd=%d\n",
-                       __func__, rt56xx.number, cmd);
+       dev_dbg(codec->dev, "%s(): rt56xx.number=%zu, cmd=%d\n",
+               __func__, rt56xx.number, cmd);
        buf = kmalloc(sizeof(*buf) * rt56xx.number, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
index 0b993704f05a9b6a0ef5a2ef6224287981f499ec..ab952a62a8d5f70894e45e5863c31eb9bf5546d0 100644 (file)
@@ -43,8 +43,8 @@ static int rt_codec_hwdep_ioctl_common(struct snd_hwdep *hw,
                dev_err(codec->dev,"copy_from_user faild\n");
                return -EFAULT;
        }
-       dev_dbg(codec->dev, "%s(): rt_codec.number=%d, cmd=%d\n",
-                       __func__, rt_codec.number, cmd);
+       dev_dbg(codec->dev, "%s(): rt_codec.number=%zu, cmd=%d\n",
+               __func__, rt_codec.number, cmd);
        buf = kmalloc(sizeof(*buf) * rt_codec.number, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
index 6dbb17d050c9f6ad2ea7480117e5f8c8ab89c210..ca1e999026e5c7b6b8adb2078fcbb7e8fe2f0fa6 100644 (file)
@@ -1284,3 +1284,5 @@ int wm_adsp2_init(struct wm_adsp *adsp, bool dvfs)
        return 0;
 }
 EXPORT_SYMBOL_GPL(wm_adsp2_init);
+
+MODULE_LICENSE("GPL v2");
index 81490febac6dc108decb890ac318ffbec9c2ab8b..ade9d6379c1b02869ce351e058f7e461aabe30d6 100644 (file)
@@ -632,8 +632,17 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev,
 {
        u32 fmt;
        u32 tx_rotate = (word_length / 4) & 0x7;
-       u32 rx_rotate = (32 - word_length) / 4;
        u32 mask = (1ULL << word_length) - 1;
+       /*
+        * For captured data we should not rotate, inversion and masking is
+        * enoguh to get the data to the right position:
+        * Format         data from bus         after reverse (XRBUF)
+        * S16_LE:      |LSB|MSB|xxx|xxx|       |xxx|xxx|MSB|LSB|
+        * S24_3LE:     |LSB|DAT|MSB|xxx|       |xxx|MSB|DAT|LSB|
+        * S24_LE:      |LSB|DAT|MSB|xxx|       |xxx|MSB|DAT|LSB|
+        * S32_LE:      |LSB|DAT|DAT|MSB|       |MSB|DAT|DAT|LSB|
+        */
+       u32 rx_rotate = 0;
 
        /*
         * if s BCLK-to-LRCLK ratio has been configured via the set_clkdiv()
index 6f4dd7543e829db8e73577f1184c667a596c89d0..95a9b07bbe966380d6b76ff36839c6975e1dcf5c 100644 (file)
@@ -757,9 +757,7 @@ static int pxa_ssp_remove(struct snd_soc_dai *dai)
                          SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \
                          SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
 
-#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
-                           SNDRV_PCM_FMTBIT_S24_LE |   \
-                           SNDRV_PCM_FMTBIT_S32_LE)
+#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
 static const struct snd_soc_dai_ops pxa_ssp_dai_ops = {
        .startup        = pxa_ssp_startup,
index 82ebb1a51479a7afdd4276b135e7c306092bbb3a..5c9b5e4f94c3dd8b7f3b0de704fa99d37554e714 100644 (file)
@@ -853,11 +853,9 @@ static int i2s_suspend(struct snd_soc_dai *dai)
 {
        struct i2s_dai *i2s = to_info(dai);
 
-       if (dai->active) {
-               i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
-               i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
-               i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
-       }
+       i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
+       i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
+       i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
 
        return 0;
 }
@@ -866,11 +864,9 @@ static int i2s_resume(struct snd_soc_dai *dai)
 {
        struct i2s_dai *i2s = to_info(dai);
 
-       if (dai->active) {
-               writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
-               writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
-               writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
-       }
+       writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
+       writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
+       writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
 
        return 0;
 }
index e9ec917cf529ae13779b488f2ec235453c46dadb..7b4bf0cc1f19342c5660c2abe0528b381396a256 100644 (file)
@@ -1882,6 +1882,7 @@ int soc_dpcm_runtime_update(struct snd_soc_dapm_widget *widget)
                        dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_PLAYBACK);
                }
 
+               dpcm_path_put(&list);
 capture:
                /* skip if FE doesn't have capture capability */
                if (!fe->cpu_dai->driver->capture.channels_min)
index ebe91440a068a8f500901652892d5a3575b68e63..c89a5bf5c00e6b3dccf2741ef7b20b4253ac91b1 100644 (file)
@@ -799,6 +799,11 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl,
        return changed;
 }
 
+static void kctl_private_value_free(struct snd_kcontrol *kctl)
+{
+       kfree((void *)kctl->private_value);
+}
+
 static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
        int validx, int bUnitID)
 {
@@ -833,6 +838,7 @@ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
                return -ENOMEM;
        }
 
+       kctl->private_free = kctl_private_value_free;
        err = snd_ctl_add(mixer->chip->card, kctl);
        if (err < 0)
                return err;
index 8b75bcf136f6d17e91053f93820ae3b7042da027..d5bed1d2571384922241d1e670e288c1805c177d 100644 (file)
@@ -385,6 +385,36 @@ YAMAHA_DEVICE(0x105d, NULL),
                }
        }
 },
+{
+       USB_DEVICE(0x0499, 0x1509),
+       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+               /* .vendor_name = "Yamaha", */
+               /* .product_name = "Steinberg UR22", */
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_MIDI_YAMAHA
+                       },
+                       {
+                               .ifnum = 4,
+                               .type = QUIRK_IGNORE_INTERFACE
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
 {
        USB_DEVICE(0x0499, 0x150a),
        .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
index 045d028fda5f5176d8f2e2ab966732efa2da1a4d..970ac6946150905d164af642eeddb571e6c95d77 100644 (file)
@@ -3,25 +3,34 @@ include $(CLEAR_VARS)
 
 XML_H := $(shell cd $(LOCAL_PATH) && make events_xml.h defaults_xml.h)
 
-LOCAL_CFLAGS += -Wall -O3 -mthumb-interwork -fno-exceptions -DETCDIR=\"/etc\" -Ilibsensors
-
 LOCAL_SRC_FILES := \
+       AnnotateListener.cpp \
        Buffer.cpp \
+       CCNDriver.cpp \
+       CPUFreqDriver.cpp \
        CapturedXML.cpp \
        Child.cpp \
+       Command.cpp \
        ConfigurationXML.cpp \
+       DiskIODriver.cpp \
        Driver.cpp \
        DriverSource.cpp \
        DynBuf.cpp \
        EventsXML.cpp \
        ExternalSource.cpp \
+       FSDriver.cpp \
        Fifo.cpp \
-       Hwmon.cpp \
+       FtraceDriver.cpp \
+       FtraceSource.cpp \
+       HwmonDriver.cpp \
        KMod.cpp \
        LocalCapture.cpp \
        Logging.cpp \
        main.cpp \
+       MaliVideoDriver.cpp \
+       MemInfoDriver.cpp\
        Monitor.cpp \
+       NetDriver.cpp \
        OlySocket.cpp \
        OlyUtility.cpp \
        PerfBuffer.cpp \
@@ -32,6 +41,7 @@ LOCAL_SRC_FILES := \
        Sender.cpp \
        SessionData.cpp \
        SessionXML.cpp \
+       Setup.cpp \
        Source.cpp \
        StreamlineSetup.cpp \
        UEvent.cpp \
@@ -55,7 +65,10 @@ LOCAL_SRC_FILES := \
        mxml/mxml-set.c \
        mxml/mxml-string.c
 
-LOCAL_C_INCLUDES := $(LOCAL_PATH) 
+LOCAL_CFLAGS += -Wall -O3 -fno-exceptions -pthread -DETCDIR=\"/etc\" -Ilibsensors -fPIE
+LOCAL_LDFLAGS += -fPIE -pie
+
+LOCAL_C_INCLUDES := $(LOCAL_PATH)
 
 LOCAL_MODULE := gatord
 LOCAL_MODULE_TAGS := optional
diff --git a/tools/gator/daemon/AnnotateListener.cpp b/tools/gator/daemon/AnnotateListener.cpp
new file mode 100644 (file)
index 0000000..50110b4
--- /dev/null
@@ -0,0 +1,69 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "AnnotateListener.h"
+
+#include <unistd.h>
+
+#include "OlySocket.h"
+
+struct AnnotateClient {
+       AnnotateClient *next;
+       int fd;
+};
+
+AnnotateListener::AnnotateListener() : mClients(NULL), mSock(NULL) {
+}
+
+AnnotateListener::~AnnotateListener() {
+       close();
+       delete mSock;
+}
+
+void AnnotateListener::setup() {
+       mSock = new OlyServerSocket(8082);
+}
+
+int AnnotateListener::getFd() {
+       return mSock->getFd();
+}
+
+void AnnotateListener::handle() {
+       AnnotateClient *const client = new AnnotateClient();
+       client->fd = mSock->acceptConnection();
+       client->next = mClients;
+       mClients = client;
+}
+
+void AnnotateListener::close() {
+       mSock->closeServerSocket();
+       while (mClients != NULL) {
+               ::close(mClients->fd);
+               AnnotateClient *next = mClients->next;
+               delete mClients;
+               mClients = next;
+       }
+}
+
+void AnnotateListener::signal() {
+       const char ch = 0;
+       AnnotateClient **ptr = &mClients;
+       AnnotateClient *client = mClients;
+       while (client != NULL) {
+               if (write(client->fd, &ch, sizeof(ch)) != 1) {
+                       ::close(client->fd);
+                       AnnotateClient *next = client->next;
+                       delete client;
+                       *ptr = next;
+                       client = next;
+                       continue;
+               }
+               ptr = &client->next;
+               client = client->next;
+       }
+}
diff --git a/tools/gator/daemon/AnnotateListener.h b/tools/gator/daemon/AnnotateListener.h
new file mode 100644 (file)
index 0000000..cdefef1
--- /dev/null
@@ -0,0 +1,31 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+class AnnotateClient;
+class OlyServerSocket;
+
+class AnnotateListener {
+public:
+       AnnotateListener();
+       ~AnnotateListener();
+
+       void setup();
+       int getFd();
+
+       void handle();
+       void close();
+       void signal();
+
+private:
+       AnnotateClient *mClients;
+       OlyServerSocket *mSock;
+
+       // Intentionally unimplemented
+       AnnotateListener(const AnnotateListener &);
+       AnnotateListener &operator=(const AnnotateListener &);
+};
diff --git a/tools/gator/daemon/Application.mk b/tools/gator/daemon/Application.mk
new file mode 100644 (file)
index 0000000..3ada471
--- /dev/null
@@ -0,0 +1,3 @@
+APP_PLATFORM := android-8
+# Replace armeabi-v7a with arm64-v8a to build an arm64 gatord or with armeabi to build an ARM11 gatord
+APP_ABI := armeabi-v7a
index 93557dabed9f1835910243ab7cff0802ad3d38fe..8fa6280150694d1b681a209d5f1eded1930a4686 100644 (file)
 #define mask (mSize - 1)
 
 enum {
-       CODE_PEA    = 1,
-       CODE_KEYS   = 2,
-       CODE_FORMAT = 3,
-       CODE_MAPS   = 4,
-       CODE_COMM   = 5,
+       CODE_PEA         = 1,
+       CODE_KEYS        = 2,
+       CODE_FORMAT      = 3,
+       CODE_MAPS        = 4,
+       CODE_COMM        = 5,
+       CODE_KEYS_OLD    = 6,
+       CODE_ONLINE_CPU  = 7,
+       CODE_OFFLINE_CPU = 8,
+       CODE_KALLSYMS    = 9,
 };
 
 // Summary Frame Messages
@@ -41,16 +45,18 @@ enum {
        /* Add another character so the length isn't 0x0a bytes */ \
        "5"
 
-Buffer::Buffer(const int32_t core, const int32_t buftype, const int size, sem_t *const readerSem) : mCore(core), mBufType(buftype), mSize(size), mReadPos(0), mWritePos(0), mCommitPos(0), mAvailable(true), mIsDone(false), mBuf(new char[mSize]), mCommitTime(gSessionData->mLiveRate), mReaderSem(readerSem) {
+Buffer::Buffer(const int32_t core, const int32_t buftype, const int size, sem_t *const readerSem) : mBuf(new char[size]), mReaderSem(readerSem), mCommitTime(gSessionData->mLiveRate), mSize(size), mReadPos(0), mWritePos(0), mCommitPos(0), mAvailable(true), mIsDone(false), mCore(core), mBufType(buftype) {
        if ((mSize & mask) != 0) {
                logg->logError(__FILE__, __LINE__, "Buffer size is not a power of 2");
                handleException();
        }
+       sem_init(&mWriterSem, 0, 0);
        frame();
 }
 
 Buffer::~Buffer() {
        delete [] mBuf;
+       sem_destroy(&mWriterSem);
 }
 
 void Buffer::write(Sender *const sender) {
@@ -58,14 +64,18 @@ void Buffer::write(Sender *const sender) {
                return;
        }
 
+       // commit and read are updated by the writer, only read them once
+       int commitPos = mCommitPos;
+       int readPos = mReadPos;
+
        // determine the size of two halves
-       int length1 = mCommitPos - mReadPos;
-       char *buffer1 = mBuf + mReadPos;
+       int length1 = commitPos - readPos;
+       char *buffer1 = mBuf + readPos;
        int length2 = 0;
        char *buffer2 = mBuf;
        if (length1 < 0) {
-               length1 = mSize - mReadPos;
-               length2 = mCommitPos;
+               length1 = mSize - readPos;
+               length2 = commitPos;
        }
 
        logg->logMessage("Sending data length1: %i length2: %i", length1, length2);
@@ -80,7 +90,10 @@ void Buffer::write(Sender *const sender) {
                sender->writeData(buffer2, length2, RESPONSE_APC_DATA);
        }
 
-       mReadPos = mCommitPos;
+       mReadPos = commitPos;
+
+       // send a notification that space is available
+       sem_post(&mWriterSem);
 }
 
 bool Buffer::commitReady() const {
@@ -167,7 +180,7 @@ void Buffer::check(const uint64_t time) {
        }
 }
 
-void Buffer::packInt(int32_t x) {
+void Buffer::packInt(char *const buf, const int size, int &writePos, int32_t x) {
        int packedBytes = 0;
        int more = true;
        while (more) {
@@ -181,14 +194,18 @@ void Buffer::packInt(int32_t x) {
                        b |= 0x80;
                }
 
-               mBuf[(mWritePos + packedBytes) & mask] = b;
+               buf[(writePos + packedBytes) & /*mask*/(size - 1)] = b;
                packedBytes++;
        }
 
-       mWritePos = (mWritePos + packedBytes) & mask;
+       writePos = (writePos + packedBytes) & /*mask*/(size - 1);
 }
 
-void Buffer::packInt64(int64_t x) {
+void Buffer::packInt(int32_t x) {
+       packInt(mBuf, mSize, mWritePos, x);
+}
+
+void Buffer::packInt64(char *const buf, const int size, int &writePos, int64_t x) {
        int packedBytes = 0;
        int more = true;
        while (more) {
@@ -202,11 +219,15 @@ void Buffer::packInt64(int64_t x) {
                        b |= 0x80;
                }
 
-               mBuf[(mWritePos + packedBytes) & mask] = b;
+               buf[(writePos + packedBytes) & /*mask*/(size - 1)] = b;
                packedBytes++;
        }
 
-       mWritePos = (mWritePos + packedBytes) & mask;
+       writePos = (writePos + packedBytes) & /*mask*/(size - 1);
+}
+
+void Buffer::packInt64(int64_t x) {
+       packInt64(mBuf, mSize, mWritePos, x);
 }
 
 void Buffer::writeBytes(const void *const data, size_t count) {
@@ -231,10 +252,12 @@ void Buffer::frame() {
        // Reserve space for the length
        mWritePos += sizeof(int32_t);
        packInt(mBufType);
-       packInt(mCore);
+       if ((mBufType == FRAME_BLOCK_COUNTER) || (mBufType == FRAME_PERF_ATTRS) || (mBufType == FRAME_PERF)) {
+               packInt(mCore);
+       }
 }
 
-void Buffer::summary(const int64_t timestamp, const int64_t uptime, const int64_t monotonicDelta, const char *const uname) {
+void Buffer::summary(const uint64_t currTime, const int64_t timestamp, const int64_t uptime, const int64_t monotonicDelta, const char *const uname) {
        packInt(MESSAGE_SUMMARY);
        writeString(NEWLINE_CANARY);
        packInt64(timestamp);
@@ -243,23 +266,24 @@ void Buffer::summary(const int64_t timestamp, const int64_t uptime, const int64_
        writeString("uname");
        writeString(uname);
        writeString("");
-       check(1);
+       check(currTime);
 }
 
-void Buffer::coreName(const int core, const int cpuid, const char *const name) {
+void Buffer::coreName(const uint64_t currTime, const int core, const int cpuid, const char *const name) {
        if (checkSpace(3 * MAXSIZE_PACK32 + 0x100)) {
                packInt(MESSAGE_CORE_NAME);
                packInt(core);
                packInt(cpuid);
                writeString(name);
        }
-       check(1);
+       check(currTime);
 }
 
 bool Buffer::eventHeader(const uint64_t curr_time) {
        bool retval = false;
        if (checkSpace(MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
-               packInt(0);     // key of zero indicates a timestamp
+               // key of zero indicates a timestamp
+               packInt(0);
                packInt64(curr_time);
                retval = true;
        }
@@ -270,7 +294,8 @@ bool Buffer::eventHeader(const uint64_t curr_time) {
 bool Buffer::eventTid(const int tid) {
        bool retval = false;
        if (checkSpace(2 * MAXSIZE_PACK32)) {
-               packInt(1);     // key of 1 indicates a tid
+               // key of 1 indicates a tid
+               packInt(1);
                packInt(tid);
                retval = true;
        }
@@ -278,87 +303,119 @@ bool Buffer::eventTid(const int tid) {
        return retval;
 }
 
-void Buffer::event(const int32_t key, const int32_t value) {
+void Buffer::event(const int key, const int32_t value) {
        if (checkSpace(2 * MAXSIZE_PACK32)) {
                packInt(key);
                packInt(value);
        }
 }
 
-void Buffer::event64(const int64_t key, const int64_t value) {
-       if (checkSpace(2 * MAXSIZE_PACK64)) {
-               packInt64(key);
+void Buffer::event64(const int key, const int64_t value) {
+       if (checkSpace(MAXSIZE_PACK64 + MAXSIZE_PACK32)) {
+               packInt(key);
                packInt64(value);
        }
 }
 
-void Buffer::pea(const struct perf_event_attr *const pea, int key) {
-       if (checkSpace(2 * MAXSIZE_PACK32 + pea->size)) {
-               packInt(CODE_PEA);
-               writeBytes(pea, pea->size);
-               packInt(key);
-       } else {
-               logg->logError(__FILE__, __LINE__, "Ran out of buffer space for perf attrs");
-               handleException();
+void Buffer::pea(const uint64_t currTime, const struct perf_event_attr *const pea, int key) {
+       while (!checkSpace(2 * MAXSIZE_PACK32 + pea->size)) {
+               sem_wait(&mWriterSem);
        }
-       // Don't know the real perf time so use 1 as it will work for now
-       check(1);
+       packInt(CODE_PEA);
+       writeBytes(pea, pea->size);
+       packInt(key);
+       check(currTime);
 }
 
-void Buffer::keys(const int count, const __u64 *const ids, const int *const keys) {
-       if (checkSpace(2 * MAXSIZE_PACK32 + count * (MAXSIZE_PACK32 + MAXSIZE_PACK64))) {
-               packInt(CODE_KEYS);
-               packInt(count);
-               for (int i = 0; i < count; ++i) {
-                       packInt64(ids[i]);
-                       packInt(keys[i]);
-               }
-       } else {
-               logg->logError(__FILE__, __LINE__, "Ran out of buffer space for perf attrs");
-               handleException();
+void Buffer::keys(const uint64_t currTime, const int count, const __u64 *const ids, const int *const keys) {
+       while (!checkSpace(2 * MAXSIZE_PACK32 + count * (MAXSIZE_PACK32 + MAXSIZE_PACK64))) {
+               sem_wait(&mWriterSem);
        }
-       check(1);
+       packInt(CODE_KEYS);
+       packInt(count);
+       for (int i = 0; i < count; ++i) {
+               packInt64(ids[i]);
+               packInt(keys[i]);
+       }
+       check(currTime);
 }
 
-void Buffer::format(const int length, const char *const format) {
-       if (checkSpace(MAXSIZE_PACK32 + length + 1)) {
-               packInt(CODE_FORMAT);
-               writeBytes(format, length + 1);
-       } else {
-               logg->logError(__FILE__, __LINE__, "Ran out of buffer space for perf attrs");
-               handleException();
+void Buffer::keysOld(const uint64_t currTime, const int keyCount, const int *const keys, const int bytes, const char *const buf) {
+       while (!checkSpace((2 + keyCount) * MAXSIZE_PACK32 + bytes)) {
+               sem_wait(&mWriterSem);
+       }
+       packInt(CODE_KEYS_OLD);
+       packInt(keyCount);
+       for (int i = 0; i < keyCount; ++i) {
+               packInt(keys[i]);
+       }
+       writeBytes(buf, bytes);
+       check(currTime);
+}
+
+void Buffer::format(const uint64_t currTime, const int length, const char *const format) {
+       while (!checkSpace(MAXSIZE_PACK32 + length + 1)) {
+               sem_wait(&mWriterSem);
        }
-       check(1);
+       packInt(CODE_FORMAT);
+       writeBytes(format, length + 1);
+       check(currTime);
 }
 
-void Buffer::maps(const int pid, const int tid, const char *const maps) {
+void Buffer::maps(const uint64_t currTime, const int pid, const int tid, const char *const maps) {
        const int mapsLen = strlen(maps) + 1;
-       if (checkSpace(3 * MAXSIZE_PACK32 + mapsLen)) {
-               packInt(CODE_MAPS);
-               packInt(pid);
-               packInt(tid);
-               writeBytes(maps, mapsLen);
-       } else {
-               logg->logError(__FILE__, __LINE__, "Ran out of buffer space for perf attrs");
-               handleException();
+       while (!checkSpace(3 * MAXSIZE_PACK32 + mapsLen)) {
+               sem_wait(&mWriterSem);
        }
-       check(1);
+       packInt(CODE_MAPS);
+       packInt(pid);
+       packInt(tid);
+       writeBytes(maps, mapsLen);
+       check(currTime);
 }
 
-void Buffer::comm(const int pid, const int tid, const char *const image, const char *const comm) {
+void Buffer::comm(const uint64_t currTime, const int pid, const int tid, const char *const image, const char *const comm) {
        const int imageLen = strlen(image) + 1;
        const int commLen = strlen(comm) + 1;
-       if (checkSpace(3 * MAXSIZE_PACK32 + imageLen + commLen)) {
-               packInt(CODE_COMM);
-               packInt(pid);
-               packInt(tid);
-               writeBytes(image, imageLen);
-               writeBytes(comm, commLen);
-       } else {
-               logg->logError(__FILE__, __LINE__, "Ran out of buffer space for perf attrs");
-               handleException();
+       while (!checkSpace(3 * MAXSIZE_PACK32 + imageLen + commLen)) {
+               sem_wait(&mWriterSem);
+       }
+       packInt(CODE_COMM);
+       packInt(pid);
+       packInt(tid);
+       writeBytes(image, imageLen);
+       writeBytes(comm, commLen);
+       check(currTime);
+}
+
+void Buffer::onlineCPU(const uint64_t currTime, const uint64_t time, const int cpu) {
+       while (!checkSpace(MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
+               sem_wait(&mWriterSem);
+       }
+       packInt(CODE_ONLINE_CPU);
+       packInt64(time);
+       packInt(cpu);
+       check(currTime);
+}
+
+void Buffer::offlineCPU(const uint64_t currTime, const uint64_t time, const int cpu) {
+       while (!checkSpace(MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
+               sem_wait(&mWriterSem);
+       }
+       packInt(CODE_OFFLINE_CPU);
+       packInt64(time);
+       packInt(cpu);
+       check(currTime);
+}
+
+void Buffer::kallsyms(const uint64_t currTime, const char *const kallsyms) {
+       const int kallsymsLen = strlen(kallsyms) + 1;
+       while (!checkSpace(3 * MAXSIZE_PACK32 + kallsymsLen)) {
+               sem_wait(&mWriterSem);
        }
-       check(1);
+       packInt(CODE_KALLSYMS);
+       writeBytes(kallsyms, kallsymsLen);
+       check(currTime);
 }
 
 void Buffer::setDone() {
index 50237771860c184d1c03b8c2a2687cadd8aed9d9..6cffd8e39a36748d9b2a447bb46ebaff926b662c 100644 (file)
@@ -39,24 +39,26 @@ public:
        void commit(const uint64_t time);
        void check(const uint64_t time);
 
-       void frame();
-
        // Summary messages
-       void summary(const int64_t timestamp, const int64_t uptime, const int64_t monotonicDelta, const char *const uname);
-       void coreName(const int core, const int cpuid, const char *const name);
+       void summary(const uint64_t currTime, const int64_t timestamp, const int64_t uptime, const int64_t monotonicDelta, const char *const uname);
+       void coreName(const uint64_t currTime, const int core, const int cpuid, const char *const name);
 
        // Block Counter messages
        bool eventHeader(uint64_t curr_time);
        bool eventTid(int tid);
-       void event(int32_t key, int32_t value);
-       void event64(int64_t key, int64_t value);
+       void event(int key, int32_t value);
+       void event64(int key, int64_t value);
 
        // Perf Attrs messages
-       void pea(const struct perf_event_attr *const pea, int key);
-       void keys(const int count, const __u64 *const ids, const int *const keys);
-       void format(const int length, const char *const format);
-       void maps(const int pid, const int tid, const char *const maps);
-       void comm(const int pid, const int tid, const char *const image, const char *const comm);
+       void pea(const uint64_t currTime, const struct perf_event_attr *const pea, int key);
+       void keys(const uint64_t currTime, const int count, const __u64 *const ids, const int *const keys);
+       void keysOld(const uint64_t currTime, const int keyCount, const int *const keys, const int bytes, const char *const buf);
+       void format(const uint64_t currTime, const int length, const char *const format);
+       void maps(const uint64_t currTime, const int pid, const int tid, const char *const maps);
+       void comm(const uint64_t currTime, const int pid, const int tid, const char *const image, const char *const comm);
+       void onlineCPU(const uint64_t currTime, const uint64_t time, const int cpu);
+       void offlineCPU(const uint64_t currTime, const uint64_t time, const int cpu);
+       void kallsyms(const uint64_t currTime, const char *const kallsyms);
 
        void setDone();
        bool isDone() const;
@@ -64,6 +66,12 @@ public:
        // Prefer a new member to using these functions if possible
        char *getWritePos() { return mBuf + mWritePos; }
        void advanceWrite(int bytes) { mWritePos = (mWritePos + bytes) & /*mask*/(mSize - 1); }
+       static void packInt(char *const buf, const int size, int &writePos, int32_t x);
+       void packInt(int32_t x);
+       static void packInt64(char *const buf, const int size, int &writePos, int64_t x);
+       void packInt64(int64_t x);
+       void writeBytes(const void *const data, size_t count);
+       void writeString(const char *const str);
 
        static void writeLEInt(unsigned char *buf, int v) {
                buf[0] = (v >> 0) & 0xFF;
@@ -73,25 +81,22 @@ public:
        }
 
 private:
+       void frame();
        bool commitReady() const;
        bool checkSpace(int bytes);
 
-       void packInt(int32_t x);
-       void packInt64(int64_t x);
-       void writeBytes(const void *const data, size_t count);
-       void writeString(const char *const str);
-
-       const int32_t mCore;
-       const int32_t mBufType;
+       char *const mBuf;
+       sem_t *const mReaderSem;
+       uint64_t mCommitTime;
+       sem_t mWriterSem;
        const int mSize;
        int mReadPos;
        int mWritePos;
        int mCommitPos;
        bool mAvailable;
        bool mIsDone;
-       char *const mBuf;
-       uint64_t mCommitTime;
-       sem_t *const mReaderSem;
+       const int32_t mCore;
+       const int32_t mBufType;
 
        // Intentionally unimplemented
        Buffer(const Buffer &);
diff --git a/tools/gator/daemon/CCNDriver.cpp b/tools/gator/daemon/CCNDriver.cpp
new file mode 100644 (file)
index 0000000..dd1a2b1
--- /dev/null
@@ -0,0 +1,295 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "CCNDriver.h"
+
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "k/perf_event.h"
+
+#include "Config.h"
+#include "DriverSource.h"
+#include "Logging.h"
+
+static const char TAG_CATEGORY[] = "category";
+static const char TAG_COUNTER_SET[] = "counter_set";
+static const char TAG_EVENT[] = "event";
+static const char TAG_OPTION[] = "option";
+static const char TAG_OPTION_SET[] = "option_set";
+
+static const char ATTR_AVERAGE_SELECTION[] = "average_selection";
+static const char ATTR_COUNTER[] = "counter";
+static const char ATTR_COUNTER_SET[] = "counter_set";
+static const char ATTR_COUNT[] = "count";
+static const char ATTR_DESCRIPTION[] = "description";
+static const char ATTR_DISPLAY[] = "display";
+static const char ATTR_EVENT[] = "event";
+static const char ATTR_EVENT_DELTA[] = "event_delta";
+static const char ATTR_NAME[] = "name";
+static const char ATTR_OPTION_SET[] = "option_set";
+static const char ATTR_TITLE[] = "title";
+static const char ATTR_UNITS[] = "units";
+
+static const char XP_REGION[] = "XP_Region";
+static const char HNF_REGION[] = "HN-F_Region";
+static const char RNI_REGION[] = "RN-I_Region";
+static const char SBAS_REGION[] = "SBAS_Region";
+static const char CCN_5XX[] = "CCN-5xx";
+#define ARM_CCN_5XX "ARM_CCN_5XX_"
+
+static const char *const VC_TYPES[] = { "REQ", "RSP", "SNP", "DAT" };
+static const char *const XP_EVENT_NAMES[] = { NULL, "H-bit", "S-bit", "P-Cnt", "TknV" };
+static const char *const XP_EVENT_DESCRIPTIONS[] = { NULL, "Set H-bit, signaled when this XP sets the H-bit.", "Set S-bit, signaled when this XP sets the S-bit.", "Set P-Cnt, signaled when this XP sets the P-Cnt. This is not applicable for the SNP VC.", "No TknV, signaled when this XP transmits a valid packet." };
+static const char *const HNF_EVENT_NAMES[] = { NULL, "Cache Miss", "L3 SF Cache Access", "Cache Fill", "POCQ Retry", "POCQ Reqs Recvd", "SF Hit", "SF Evictions", "Snoops Sent", "Snoops Broadcast", "L3 Eviction", "L3 Fill Invalid Way", "MC Retries", "MC Reqs", "QOS HH Retry" };
+static const char *const HNF_EVENT_DESCRIPTIONS[] = { NULL, "Counts the total cache misses. This is the first time lookup result, and is high priority.", "Counts the number of cache accesses. This is the first time access, and is high priority.", "Counts the total allocations in the HN L3 cache, and all cache line allocations to the L3 cache.", "Counts the number of requests that have been retried.", "Counts the number of requests received by HN.", "Counts the number of snoop filter hits.", "Counts the number of snoop filter evictions. Cache invalidations are initiated.", "Counts the number of snoops sent. Does not differentiate between broadcast or directed snoops.", "Counts the number of snoop broadcasts sent.", "Counts the number of L3 evictions.", "Counts the number of L3 fills to an invalid way.", "Counts the number of transactions retried by the memory controller.", "Counts the number of requests to the memory controller.", "Counts the number of times a highest-priority QoS class was retried at the HN-F." };
+static const char *const RNI_EVENT_NAMES[] = { NULL, "S0 RDataBeats", "S1 RDataBeats", "S2 RDataBeats", "RXDAT Flits received", "TXDAT Flits sent", "Total TXREQ Flits sent", "Retried TXREQ Flits sent", "RRT full", "WRT full", "Replayed TXREQ Flits" };
+static const char *const RNI_EVENT_DESCRIPTIONS[] = { NULL, "S0 RDataBeats.", "S1 RDataBeats.", "S2 RDataBeats.", "RXDAT Flits received.", "TXDAT Flits sent.", "Total TXREQ Flits sent.", "Retried TXREQ Flits sent.", "RRT full.", "WRT full.", "Replayed TXREQ Flits." };
+static const char *const SBAS_EVENT_NAMES[] = { NULL, "S0 RDataBeats", NULL, NULL, "RXDAT Flits received", "TXDAT Flits sent", "Total TXREQ Flits sent", "Retried TXREQ Flits sent", "RRT full", "WRT full", "Replayed TXREQ Flits" };
+static const char *const SBAS_EVENT_DESCRIPTIONS[] = { NULL, "S0 RDataBeats.", NULL, NULL, "RXDAT Flits received.", "TXDAT Flits sent.", "Total TXREQ Flits sent.", "Retried TXREQ Flits sent.", "RRT full.", "WRT full.", "Replayed TXREQ Flits." };
+
+// This class is used only to poll for CCN-5xx configuration and emit events XML for it. All other operations are handled by PerfDriver
+
+static int sys_perf_event_open(struct perf_event_attr *const attr, const pid_t pid, const int cpu, const int group_fd, const unsigned long flags) {
+       return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
+}
+
+static unsigned int getConfig(unsigned int node, unsigned int type, unsigned int event, unsigned int port, unsigned int vc) {
+  return
+    ((node  & 0xff) <<  0) |
+    ((type  & 0xff) <<  8) |
+    ((event & 0xff) << 16) |
+    ((port  & 0x03) << 24) |
+    ((vc    & 0x07) << 26) |
+    0;
+}
+
+static bool perfPoll(struct perf_event_attr *const pea) {
+       int fd = sys_perf_event_open(pea, -1, 0, -1, 0);
+       if (fd < 0) {
+               return false;
+       }
+       close(fd);
+       return true;
+}
+
+CCNDriver::CCNDriver() : mNodeTypes(NULL), mXpCount(0) {
+}
+
+CCNDriver::~CCNDriver() {
+       delete mNodeTypes;
+}
+
+bool CCNDriver::claimCounter(const Counter &) const {
+       // Handled by PerfDriver
+       return false;
+}
+
+void CCNDriver::resetCounters() {
+       // Handled by PerfDriver
+}
+
+void CCNDriver::setupCounter(Counter &) {
+       // Handled by PerfDriver
+}
+
+void CCNDriver::readEvents(mxml_node_t *const) {
+       struct stat st;
+       if (stat("/sys/bus/event_source/devices/ccn", &st) != 0) {
+               // Not found
+               return;
+       }
+
+       int type;
+       if (DriverSource::readIntDriver("/sys/bus/event_source/devices/ccn/type", &type) != 0) {
+               logg->logError(__FILE__, __LINE__, "Unable to read CCN-5xx type");
+               handleException();
+       }
+
+       // Detect number of xps
+       struct perf_event_attr pea;
+       memset(&pea, 0, sizeof(pea));
+       pea.type = type;
+       pea.size = sizeof(pea);
+
+       mXpCount = 1;
+       while (true) {
+               pea.config = getConfig(0, 0x08, 1, 0, 1) | mXpCount;
+               if (!perfPoll(&pea)) {
+                       break;
+               }
+               mXpCount *= 2;
+       };
+       {
+               int lower = mXpCount/2 + 1;
+               while (lower < mXpCount) {
+                       int mid = (lower + mXpCount)/2;
+                       pea.config = getConfig(0, 0x08, 1, 0, 1) | mid;
+                       if (perfPoll(&pea)) {
+                               lower = mid + 1;
+                       } else {
+                               mXpCount = mid;
+                       }
+               }
+       }
+
+       mNodeTypes = new NodeType[2*mXpCount];
+
+       // Detect node types
+       for (int i = 0; i < 2*mXpCount; ++i) {
+               pea.config = getConfig(0, 0x04, 1, 0, 0) | i;
+               if (perfPoll(&pea)) {
+                       mNodeTypes[i] = NT_HNF;
+                       continue;
+               }
+
+               pea.config = getConfig(0, 0x16, 1, 0, 0) | i;
+               if (perfPoll(&pea)) {
+                       mNodeTypes[i] = NT_RNI;
+                       continue;
+               }
+
+               pea.config = getConfig(0, 0x10, 1, 0, 0) | i;
+               if (perfPoll(&pea)) {
+                       mNodeTypes[i] = NT_SBAS;
+                       continue;
+               }
+
+               mNodeTypes[i] = NT_UNKNOWN;
+       }
+}
+
+int CCNDriver::writeCounters(mxml_node_t *const) const {
+       // Handled by PerfDriver
+       return 0;
+}
+
+void CCNDriver::writeEvents(mxml_node_t *const root) const {
+       mxml_node_t *const counter_set = mxmlNewElement(root, TAG_COUNTER_SET);
+       mxmlElementSetAttr(counter_set, ATTR_NAME, ARM_CCN_5XX "cnt");
+       mxmlElementSetAttr(counter_set, ATTR_COUNT, "8");
+
+       mxml_node_t *const category = mxmlNewElement(root, TAG_CATEGORY);
+       mxmlElementSetAttr(category, ATTR_NAME, CCN_5XX);
+       mxmlElementSetAttr(category, TAG_COUNTER_SET, ARM_CCN_5XX "cnt");
+
+       mxml_node_t *const clock_event = mxmlNewElement(category, TAG_EVENT);
+       mxmlElementSetAttr(clock_event, ATTR_COUNTER, ARM_CCN_5XX "ccnt");
+       mxmlElementSetAttr(clock_event, ATTR_EVENT, "0xff00");
+       mxmlElementSetAttr(clock_event, ATTR_TITLE, "CCN-5xx Clock");
+       mxmlElementSetAttr(clock_event, ATTR_NAME, "Cycles");
+       mxmlElementSetAttr(clock_event, ATTR_DISPLAY, "hertz");
+       mxmlElementSetAttr(clock_event, ATTR_UNITS, "Hz");
+       mxmlElementSetAttr(clock_event, ATTR_AVERAGE_SELECTION, "yes");
+       mxmlElementSetAttr(clock_event, ATTR_DESCRIPTION, "The number of core clock cycles");
+
+       mxml_node_t *const xp_option_set = mxmlNewElement(category, TAG_OPTION_SET);
+       mxmlElementSetAttr(xp_option_set, ATTR_NAME, XP_REGION);
+
+       for (int i = 0; i < mXpCount; ++i) {
+               mxml_node_t *const option = mxmlNewElement(xp_option_set, TAG_OPTION);
+               mxmlElementSetAttrf(option, ATTR_EVENT_DELTA, "0x%x", getConfig(i, 0, 0, 0, 0));
+               mxmlElementSetAttrf(option, ATTR_NAME, "XP %i", i);
+               mxmlElementSetAttrf(option, ATTR_DESCRIPTION, "Crosspoint %i", i);
+       }
+
+       for (int vc = 0; vc < ARRAY_LENGTH(VC_TYPES); ++vc) {
+               if (VC_TYPES[vc] == NULL) {
+                       continue;
+               }
+               for (int bus = 0; bus < 2; ++bus) {
+                       for (int eventId = 0; eventId < ARRAY_LENGTH(XP_EVENT_NAMES); ++eventId) {
+                               if (XP_EVENT_NAMES[eventId] == NULL) {
+                                       continue;
+                               }
+                               mxml_node_t *const event = mxmlNewElement(category, TAG_EVENT);
+                               mxmlElementSetAttrf(event, ATTR_EVENT, "0x%x", getConfig(0, 0x08, eventId, bus, vc));
+                               mxmlElementSetAttr(event, ATTR_OPTION_SET, XP_REGION);
+                               mxmlElementSetAttr(event, ATTR_TITLE, CCN_5XX);
+                               mxmlElementSetAttrf(event, ATTR_NAME, "Bus %i: %s: %s", bus, VC_TYPES[vc], XP_EVENT_NAMES[eventId]);
+                               mxmlElementSetAttrf(event, ATTR_DESCRIPTION, "Bus %i: %s: %s", bus, VC_TYPES[vc], XP_EVENT_DESCRIPTIONS[eventId]);
+                       }
+               }
+       }
+
+       mxml_node_t *const hnf_option_set = mxmlNewElement(category, TAG_OPTION_SET);
+       mxmlElementSetAttr(hnf_option_set, ATTR_NAME, HNF_REGION);
+
+       for (int eventId = 0; eventId < ARRAY_LENGTH(HNF_EVENT_NAMES); ++eventId) {
+               if (HNF_EVENT_NAMES[eventId] == NULL) {
+                       continue;
+               }
+               mxml_node_t *const event = mxmlNewElement(category, TAG_EVENT);
+               mxmlElementSetAttrf(event, ATTR_EVENT, "0x%x", getConfig(0, 0x04, eventId, 0, 0));
+               mxmlElementSetAttr(event, ATTR_OPTION_SET, HNF_REGION);
+               mxmlElementSetAttr(event, ATTR_TITLE, CCN_5XX);
+               mxmlElementSetAttr(event, ATTR_NAME, HNF_EVENT_NAMES[eventId]);
+               mxmlElementSetAttr(event, ATTR_DESCRIPTION, HNF_EVENT_DESCRIPTIONS[eventId]);
+       }
+
+       mxml_node_t *const rni_option_set = mxmlNewElement(category, TAG_OPTION_SET);
+       mxmlElementSetAttr(rni_option_set, ATTR_NAME, RNI_REGION);
+
+       for (int eventId = 0; eventId < ARRAY_LENGTH(RNI_EVENT_NAMES); ++eventId) {
+               if (RNI_EVENT_NAMES[eventId] == NULL) {
+                       continue;
+               }
+               mxml_node_t *const event = mxmlNewElement(category, TAG_EVENT);
+               mxmlElementSetAttrf(event, ATTR_EVENT, "0x%x", getConfig(0, 0x16, eventId, 0, 0));
+               mxmlElementSetAttr(event, ATTR_OPTION_SET, RNI_REGION);
+               mxmlElementSetAttr(event, ATTR_TITLE, CCN_5XX);
+               mxmlElementSetAttr(event, ATTR_NAME, RNI_EVENT_NAMES[eventId]);
+               mxmlElementSetAttr(event, ATTR_DESCRIPTION, RNI_EVENT_DESCRIPTIONS[eventId]);
+       }
+
+       mxml_node_t *const sbas_option_set = mxmlNewElement(category, TAG_OPTION_SET);
+       mxmlElementSetAttr(sbas_option_set, ATTR_NAME, SBAS_REGION);
+
+       for (int eventId = 0; eventId < ARRAY_LENGTH(SBAS_EVENT_NAMES); ++eventId) {
+               if (SBAS_EVENT_NAMES[eventId] == NULL) {
+                       continue;
+               }
+               mxml_node_t *const event = mxmlNewElement(category, TAG_EVENT);
+               mxmlElementSetAttrf(event, ATTR_EVENT, "0x%x", getConfig(0, 0x10, eventId, 0, 0));
+               mxmlElementSetAttr(event, ATTR_OPTION_SET, SBAS_REGION);
+               mxmlElementSetAttr(event, ATTR_TITLE, CCN_5XX);
+               mxmlElementSetAttr(event, ATTR_NAME, SBAS_EVENT_NAMES[eventId]);
+               mxmlElementSetAttr(event, ATTR_DESCRIPTION, SBAS_EVENT_DESCRIPTIONS[eventId]);
+       }
+
+       for (int i = 0; i < 2*mXpCount; ++i) {
+               switch (mNodeTypes[i]) {
+               case NT_HNF: {
+                       mxml_node_t *const option = mxmlNewElement(hnf_option_set, TAG_OPTION);
+                       mxmlElementSetAttrf(option, ATTR_EVENT_DELTA, "0x%x", getConfig(i, 0, 0, 0, 0));
+                       mxmlElementSetAttrf(option, ATTR_NAME, "HN-F %i", i);
+                       mxmlElementSetAttrf(option, ATTR_DESCRIPTION, "Fully-coherent Home Node %i", i);
+                       break;
+               }
+               case NT_RNI: {
+                       mxml_node_t *const option = mxmlNewElement(rni_option_set, TAG_OPTION);
+                       mxmlElementSetAttrf(option, ATTR_EVENT_DELTA, "0x%x", getConfig(i, 0, 0, 0, 0));
+                       mxmlElementSetAttrf(option, ATTR_NAME, "RN-I %i", i);
+                       mxmlElementSetAttrf(option, ATTR_DESCRIPTION, "I/O-coherent Requesting Node %i", i);
+                       break;
+               }
+               case NT_SBAS: {
+                       mxml_node_t *const option = mxmlNewElement(sbas_option_set, TAG_OPTION);
+                       mxmlElementSetAttrf(option, ATTR_EVENT_DELTA, "0x%x", getConfig(i, 0, 0, 0, 0));
+                       mxmlElementSetAttrf(option, ATTR_NAME, "SBAS %i", i);
+                       mxmlElementSetAttrf(option, ATTR_DESCRIPTION, "ACE master to CHI protocol bridge %i", i);
+                       break;
+               }
+               default:
+                       continue;
+               }
+       }
+}
diff --git a/tools/gator/daemon/CCNDriver.h b/tools/gator/daemon/CCNDriver.h
new file mode 100644 (file)
index 0000000..fb4c717
--- /dev/null
@@ -0,0 +1,43 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CCNDRIVER_H
+#define CCNDRIVER_H
+
+#include "Driver.h"
+
+class CCNDriver : public Driver {
+public:
+       CCNDriver();
+       ~CCNDriver();
+
+       bool claimCounter(const Counter &counter) const;
+       void resetCounters();
+       void setupCounter(Counter &counter);
+
+       void readEvents(mxml_node_t *const);
+       int writeCounters(mxml_node_t *const root) const;
+       void writeEvents(mxml_node_t *const) const;
+
+private:
+       enum NodeType {
+               NT_UNKNOWN,
+               NT_HNF,
+               NT_RNI,
+               NT_SBAS,
+       };
+
+       NodeType *mNodeTypes;
+       int mXpCount;
+
+       // Intentionally unimplemented
+       CCNDriver(const CCNDriver &);
+       CCNDriver &operator=(const CCNDriver &);
+};
+
+#endif // CCNDRIVER_H
diff --git a/tools/gator/daemon/CPUFreqDriver.cpp b/tools/gator/daemon/CPUFreqDriver.cpp
new file mode 100644 (file)
index 0000000..41f9d6f
--- /dev/null
@@ -0,0 +1,58 @@
+/**
+ * Copyright (C) ARM Limited 2013-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "CPUFreqDriver.h"
+
+#include "Buffer.h"
+#include "DriverSource.h"
+#include "Logging.h"
+#include "SessionData.h"
+
+CPUFreqDriver::CPUFreqDriver() : mPrev() {
+}
+
+CPUFreqDriver::~CPUFreqDriver() {
+}
+
+void CPUFreqDriver::readEvents(mxml_node_t *const) {
+       // Only for use with perf
+       if (!gSessionData->perf.isSetup()) {
+               return;
+       }
+
+       setCounters(new DriverCounter(getCounters(), strdup("Linux_power_cpu_freq")));
+}
+
+void CPUFreqDriver::read(Buffer *const buffer) {
+       char buf[64];
+       const DriverCounter *const counter = getCounters();
+       if ((counter == NULL) || !counter->isEnabled()) {
+               return;
+       }
+
+       const int key = getCounters()->getKey();
+       bool resetCores = false;
+       for (int i = 0; i < gSessionData->mCores; ++i) {
+               snprintf(buf, sizeof(buf), "/sys/devices/system/cpu/cpu%i/cpufreq/cpuinfo_cur_freq", i);
+               int64_t freq;
+               if (DriverSource::readInt64Driver(buf, &freq) != 0) {
+                       freq = 0;
+               }
+               if (mPrev[i] != freq) {
+                       mPrev[i] = freq;
+                       // Change cores
+                       buffer->event64(2, i);
+                       resetCores = true;
+                       buffer->event64(key, 1000*freq);
+               }
+       }
+       if (resetCores) {
+               // Revert cores, UserSpaceSource is all on core 0
+               buffer->event64(2, 0);
+       }
+}
diff --git a/tools/gator/daemon/CPUFreqDriver.h b/tools/gator/daemon/CPUFreqDriver.h
new file mode 100644 (file)
index 0000000..ad8c9aa
--- /dev/null
@@ -0,0 +1,34 @@
+/**
+ * Copyright (C) ARM Limited 2013-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CPUFREQDRIVER_H
+#define CPUFREQDRIVER_H
+
+#include "Config.h"
+#include "Driver.h"
+
+class CPUFreqDriver : public PolledDriver {
+private:
+       typedef PolledDriver super;
+
+public:
+       CPUFreqDriver();
+       ~CPUFreqDriver();
+
+       void readEvents(mxml_node_t *const root);
+       void read(Buffer *const buffer);
+
+private:
+       int64_t mPrev[NR_CPUS];
+
+       // Intentionally unimplemented
+       CPUFreqDriver(const CPUFreqDriver &);
+       CPUFreqDriver &operator=(const CPUFreqDriver &);
+};
+
+#endif // CPUFREQDRIVER_H
index cf79b72a1166966a2e1f71d0aff8a26a7befa59a..0b5802c893bbbfe10573041bfe26a1141ed6e8a4 100644 (file)
@@ -33,7 +33,8 @@ mxml_node_t* CapturedXML::getTree(bool includeTime) {
        captured = mxmlNewElement(xml, "captured");
        mxmlElementSetAttr(captured, "version", "1");
        if (gSessionData->perf.isSetup()) {
-         mxmlElementSetAttr(captured, "type", "Perf");
+               mxmlElementSetAttr(captured, "type", "Perf");
+               mxmlElementSetAttr(captured, "perf_beta", "yes");
        }
        mxmlElementSetAttrf(captured, "protocol", "%d", PROTOCOL_VERSION);
        if (includeTime) { // Send the following only after the capture is complete
@@ -66,10 +67,15 @@ mxml_node_t* CapturedXML::getTree(bool includeTime) {
                        mxml_node_t *const node = mxmlNewElement(counters, "counter");
                        mxmlElementSetAttrf(node, "key", "0x%x", counter.getKey());
                        mxmlElementSetAttr(node, "type", counter.getType());
-                       mxmlElementSetAttrf(node, "event", "0x%x", counter.getEvent());
+                       if (counter.getEvent() != -1) {
+                               mxmlElementSetAttrf(node, "event", "0x%x", counter.getEvent());
+                       }
                        if (counter.getCount() > 0) {
                                mxmlElementSetAttrf(node, "count", "%d", counter.getCount());
                        }
+                       if (counter.getCores() > 0) {
+                               mxmlElementSetAttrf(node, "cores", "%d", counter.getCores());
+                       }
                }
        }
 
@@ -89,7 +95,7 @@ void CapturedXML::write(char* path) {
 
        // Set full path
        snprintf(file, PATH_MAX, "%s/captured.xml", path);
-       
+
        char* xml = getXML(true);
        if (util->writeToDisk(file, xml) < 0) {
                logg->logError(__FILE__, __LINE__, "Error writing %s\nPlease verify the path.", file);
@@ -108,32 +114,32 @@ const char * mxmlWhitespaceCB(mxml_node_t *node, int loc) {
        if (loc == MXML_WS_BEFORE_OPEN) {
                // Single indentation
                if (!strcmp(name, "target") || !strcmp(name, "counters"))
-                       return("\n  ");
+                       return "\n  ";
 
                // Double indentation
                if (!strcmp(name, "counter"))
-                       return("\n    ");
+                       return "\n    ";
 
                // Avoid a carriage return on the first line of the xml file
                if (!strncmp(name, "?xml", 4))
-                       return(NULL);
+                       return NULL;
 
                // Default - no indentation
-               return("\n");
+               return "\n";
        }
 
        if (loc == MXML_WS_BEFORE_CLOSE) {
                // No indentation
                if (!strcmp(name, "captured"))
-                       return("\n");
+                       return "\n";
 
                // Single indentation
                if (!strcmp(name, "counters"))
-                       return("\n  ");
+                       return "\n  ";
 
                // Default - no carriage return
-               return(NULL);
+               return NULL;
        }
 
-       return(NULL);
+       return NULL;
 }
index efc1e52bdba33519fb83f8a0af02a8b835995ff5..b704f6e53bb593bcca46326b4a87b1525e4bc04c 100644 (file)
@@ -6,8 +6,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef        __CAPTURED_XML_H__
-#define        __CAPTURED_XML_H__
+#ifndef __CAPTURED_XML_H__
+#define __CAPTURED_XML_H__
 
 #include "mxml/mxml.h"
 
@@ -23,4 +23,4 @@ private:
 
 const char * mxmlWhitespaceCB(mxml_node_t *node, int where);
 
-#endif         //__CAPTURED_XML_H__
+#endif //__CAPTURED_XML_H__
index ca33561ffdca6d21dbacb4af3b8d4d23d252c5fc..6b5bbb3bf6afb8c727809b56313de32080ae0a69 100644 (file)
 #include <unistd.h>
 #include <sys/prctl.h>
 
-#include "Logging.h"
 #include "CapturedXML.h"
-#include "SessionData.h"
-#include "LocalCapture.h"
-#include "Sender.h"
-#include "OlyUtility.h"
-#include "OlySocket.h"
-#include "StreamlineSetup.h"
+#include "Command.h"
 #include "ConfigurationXML.h"
 #include "Driver.h"
-#include "PerfSource.h"
 #include "DriverSource.h"
-#include "UserSpaceSource.h"
 #include "ExternalSource.h"
+#include "FtraceSource.h"
+#include "LocalCapture.h"
+#include "Logging.h"
+#include "OlySocket.h"
+#include "OlyUtility.h"
+#include "PerfSource.h"
+#include "Sender.h"
+#include "SessionData.h"
+#include "StreamlineSetup.h"
+#include "UserSpaceSource.h"
 
 static sem_t haltPipeline, senderThreadStarted, startProfile, senderSem; // Shared by Child and spawned threads
 static Source *primarySource = NULL;
-static Source *userSpaceSource = NULL;
 static Source *externalSource = NULL;
+static Source *userSpaceSource = NULL;
+static Source *ftraceSource = NULL;
 static Sender* sender = NULL;        // Shared by Child.cpp and spawned threads
 Child* child = NULL;                 // shared by Child.cpp and main.cpp
 
@@ -147,15 +150,19 @@ static void *senderThread(void *) {
        prctl(PR_SET_NAME, (unsigned long)&"gatord-sender", 0, 0, 0);
        sem_wait(&haltPipeline);
 
-       while (!primarySource->isDone() || (userSpaceSource != NULL && !userSpaceSource->isDone()) || (externalSource != NULL && !externalSource->isDone())) {
+       while (!primarySource->isDone() ||
+              !externalSource->isDone() ||
+              (userSpaceSource != NULL && !userSpaceSource->isDone()) ||
+              (ftraceSource != NULL && !ftraceSource->isDone())) {
                sem_wait(&senderSem);
 
                primarySource->write(sender);
+               externalSource->write(sender);
                if (userSpaceSource != NULL) {
                        userSpaceSource->write(sender);
                }
-               if (externalSource != NULL) {
-                       externalSource->write(sender);
+               if (ftraceSource != NULL) {
+                       ftraceSource->write(sender);
                }
        }
 
@@ -202,6 +209,13 @@ void Child::initialization() {
 void Child::endSession() {
        gSessionData->mSessionIsActive = false;
        primarySource->interrupt();
+       externalSource->interrupt();
+       if (userSpaceSource != NULL) {
+               userSpaceSource->interrupt();
+       }
+       if (ftraceSource != NULL) {
+               ftraceSource->interrupt();
+       }
        sem_post(&haltPipeline);
 }
 
@@ -227,9 +241,9 @@ void Child::run() {
 
        // Set up the driver; must be done after gSessionData->mPerfCounterType[] is populated
        if (!gSessionData->perf.isSetup()) {
-         primarySource = new DriverSource(&senderSem, &startProfile);
+               primarySource = new DriverSource(&senderSem, &startProfile);
        } else {
-         primarySource = new PerfSource(&senderSem, &startProfile);
+               primarySource = new PerfSource(&senderSem, &startProfile);
        }
 
        // Initialize all drivers
@@ -265,40 +279,71 @@ void Child::run() {
                free(xmlString);
        }
 
+       if (gSessionData->kmod.isMaliCapture() && (gSessionData->mSampleRate == 0)) {
+               logg->logError(__FILE__, __LINE__, "Mali counters are not supported with Sample Rate: None.");
+               handleException();
+       }
+
        // Must be after session XML is parsed
        if (!primarySource->prepare()) {
-               logg->logError(__FILE__, __LINE__, "Unable to prepare for capture");
+               if (gSessionData->perf.isSetup()) {
+                       logg->logError(__FILE__, __LINE__, "Unable to prepare gator driver for capture");
+               } else {
+                       logg->logError(__FILE__, __LINE__, "Unable to communicate with the perf API, please ensure that CONFIG_TRACING and CONFIG_CONTEXT_SWITCH_TRACER are enabled. Please refer to README_Streamline.txt for more information.");
+               }
                handleException();
        }
 
        // Sender thread shall be halted until it is signaled for one shot mode
        sem_init(&haltPipeline, 0, gSessionData->mOneShot ? 0 : 2);
 
+       // Must be initialized before senderThread is started as senderThread checks externalSource
+       externalSource = new ExternalSource(&senderSem);
+       if (!externalSource->prepare()) {
+               logg->logError(__FILE__, __LINE__, "Unable to prepare external source for capture");
+               handleException();
+       }
+       externalSource->start();
+
        // Create the duration, stop, and sender threads
        bool thread_creation_success = true;
        if (gSessionData->mDuration > 0 && pthread_create(&durationThreadID, NULL, durationThread, NULL)) {
                thread_creation_success = false;
        } else if (socket && pthread_create(&stopThreadID, NULL, stopThread, NULL)) {
                thread_creation_success = false;
-       } else if (pthread_create(&senderThreadID, NULL, senderThread, NULL)){
+       } else if (pthread_create(&senderThreadID, NULL, senderThread, NULL)) {
                thread_creation_success = false;
        }
 
-       if (gSessionData->hwmon.countersEnabled()) {
+       bool startUSSource = false;
+       for (int i = 0; i < ARRAY_LENGTH(gSessionData->usDrivers); ++i) {
+               if (gSessionData->usDrivers[i]->countersEnabled()) {
+                       startUSSource = true;
+               }
+       }
+       if (startUSSource) {
                userSpaceSource = new UserSpaceSource(&senderSem);
                if (!userSpaceSource->prepare()) {
-                       logg->logError(__FILE__, __LINE__, "Unable to prepare for capture");
+                       logg->logError(__FILE__, __LINE__, "Unable to prepare userspace source for capture");
                        handleException();
                }
                userSpaceSource->start();
        }
-       if (access("/tmp/gator", F_OK) == 0) {
-               externalSource = new ExternalSource(&senderSem);
-               if (!externalSource->prepare()) {
-                       logg->logError(__FILE__, __LINE__, "Unable to prepare for capture");
+
+       if (gSessionData->ftraceDriver.countersEnabled()) {
+               ftraceSource = new FtraceSource(&senderSem);
+               if (!ftraceSource->prepare()) {
+                       logg->logError(__FILE__, __LINE__, "Unable to prepare userspace source for capture");
                        handleException();
                }
-               externalSource->start();
+               ftraceSource->start();
+       }
+
+       if (gSessionData->mAllowCommands && (gSessionData->mCaptureCommand != NULL)) {
+               pthread_t thread;
+               if (pthread_create(&thread, NULL, commandThread, NULL)) {
+                       thread_creation_success = false;
+               }
        }
 
        if (!thread_creation_success) {
@@ -312,12 +357,13 @@ void Child::run() {
        // Start profiling
        primarySource->run();
 
-       if (externalSource != NULL) {
-               externalSource->join();
+       if (ftraceSource != NULL) {
+               ftraceSource->join();
        }
        if (userSpaceSource != NULL) {
                userSpaceSource->join();
        }
+       externalSource->join();
 
        // Wait for the other threads to exit
        pthread_join(senderThreadID, NULL);
@@ -337,8 +383,9 @@ void Child::run() {
 
        logg->logMessage("Profiling ended.");
 
-       delete externalSource;
+       delete ftraceSource;
        delete userSpaceSource;
+       delete externalSource;
        delete primarySource;
        delete sender;
        delete localCapture;
index 9e206d7113b8353d3375f08fdff409ed1a1c5570..cc78202ceb5caaba8996a92409e7a4fc31cce0c2 100644 (file)
@@ -6,8 +6,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef        __CHILD_H__
-#define        __CHILD_H__
+#ifndef __CHILD_H__
+#define __CHILD_H__
 
 class OlySocket;
 
@@ -30,4 +30,4 @@ private:
        Child &operator=(const Child &);
 };
 
-#endif         //__CHILD_H__
+#endif //__CHILD_H__
diff --git a/tools/gator/daemon/Command.cpp b/tools/gator/daemon/Command.cpp
new file mode 100644 (file)
index 0000000..28d73cf
--- /dev/null
@@ -0,0 +1,172 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "Command.h"
+
+#include <fcntl.h>
+#include <pwd.h>
+#include <stdio.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "Logging.h"
+#include "SessionData.h"
+
+static int getUid(const char *const name, char *const shPath, const char *const tmpDir) {
+       // Lookups may fail when using a different libc or a statically compiled executable
+       char gatorTemp[32];
+       snprintf(gatorTemp, sizeof(gatorTemp), "%s/gator_temp", tmpDir);
+
+       const int fd = open(gatorTemp, 600, O_CREAT | O_CLOEXEC);
+       if (fd < 0) {
+               return -1;
+       }
+       close(fd);
+
+       char cmd[128];
+       snprintf(cmd, sizeof(cmd), "chown %s %s || rm %s", name, gatorTemp, gatorTemp);
+
+       const int pid = fork();
+       if (pid < 0) {
+               logg->logError(__FILE__, __LINE__, "fork failed");
+               handleException();
+       }
+       if (pid == 0) {
+               char cargv1[] = "-c";
+               char *cargv[] = {
+                       shPath,
+                       cargv1,
+                       cmd,
+                       NULL,
+               };
+
+               execv(cargv[0], cargv);
+               exit(-1);
+       }
+       while ((waitpid(pid, NULL, 0) < 0) && (errno == EINTR));
+
+       struct stat st;
+       int result = -1;
+       if (stat(gatorTemp, &st) == 0) {
+               result = st.st_uid;
+       }
+       unlink(gatorTemp);
+       return result;
+}
+
+static int getUid(const char *const name) {
+       // Look up the username
+       struct passwd *const user = getpwnam(name);
+       if (user != NULL) {
+               return user->pw_uid;
+       }
+
+
+       // Are we on Linux
+       char cargv0l[] = "/bin/sh";
+       if ((access(cargv0l, X_OK) == 0) && (access("/tmp", W_OK) == 0)) {
+               return getUid(name, cargv0l, "/tmp");
+       }
+
+       // Are we on android
+       char cargv0a[] = "/system/bin/sh";
+       if ((access(cargv0a, X_OK) == 0) && (access("/data", W_OK) == 0)) {
+               return getUid(name, cargv0a, "/data");
+       }
+
+       return -1;
+}
+
+void *commandThread(void *) {
+       prctl(PR_SET_NAME, (unsigned long)&"gatord-command", 0, 0, 0);
+
+       const char *const name = gSessionData->mCaptureUser == NULL ? "nobody" : gSessionData->mCaptureUser;
+       const int uid = getUid(name);
+       if (uid < 0) {
+               logg->logError(__FILE__, __LINE__, "Unable to lookup the user %s, please double check that the user exists", name);
+               handleException();
+       }
+
+       sleep(3);
+
+       char buf[128];
+       int pipefd[2];
+       if (pipe_cloexec(pipefd) != 0) {
+               logg->logError(__FILE__, __LINE__, "pipe failed");
+               handleException();
+       }
+
+       const int pid = fork();
+       if (pid < 0) {
+               logg->logError(__FILE__, __LINE__, "fork failed");
+               handleException();
+       }
+       if (pid == 0) {
+               char cargv0l[] = "/bin/sh";
+               char cargv0a[] = "/system/bin/sh";
+               char cargv1[] = "-c";
+               char *cargv[] = {
+                       cargv0l,
+                       cargv1,
+                       gSessionData->mCaptureCommand,
+                       NULL,
+               };
+
+               buf[0] = '\0';
+               close(pipefd[0]);
+
+               // Gator runs at a high priority, reset the priority to the default
+               if (setpriority(PRIO_PROCESS, syscall(__NR_gettid), 0) == -1) {
+                       snprintf(buf, sizeof(buf), "setpriority failed");
+                       goto fail_exit;
+               }
+
+               if (setuid(uid) != 0) {
+                       snprintf(buf, sizeof(buf), "setuid failed");
+                       goto fail_exit;
+               }
+
+               {
+                       const char *const path = gSessionData->mCaptureWorkingDir == NULL ? "/" : gSessionData->mCaptureWorkingDir;
+                       if (chdir(path) != 0) {
+                               snprintf(buf, sizeof(buf), "Unable to cd to %s, please verify the directory exists and is accessable to %s", path, name);
+                               goto fail_exit;
+                       }
+               }
+
+               execv(cargv[0], cargv);
+               cargv[0] = cargv0a;
+               execv(cargv[0], cargv);
+               snprintf(buf, sizeof(buf), "execv failed");
+
+       fail_exit:
+               if (buf[0] != '\0') {
+                       const ssize_t bytes = write(pipefd[1], buf, sizeof(buf));
+                       // Can't do anything if this fails
+                       (void)bytes;
+               }
+
+               exit(-1);
+       }
+
+       close(pipefd[1]);
+       const ssize_t bytes = read(pipefd[0], buf, sizeof(buf));
+       if (bytes > 0) {
+               logg->logError(__FILE__, __LINE__, buf);
+               handleException();
+       }
+       close(pipefd[0]);
+
+       return NULL;
+}
diff --git a/tools/gator/daemon/Command.h b/tools/gator/daemon/Command.h
new file mode 100644 (file)
index 0000000..17244b7
--- /dev/null
@@ -0,0 +1,14 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef COMMAND_H
+#define COMMAND_H
+
+void *commandThread(void *);
+
+#endif // COMMAND_H
index 6f5e2aae50e1df685b26f9a1a6be162554b93fad..bee383a1c797054cb95af2e2817ba96293dd2369 100644 (file)
 #define CONFIG_H
 
 #define ARRAY_LENGTH(A) static_cast<int>(sizeof(A)/sizeof((A)[0]))
+#define ACCESS_ONCE(x) (*(volatile typeof(x)*)&(x))
 
 #define MAX_PERFORMANCE_COUNTERS 50
-#define NR_CPUS 16
+#define NR_CPUS 32
+
+template<typename T>
+static inline T min(const T a, const T b) {
+       return (a < b ? a : b);
+}
+
+template<typename T>
+static inline T max(const T a, const T b) {
+       return (a > b ? a : b);
+}
 
 #endif // CONFIG_H
index fd479f2452cd0267bbbbbed280335ae524843924..6590dd38919627fdc20fb335b3e7b3cade55f986 100644 (file)
@@ -21,12 +21,13 @@ static const char* ATTR_COUNTER            = "counter";
 static const char* ATTR_REVISION           = "revision";
 static const char* ATTR_EVENT              = "event";
 static const char* ATTR_COUNT              = "count";
+static const char* ATTR_CORES              = "cores";
 
 ConfigurationXML::ConfigurationXML() {
        const char * configuration_xml;
        unsigned int configuration_xml_len;
        getDefaultConfigurationXml(configuration_xml, configuration_xml_len);
-       
+
        char path[PATH_MAX];
 
        getPath(path);
@@ -53,7 +54,7 @@ ConfigurationXML::ConfigurationXML() {
 
                break;
        }
-       
+
        validate();
 }
 
@@ -82,7 +83,7 @@ int ConfigurationXML::parse(const char* configurationXML) {
        node = mxmlGetFirstChild(tree);
        while (node && mxmlGetType(node) != MXML_ELEMENT)
                node = mxmlWalkNext(node, tree, MXML_NO_DESCEND);
-       
+
        ret = configurationsTag(node);
 
        node = mxmlGetFirstChild(node);
@@ -127,7 +128,7 @@ void ConfigurationXML::validate(void) {
 #define CONFIGURATION_REVISION 3
 int ConfigurationXML::configurationsTag(mxml_node_t *node) {
        const char* revision_string;
-       
+
        revision_string = mxmlElementGetAttr(node, ATTR_REVISION);
        if (!revision_string) {
                return 1; //revision issue;
@@ -158,6 +159,7 @@ void ConfigurationXML::configurationTag(mxml_node_t *node) {
        if (mxmlElementGetAttr(node, ATTR_COUNTER)) counter.setType(mxmlElementGetAttr(node, ATTR_COUNTER));
        if (mxmlElementGetAttr(node, ATTR_EVENT)) counter.setEvent(strtol(mxmlElementGetAttr(node, ATTR_EVENT), NULL, 16));
        if (mxmlElementGetAttr(node, ATTR_COUNT)) counter.setCount(strtol(mxmlElementGetAttr(node, ATTR_COUNT), NULL, 10));
+       if (mxmlElementGetAttr(node, ATTR_CORES)) counter.setCores(strtol(mxmlElementGetAttr(node, ATTR_CORES), NULL, 10));
        if (counter.getCount() > 0) {
                gSessionData->mIsEBS = true;
        }
index 689174573e4e5518292a6f06034ab947199f04c2..5202aa04636248754909873776f3c81f6b67269c 100644 (file)
@@ -27,6 +27,7 @@ public:
                mEnabled = false;
                mEvent = -1;
                mCount = 0;
+               mCores = -1;
                mKey = 0;
                mDriver = NULL;
        }
@@ -35,6 +36,7 @@ public:
        void setEnabled(const bool enabled) { mEnabled = enabled; }
        void setEvent(const int event) { mEvent = event; }
        void setCount(const int count) { mCount = count; }
+       void setCores(const int cores) { mCores = cores; }
        void setKey(const int key) { mKey = key; }
        void setDriver(Driver *const driver) { mDriver = driver; }
 
@@ -42,6 +44,7 @@ public:
        bool isEnabled() const { return mEnabled; }
        int getEvent() const { return mEvent; }
        int getCount() const { return mCount; }
+       int getCores() const { return mCores; }
        int getKey() const { return mKey; }
        Driver *getDriver() const { return mDriver; }
 
@@ -54,6 +57,7 @@ private:
        bool mEnabled;
        int mEvent;
        int mCount;
+       int mCores;
        int mKey;
        Driver *mDriver;
 };
diff --git a/tools/gator/daemon/DiskIODriver.cpp b/tools/gator/daemon/DiskIODriver.cpp
new file mode 100644 (file)
index 0000000..5deb0f3
--- /dev/null
@@ -0,0 +1,125 @@
+/**
+ * Copyright (C) ARM Limited 2013-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+// Define to get format macros from inttypes.h
+#define __STDC_FORMAT_MACROS
+
+#include "DiskIODriver.h"
+
+#include <inttypes.h>
+
+#include "Logging.h"
+#include "SessionData.h"
+
+class DiskIOCounter : public DriverCounter {
+public:
+       DiskIOCounter(DriverCounter *next, char *const name, int64_t *const value);
+       ~DiskIOCounter();
+
+       int64_t read();
+
+private:
+       int64_t *const mValue;
+       int64_t mPrev;
+
+       // Intentionally unimplemented
+       DiskIOCounter(const DiskIOCounter &);
+       DiskIOCounter &operator=(const DiskIOCounter &);
+};
+
+DiskIOCounter::DiskIOCounter(DriverCounter *next, char *const name, int64_t *const value) : DriverCounter(next, name), mValue(value), mPrev(0) {
+}
+
+DiskIOCounter::~DiskIOCounter() {
+}
+
+int64_t DiskIOCounter::read() {
+       int64_t result = *mValue - mPrev;
+       mPrev = *mValue;
+       // Kernel assumes a sector is 512 bytes
+       return result << 9;
+}
+
+DiskIODriver::DiskIODriver() : mBuf(), mReadBytes(0), mWriteBytes(0) {
+}
+
+DiskIODriver::~DiskIODriver() {
+}
+
+void DiskIODriver::readEvents(mxml_node_t *const) {
+       // Only for use with perf
+       if (!gSessionData->perf.isSetup()) {
+               return;
+       }
+
+       setCounters(new DiskIOCounter(getCounters(), strdup("Linux_block_rq_rd"), &mReadBytes));
+       setCounters(new DiskIOCounter(getCounters(), strdup("Linux_block_rq_wr"), &mWriteBytes));
+}
+
+void DiskIODriver::doRead() {
+       if (!countersEnabled()) {
+               return;
+       }
+
+       if (!mBuf.read("/proc/diskstats")) {
+               logg->logError(__FILE__, __LINE__, "Unable to read /proc/diskstats");
+               handleException();
+       }
+
+       mReadBytes = 0;
+       mWriteBytes = 0;
+
+       char *lastName = NULL;
+       int lastNameLen = -1;
+       char *start = mBuf.getBuf();
+       while (*start != '\0') {
+               char *end = strchr(start, '\n');
+               if (end != NULL) {
+                       *end = '\0';
+               }
+
+               int nameStart = -1;
+               int nameEnd = -1;
+               int64_t readBytes = -1;
+               int64_t writeBytes = -1;
+               const int count = sscanf(start, "%*d %*d %n%*s%n %*u %*u %" SCNu64 " %*u %*u %*u %" SCNu64, &nameStart, &nameEnd, &readBytes, &writeBytes);
+               if (count != 2) {
+                       logg->logError(__FILE__, __LINE__, "Unable to parse /proc/diskstats");
+                       handleException();
+               }
+
+               // Skip partitions which are identified if the name is a substring of the last non-partition
+               if ((lastName == NULL) || (strncmp(lastName, start + nameStart, lastNameLen) != 0)) {
+                       lastName = start + nameStart;
+                       lastNameLen = nameEnd - nameStart;
+                       mReadBytes += readBytes;
+                       mWriteBytes += writeBytes;
+               }
+
+               if (end == NULL) {
+                       break;
+               }
+               start = end + 1;
+       }
+}
+
+void DiskIODriver::start() {
+       doRead();
+       // Initialize previous values
+       for (DriverCounter *counter = getCounters(); counter != NULL; counter = counter->getNext()) {
+               if (!counter->isEnabled()) {
+                       continue;
+               }
+               counter->read();
+       }
+}
+
+void DiskIODriver::read(Buffer *const buffer) {
+       doRead();
+       super::read(buffer);
+}
diff --git a/tools/gator/daemon/DiskIODriver.h b/tools/gator/daemon/DiskIODriver.h
new file mode 100644 (file)
index 0000000..d0db18c
--- /dev/null
@@ -0,0 +1,39 @@
+/**
+ * Copyright (C) ARM Limited 2013-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef DISKIODRIVER_H
+#define DISKIODRIVER_H
+
+#include "Driver.h"
+#include "DynBuf.h"
+
+class DiskIODriver : public PolledDriver {
+private:
+       typedef PolledDriver super;
+
+public:
+       DiskIODriver();
+       ~DiskIODriver();
+
+       void readEvents(mxml_node_t *const root);
+       void start();
+       void read(Buffer *const buffer);
+
+private:
+       void doRead();
+
+       DynBuf mBuf;
+       int64_t mReadBytes;
+       int64_t mWriteBytes;
+
+       // Intentionally unimplemented
+       DiskIODriver(const DiskIODriver &);
+       DiskIODriver &operator=(const DiskIODriver &);
+};
+
+#endif // DISKIODRIVER_H
index 09e04016291232477fc2d4d38ff8a75ec48b0dc9..275da31c7a0d9d266a4d8e3865b0c9f8802b84c0 100644 (file)
@@ -8,8 +8,89 @@
 
 #include "Driver.h"
 
+#include "Buffer.h"
+#include "SessionData.h"
+
+DriverCounter::DriverCounter(DriverCounter *const next, const char *const name) : mNext(next), mName(name), mKey(getEventKey()), mEnabled(false) {
+}
+
+DriverCounter::~DriverCounter() {
+       delete mName;
+}
+
 Driver *Driver::head = NULL;
 
 Driver::Driver() : next(head) {
        head = this;
 }
+
+SimpleDriver::~SimpleDriver() {
+       DriverCounter *counters = mCounters;
+       while (counters != NULL) {
+               DriverCounter *counter = counters;
+               counters = counter->getNext();
+               delete counter;
+       }
+}
+
+DriverCounter *SimpleDriver::findCounter(const Counter &counter) const {
+       for (DriverCounter *driverCounter = mCounters; driverCounter != NULL; driverCounter = driverCounter->getNext()) {
+               if (strcmp(driverCounter->getName(), counter.getType()) == 0) {
+                       return driverCounter;
+               }
+       }
+
+       return NULL;
+}
+
+bool SimpleDriver::claimCounter(const Counter &counter) const {
+       return findCounter(counter) != NULL;
+}
+
+bool SimpleDriver::countersEnabled() const {
+       for (DriverCounter *counter = mCounters; counter != NULL; counter = counter->getNext()) {
+               if (counter->isEnabled()) {
+                       return true;
+               }
+       }
+       return false;
+}
+
+void SimpleDriver::resetCounters() {
+       for (DriverCounter *counter = mCounters; counter != NULL; counter = counter->getNext()) {
+               counter->setEnabled(false);
+       }
+}
+
+void SimpleDriver::setupCounter(Counter &counter) {
+       DriverCounter *const driverCounter = findCounter(counter);
+       if (driverCounter == NULL) {
+               counter.setEnabled(false);
+               return;
+       }
+       driverCounter->setEnabled(true);
+       counter.setKey(driverCounter->getKey());
+}
+
+int SimpleDriver::writeCounters(mxml_node_t *root) const {
+       int count = 0;
+       for (DriverCounter *counter = mCounters; counter != NULL; counter = counter->getNext()) {
+               mxml_node_t *node = mxmlNewElement(root, "counter");
+               mxmlElementSetAttr(node, "name", counter->getName());
+               ++count;
+       }
+
+       return count;
+}
+
+PolledDriver::~PolledDriver() {
+}
+
+void PolledDriver::read(Buffer *const buffer) {
+       for (DriverCounter *counter = getCounters(); counter != NULL; counter = counter->getNext()) {
+               if (!counter->isEnabled()) {
+                       continue;
+               }
+               buffer->event64(counter->getKey(), counter->read());
+       }
+}
index e5ed7b6c1295993bcc454a5b55efbb814b129129..72870e3dbca1a9ec99aa31686e43587d314ad4da 100644 (file)
@@ -9,10 +9,36 @@
 #ifndef DRIVER_H
 #define DRIVER_H
 
+#include <stdint.h>
+
 #include "mxml/mxml.h"
 
+class Buffer;
 class Counter;
 
+class DriverCounter {
+public:
+       DriverCounter(DriverCounter *const next, const char *const name);
+       virtual ~DriverCounter();
+
+       DriverCounter *getNext() const { return mNext; }
+       const char *getName() const { return mName; }
+       int getKey() const { return mKey; }
+       bool isEnabled() const { return mEnabled; }
+       void setEnabled(const bool enabled) { mEnabled = enabled; }
+       virtual int64_t read() { return -1; }
+
+private:
+       DriverCounter *const mNext;
+       const char *const mName;
+       const int mKey;
+       bool mEnabled;
+
+       // Intentionally unimplemented
+       DriverCounter(const DriverCounter &);
+       DriverCounter &operator=(const DriverCounter &);
+};
+
 class Driver {
 public:
        static Driver *getHead() { return head; }
@@ -26,15 +52,17 @@ public:
        // Enables and prepares the counter for capture
        virtual void setupCounter(Counter &counter) = 0;
 
+       // Performs any actions needed for setup or based on eventsXML
+       virtual void readEvents(mxml_node_t *const) {}
        // Emits available counters
-       virtual int writeCounters(mxml_node_t *root) const = 0;
+       virtual int writeCounters(mxml_node_t *const root) const = 0;
        // Emits possible dynamically generated events/counters
-       virtual void writeEvents(mxml_node_t *) const {}
+       virtual void writeEvents(mxml_node_t *const) const {}
 
        Driver *getNext() const { return next; }
 
 protected:
-       Driver ();
+       Driver();
 
 private:
        static Driver *head;
@@ -45,4 +73,46 @@ private:
        Driver &operator=(const Driver &);
 };
 
+class SimpleDriver : public Driver {
+public:
+       virtual ~SimpleDriver();
+
+       bool claimCounter(const Counter &counter) const;
+       bool countersEnabled() const;
+       void resetCounters();
+       void setupCounter(Counter &counter);
+       int writeCounters(mxml_node_t *root) const;
+
+protected:
+       SimpleDriver() : mCounters(NULL) {}
+
+       DriverCounter *getCounters() const { return mCounters; }
+       void setCounters(DriverCounter *const counter) { mCounters = counter; }
+
+       DriverCounter *findCounter(const Counter &counter) const;
+
+private:
+       DriverCounter *mCounters;
+
+       // Intentionally unimplemented
+       SimpleDriver(const SimpleDriver &);
+       SimpleDriver &operator=(const SimpleDriver &);
+};
+
+class PolledDriver : public SimpleDriver {
+public:
+       virtual ~PolledDriver();
+
+       virtual void start() {}
+       virtual void read(Buffer *const buffer);
+
+protected:
+       PolledDriver() {}
+
+private:
+       // Intentionally unimplemented
+       PolledDriver(const PolledDriver &);
+       PolledDriver &operator=(const PolledDriver &);
+};
+
 #endif // DRIVER_H
index f78ec6b7ce413648d6b3d8df8e6295b9f37d3fb2..7f299b646952c101c511d59f4985d0bc4fcf6775 100644 (file)
@@ -6,25 +6,31 @@
  * published by the Free Software Foundation.
  */
 
+// Define to get format macros from inttypes.h
 #define __STDC_FORMAT_MACROS
 
 #include "DriverSource.h"
 
 #include <fcntl.h>
 #include <inttypes.h>
+#include <sys/prctl.h>
 #include <unistd.h>
 
+#include "Buffer.h"
 #include "Child.h"
+#include "DynBuf.h"
 #include "Fifo.h"
 #include "Logging.h"
+#include "Proc.h"
 #include "Sender.h"
 #include "SessionData.h"
 
 extern Child *child;
 
-DriverSource::DriverSource(sem_t *senderSem, sem_t *startProfile) : mFifo(NULL), mSenderSem(senderSem), mStartProfile(startProfile), mBufferSize(0), mBufferFD(0), mLength(1) {
+DriverSource::DriverSource(sem_t *senderSem, sem_t *startProfile) : mBuffer(NULL), mFifo(NULL), mSenderSem(senderSem), mStartProfile(startProfile), mBufferSize(0), mBufferFD(0), mLength(1) {
        int driver_version = 0;
 
+       mBuffer = new Buffer(0, FRAME_PERF_ATTRS, 4*1024*1024, senderSem);
        if (readIntDriver("/dev/gator/version", &driver_version) == -1) {
                logg->logError(__FILE__, __LINE__, "Error reading gator driver version");
                handleException();
@@ -43,7 +49,7 @@ DriverSource::DriverSource(sem_t *senderSem, sem_t *startProfile) : mFifo(NULL),
                        handleException();
                } else {
                        // Release version mismatch
-                       logg->logError(__FILE__, __LINE__, 
+                       logg->logError(__FILE__, __LINE__,
                                "gator driver version \"%d\" is different than gator daemon version \"%d\".\n"
                                ">> Please upgrade the driver and daemon to the latest versions.", driver_version, PROTOCOL_VERSION);
                        handleException();
@@ -87,6 +93,28 @@ bool DriverSource::prepare() {
        return true;
 }
 
+void DriverSource::bootstrapThread() {
+       prctl(PR_SET_NAME, (unsigned long)&"gatord-proc", 0, 0, 0);
+
+       DynBuf printb;
+       DynBuf b1;
+       DynBuf b2;
+       const uint64_t currTime = getTime();
+
+       if (!readProcComms(currTime, mBuffer, &printb, &b1, &b2)) {
+               logg->logError(__FILE__, __LINE__, "readProcComms failed");
+               handleException();
+       }
+
+       mBuffer->commit(currTime);
+       mBuffer->setDone();
+}
+
+void *DriverSource::bootstrapThreadStatic(void *arg) {
+       static_cast<DriverSource *>(arg)->bootstrapThread();
+       return NULL;
+}
+
 void DriverSource::run() {
        // Get the initial pointer to the collect buffer
        char *collectBuffer = mFifo->start();
@@ -101,7 +129,7 @@ void DriverSource::run() {
        }
 
        // open the buffer which calls userspace_buffer_open() in the driver
-       mBufferFD = open("/dev/gator/buffer", O_RDONLY);
+       mBufferFD = open("/dev/gator/buffer", O_RDONLY | O_CLOEXEC);
        if (mBufferFD < 0) {
                logg->logError(__FILE__, __LINE__, "The gator driver did not set up properly. Please view the linux console or dmesg log for more information on the failure.");
                handleException();
@@ -138,6 +166,12 @@ void DriverSource::run() {
 
        sem_post(mStartProfile);
 
+       pthread_t bootstrapThreadID;
+       if (pthread_create(&bootstrapThreadID, NULL, bootstrapThreadStatic, this) != 0) {
+               logg->logError(__FILE__, __LINE__, "Unable to start the gator_bootstrap thread");
+               handleException();
+       }
+
        // Collect Data
        do {
                // This command will stall until data is received from the driver
@@ -164,6 +198,8 @@ void DriverSource::run() {
        } while (bytesCollected > 0);
 
        logg->logMessage("Exit collect data loop");
+
+       pthread_join(bootstrapThreadID, NULL);
 }
 
 void DriverSource::interrupt() {
@@ -174,7 +210,7 @@ void DriverSource::interrupt() {
 }
 
 bool DriverSource::isDone() {
-       return mLength <= 0;
+       return mLength <= 0 && (mBuffer == NULL || mBuffer->isDone());
 }
 
 void DriverSource::write(Sender *sender) {
@@ -182,12 +218,22 @@ void DriverSource::write(Sender *sender) {
        if (data != NULL) {
                sender->writeData(data, mLength, RESPONSE_APC_DATA);
                mFifo->release();
+               // Assume the summary packet is in the first block received from the driver
+               gSessionData->mSentSummary = true;
+       }
+       if (mBuffer != NULL && !mBuffer->isDone()) {
+               mBuffer->write(sender);
+               if (mBuffer->isDone()) {
+                       Buffer *buf = mBuffer;
+                       mBuffer = NULL;
+                       delete buf;
+               }
        }
 }
 
 int DriverSource::readIntDriver(const char *fullpath, int *value) {
        char data[40]; // Sufficiently large to hold any integer
-       const int fd = open(fullpath, O_RDONLY);
+       const int fd = open(fullpath, O_RDONLY | O_CLOEXEC);
        if (fd < 0) {
                return -1;
        }
@@ -212,7 +258,7 @@ int DriverSource::readIntDriver(const char *fullpath, int *value) {
 
 int DriverSource::readInt64Driver(const char *fullpath, int64_t *value) {
        char data[40]; // Sufficiently large to hold any integer
-       const int fd = open(fullpath, O_RDONLY);
+       const int fd = open(fullpath, O_RDONLY | O_CLOEXEC);
        if (fd < 0) {
                return -1;
        }
@@ -227,7 +273,7 @@ int DriverSource::readInt64Driver(const char *fullpath, int64_t *value) {
        char *endptr;
        errno = 0;
        *value = strtoll(data, &endptr, 10);
-       if (errno != 0 || *endptr != '\n') {
+       if (errno != 0 || (*endptr != '\n' && *endptr != '\0')) {
                logg->logMessage("Invalid value in file %s", fullpath);
                return -1;
        }
@@ -236,7 +282,7 @@ int DriverSource::readInt64Driver(const char *fullpath, int64_t *value) {
 }
 
 int DriverSource::writeDriver(const char *fullpath, const char *data) {
-       int fd = open(fullpath, O_WRONLY);
+       int fd = open(fullpath, O_WRONLY | O_CLOEXEC);
        if (fd < 0) {
                return -1;
        }
index dcf1078a239c374032d70b88c2ebaf01de1db60e..ec27b0815bbfe52dfbce9dcd52cf5a3829401a1b 100644 (file)
@@ -14,6 +14,7 @@
 
 #include "Source.h"
 
+class Buffer;
 class Fifo;
 
 class DriverSource : public Source {
@@ -37,6 +38,10 @@ public:
        static int writeReadDriver(const char *path, int64_t *value);
 
 private:
+       static void *bootstrapThreadStatic(void *arg);
+       void bootstrapThread();
+
+       Buffer *mBuffer;
        Fifo *mFifo;
        sem_t *const mSenderSem;
        sem_t *const mStartProfile;
index 6f92b336ae1955c33e902c66ad48fe8607fcb1c5..df20713ad63cac928b96b6e9bbd9faf09951a575 100644 (file)
@@ -40,7 +40,7 @@ int DynBuf::resize(const size_t minCapacity) {
 bool DynBuf::read(const char *const path) {
        int result = false;
 
-       const int fd = open(path, O_RDONLY);
+       const int fd = open(path, O_RDONLY | O_CLOEXEC);
        if (fd < 0) {
                logg->logMessage("%s(%s:%i): open failed", __FUNCTION__, __FILE__, __LINE__);
                return false;
index a07a046f335367b7825f639b6d037800c458a54c..d905bbabe98853cb760dfc3ad0c951e3c449faab 100644 (file)
@@ -13,7 +13,7 @@
 #include "OlyUtility.h"
 #include "SessionData.h"
 
-char* EventsXML::getXML() {
+mxml_node_t *EventsXML::getTree() {
 #include "events_xml.h" // defines and initializes char events_xml[] and int events_xml_len
        char path[PATH_MAX];
        mxml_node_t *xml;
@@ -38,29 +38,35 @@ char* EventsXML::getXML() {
                xml = mxmlLoadString(NULL, (const char *)events_xml, MXML_NO_CALLBACK);
        }
 
+       return xml;
+}
+
+char *EventsXML::getXML() {
+       mxml_node_t *xml = getTree();
+
        // Add dynamic events from the drivers
        mxml_node_t *events = mxmlFindElement(xml, xml, "events", NULL, NULL, MXML_DESCEND);
        if (!events) {
-               logg->logMessage("Unable to find <events> node in the events.xml");
+               logg->logError(__FILE__, __LINE__, "Unable to find <events> node in the events.xml");
                handleException();
        }
        for (Driver *driver = Driver::getHead(); driver != NULL; driver = driver->getNext()) {
                driver->writeEvents(events);
        }
 
-       char* string = mxmlSaveAllocString(xml, mxmlWhitespaceCB);
+       char *string = mxmlSaveAllocString(xml, mxmlWhitespaceCB);
        mxmlDelete(xml);
 
        return string;
 }
 
-void EventsXML::write(const char* path) {
+void EventsXML::write(const char *path) {
        char file[PATH_MAX];
 
        // Set full path
        snprintf(file, PATH_MAX, "%s/events.xml", path);
-       
-       char* buf = getXML();
+
+       char *buf = getXML();
        if (util->writeToDisk(file, buf) < 0) {
                logg->logError(__FILE__, __LINE__, "Error writing %s\nPlease verify the path.", file);
                handleException();
index 6cd1560f7d4e13d042281c31e1bdfe8288c054ba..ff7a02fd3c78b56dd702cdffaa30da86bcac2e4a 100644 (file)
@@ -9,9 +9,12 @@
 #ifndef EVENTS_XML
 #define EVENTS_XML
 
+#include "mxml/mxml.h"
+
 class EventsXML {
 public:
-       char* getXML();
+       mxml_node_t *getTree();
+       char *getXML();
        void write(const char* path);
 };
 
index fe5824b04812eb822738734695b79f731d1c2b72..8f5e6b684c538b192c03ab40529836d61a4d2157 100644 (file)
 
 #include "ExternalSource.h"
 
+#include <fcntl.h>
 #include <sys/prctl.h>
+#include <unistd.h>
 
 #include "Logging.h"
 #include "OlySocket.h"
 #include "SessionData.h"
 
-ExternalSource::ExternalSource(sem_t *senderSem) : mBuffer(0, FRAME_EXTERNAL, 1024, senderSem), mSock("/tmp/gator") {
+static const char MALI_VIDEO[] = "\0mali-video";
+static const char MALI_VIDEO_STARTUP[] = "\0mali-video-startup";
+static const char MALI_VIDEO_V1[] = "MALI_VIDEO 1\n";
+static const char MALI_GRAPHICS[] = "\0mali_thirdparty_server";
+static const char MALI_GRAPHICS_STARTUP[] = "\0mali_thirdparty_client";
+static const char MALI_GRAPHICS_V1[] = "MALI_GRAPHICS 1\n";
+
+static bool setNonblock(const int fd) {
+       int flags;
+
+       flags = fcntl(fd, F_GETFL);
+       if (flags < 0) {
+               logg->logMessage("fcntl getfl failed");
+               return false;
+       }
+
+       if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) != 0) {
+               logg->logMessage("fcntl setfl failed");
+               return false;
+       }
+
+       return true;
+}
+
+ExternalSource::ExternalSource(sem_t *senderSem) : mBuffer(0, FRAME_EXTERNAL, 128*1024, senderSem), mMonitor(), mMveStartupUds(MALI_VIDEO_STARTUP, sizeof(MALI_VIDEO_STARTUP)), mMaliStartupUds(MALI_GRAPHICS_STARTUP, sizeof(MALI_GRAPHICS_STARTUP)), mAnnotate(8083), mInterruptFd(-1), mMaliUds(-1), mMveUds(-1) {
+       sem_init(&mBufferSem, 0, 0);
 }
 
 ExternalSource::~ExternalSource() {
 }
 
+void ExternalSource::waitFor(const int bytes) {
+       while (mBuffer.bytesAvailable() <= bytes) {
+               sem_wait(&mBufferSem);
+       }
+}
+
+void ExternalSource::configureConnection(const int fd, const char *const handshake, size_t size) {
+       if (!setNonblock(fd)) {
+               logg->logError(__FILE__, __LINE__, "Unable to set nonblock on fh");
+               handleException();
+       }
+
+       if (!mMonitor.add(fd)) {
+               logg->logError(__FILE__, __LINE__, "Unable to add fh to monitor");
+               handleException();
+       }
+
+       // Write the handshake to the circular buffer
+       waitFor(Buffer::MAXSIZE_PACK32 + size - 1);
+       mBuffer.packInt(fd);
+       mBuffer.writeBytes(handshake, size - 1);
+       mBuffer.commit(1);
+}
+
+bool ExternalSource::connectMali() {
+       mMaliUds = OlySocket::connect(MALI_GRAPHICS, sizeof(MALI_GRAPHICS));
+       if (mMaliUds < 0) {
+               return false;
+       }
+
+       configureConnection(mMaliUds, MALI_GRAPHICS_V1, sizeof(MALI_GRAPHICS_V1));
+
+       return true;
+}
+
+bool ExternalSource::connectMve() {
+       if (!gSessionData->maliVideo.countersEnabled()) {
+               return true;
+       }
+
+       mMveUds = OlySocket::connect(MALI_VIDEO, sizeof(MALI_VIDEO));
+       if (mMveUds < 0) {
+               return false;
+       }
+
+       if (!gSessionData->maliVideo.start(mMveUds)) {
+               return false;
+       }
+
+       configureConnection(mMveUds, MALI_VIDEO_V1, sizeof(MALI_VIDEO_V1));
+
+       return true;
+}
+
 bool ExternalSource::prepare() {
+       if (!mMonitor.init() ||
+                       !setNonblock(mMveStartupUds.getFd()) || !mMonitor.add(mMveStartupUds.getFd()) ||
+                       !setNonblock(mMaliStartupUds.getFd()) || !mMonitor.add(mMaliStartupUds.getFd()) ||
+                       !setNonblock(mAnnotate.getFd()) || !mMonitor.add(mAnnotate.getFd()) ||
+                       false) {
+               return false;
+       }
+
+       connectMali();
+       connectMve();
+
        return true;
 }
 
 void ExternalSource::run() {
-       prctl(PR_SET_NAME, (unsigned long)&"gatord-uds", 0, 0, 0);
+       int pipefd[2];
+
+       prctl(PR_SET_NAME, (unsigned long)&"gatord-external", 0, 0, 0);
+
+       if (pipe_cloexec(pipefd) != 0) {
+               logg->logError(__FILE__, __LINE__, "pipe failed");
+               handleException();
+       }
+       mInterruptFd = pipefd[1];
+
+       if (!mMonitor.add(pipefd[0])) {
+               logg->logError(__FILE__, __LINE__, "Monitor::add failed");
+               handleException();
+       }
+
+       // Notify annotate clients to retry connecting to gatord
+       gSessionData->annotateListener.signal();
 
        while (gSessionData->mSessionIsActive) {
-               // Will be aborted when the socket is closed at the end of the capture
-               int length = mSock.receive(mBuffer.getWritePos(), mBuffer.contiguousSpaceAvailable());
-               if (length <= 0) {
-                       break;
+               struct epoll_event events[16];
+               // Clear any pending sem posts
+               while (sem_trywait(&mBufferSem) == 0);
+               int ready = mMonitor.wait(events, ARRAY_LENGTH(events), -1);
+               if (ready < 0) {
+                       logg->logError(__FILE__, __LINE__, "Monitor::wait failed");
+                       handleException();
                }
 
-               mBuffer.advanceWrite(length);
-               mBuffer.check(0);
+               const uint64_t currTime = getTime();
+
+               for (int i = 0; i < ready; ++i) {
+                       const int fd = events[i].data.fd;
+                       if (fd == mMveStartupUds.getFd()) {
+                               // Mali Video Engine says it's alive
+                               int client = mMveStartupUds.acceptConnection();
+                               // Don't read from this connection, establish a new connection to Mali-V500
+                               close(client);
+                               if (!connectMve()) {
+                                       logg->logError(__FILE__, __LINE__, "Unable to configure incoming Mali video connection");
+                                       handleException();
+                               }
+                       } else if (fd == mMaliStartupUds.getFd()) {
+                               // Mali Graphics says it's alive
+                               int client = mMaliStartupUds.acceptConnection();
+                               // Don't read from this connection, establish a new connection to Mali Graphics
+                               close(client);
+                               if (!connectMali()) {
+                                       logg->logError(__FILE__, __LINE__, "Unable to configure incoming Mali graphics connection");
+                                       handleException();
+                               }
+                       } else if (fd == mAnnotate.getFd()) {
+                               int client = mAnnotate.acceptConnection();
+                               if (!setNonblock(client) || !mMonitor.add(client)) {
+                                       logg->logError(__FILE__, __LINE__, "Unable to set socket options on incoming annotation connection");
+                                       handleException();
+                               }
+                       } else if (fd == pipefd[0]) {
+                               // Means interrupt has been called and mSessionIsActive should be reread
+                       } else {
+                               /* This can result in some starvation if there are multiple
+                                * threads which are annotating heavily, but it is not
+                                * recommended that threads annotate that much as it can also
+                                * starve out the gator data.
+                                */
+                               while (gSessionData->mSessionIsActive) {
+                                       // Wait until there is enough room for the fd, two headers and two ints
+                                       waitFor(7*Buffer::MAXSIZE_PACK32 + 2*sizeof(uint32_t));
+                                       mBuffer.packInt(fd);
+                                       const int contiguous = mBuffer.contiguousSpaceAvailable();
+                                       const int bytes = read(fd, mBuffer.getWritePos(), contiguous);
+                                       if (bytes < 0) {
+                                               if (errno == EAGAIN) {
+                                                       // Nothing left to read
+                                                       mBuffer.commit(currTime);
+                                                       break;
+                                               }
+                                               // Something else failed, close the socket
+                                               mBuffer.commit(currTime);
+                                               mBuffer.packInt(-1);
+                                               mBuffer.packInt(fd);
+                                               mBuffer.commit(currTime);
+                                               close(fd);
+                                               break;
+                                       } else if (bytes == 0) {
+                                               // The other side is closed
+                                               mBuffer.commit(currTime);
+                                               mBuffer.packInt(-1);
+                                               mBuffer.packInt(fd);
+                                               mBuffer.commit(currTime);
+                                               close(fd);
+                                               break;
+                                       }
+
+                                       mBuffer.advanceWrite(bytes);
+                                       mBuffer.commit(currTime);
+
+                                       // Short reads also mean nothing is left to read
+                                       if (bytes < contiguous) {
+                                               break;
+                                       }
+                               }
+                       }
+               }
        }
 
        mBuffer.setDone();
+
+       if (mMveUds >= 0) {
+               gSessionData->maliVideo.stop(mMveUds);
+       }
+
+       mInterruptFd = -1;
+       close(pipefd[0]);
+       close(pipefd[1]);
 }
 
 void ExternalSource::interrupt() {
-       // Do nothing
+       if (mInterruptFd >= 0) {
+               int8_t c = 0;
+               // Write to the pipe to wake the monitor which will cause mSessionIsActive to be reread
+               if (::write(mInterruptFd, &c, sizeof(c)) != sizeof(c)) {
+                       logg->logError(__FILE__, __LINE__, "write failed");
+                       handleException();
+               }
+       }
 }
 
 bool ExternalSource::isDone() {
@@ -50,7 +249,12 @@ bool ExternalSource::isDone() {
 }
 
 void ExternalSource::write(Sender *sender) {
+       // Don't send external data until the summary packet is sent so that monotonic delta is available
+       if (!gSessionData->mSentSummary) {
+               return;
+       }
        if (!mBuffer.isDone()) {
                mBuffer.write(sender);
+               sem_post(&mBufferSem);
        }
 }
index 2052bdf2823e39dcf651fa25c4282c8e4ea4f062..919e75e8a41af34d8935d8b58e3d6395602f87b6 100644 (file)
 #include <semaphore.h>
 
 #include "Buffer.h"
+#include "Monitor.h"
 #include "OlySocket.h"
 #include "Source.h"
 
-// Unix domain socket counters from external sources like graphics drivers
+// Counters from external sources like graphics drivers and annotations
 class ExternalSource : public Source {
 public:
        ExternalSource(sem_t *senderSem);
@@ -29,8 +30,20 @@ public:
        void write(Sender *sender);
 
 private:
+       void waitFor(const int bytes);
+       void configureConnection(const int fd, const char *const handshake, size_t size);
+       bool connectMali();
+       bool connectMve();
+
+       sem_t mBufferSem;
        Buffer mBuffer;
-       OlySocket mSock;
+       Monitor mMonitor;
+       OlyServerSocket mMveStartupUds;
+       OlyServerSocket mMaliStartupUds;
+       OlyServerSocket mAnnotate;
+       int mInterruptFd;
+       int mMaliUds;
+       int mMveUds;
 
        // Intentionally unimplemented
        ExternalSource(const ExternalSource &);
diff --git a/tools/gator/daemon/FSDriver.cpp b/tools/gator/daemon/FSDriver.cpp
new file mode 100644 (file)
index 0000000..dd8eb80
--- /dev/null
@@ -0,0 +1,158 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "FSDriver.h"
+
+#include <fcntl.h>
+#include <regex.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "DriverSource.h"
+#include "Logging.h"
+
+class FSCounter : public DriverCounter {
+public:
+       FSCounter(DriverCounter *next, char *name, char *path, const char *regex);
+       ~FSCounter();
+
+       const char *getPath() const { return mPath; }
+
+       int64_t read();
+
+private:
+       char *const mPath;
+       regex_t mReg;
+       bool mUseRegex;
+
+       // Intentionally unimplemented
+       FSCounter(const FSCounter &);
+       FSCounter &operator=(const FSCounter &);
+};
+
+FSCounter::FSCounter(DriverCounter *next, char *name, char *path, const char *regex) : DriverCounter(next, name), mPath(path), mUseRegex(regex != NULL) {
+       if (mUseRegex) {
+               int result = regcomp(&mReg, regex, REG_EXTENDED);
+               if (result != 0) {
+                       char buf[128];
+                       regerror(result, &mReg, buf, sizeof(buf));
+                       logg->logError(__FILE__, __LINE__, "Invalid regex '%s': %s", regex, buf);
+                       handleException();
+               }
+       }
+}
+
+FSCounter::~FSCounter() {
+       free(mPath);
+       if (mUseRegex) {
+               regfree(&mReg);
+       }
+}
+
+int64_t FSCounter::read() {
+       int64_t value;
+       if (mUseRegex) {
+               char buf[4096];
+               size_t pos = 0;
+               const int fd = open(mPath, O_RDONLY | O_CLOEXEC);
+               if (fd < 0) {
+                       goto fail;
+               }
+               while (pos < sizeof(buf) - 1) {
+                       const ssize_t bytes = ::read(fd, buf + pos, sizeof(buf) - pos - 1);
+                       if (bytes < 0) {
+                               goto fail;
+                       } else if (bytes == 0) {
+                               break;
+                       }
+                       pos += bytes;
+               }
+               close(fd);
+               buf[pos] = '\0';
+
+               regmatch_t match[2];
+               int result = regexec(&mReg, buf, 2, match, 0);
+               if (result != 0) {
+                       regerror(result, &mReg, buf, sizeof(buf));
+                       logg->logError(__FILE__, __LINE__, "Parsing %s failed: %s", mPath, buf);
+                       handleException();
+               }
+
+               if (match[1].rm_so < 0) {
+                       logg->logError(__FILE__, __LINE__, "Parsing %s failed", mPath);
+                       handleException();
+               }
+
+               errno = 0;
+               value = strtoll(buf + match[1].rm_so, NULL, 0);
+               if (errno != 0) {
+                       logg->logError(__FILE__, __LINE__, "Parsing %s failed: %s", mPath, strerror(errno));
+                       handleException();
+               }
+       } else {
+               if (DriverSource::readInt64Driver(mPath, &value) != 0) {
+                       goto fail;
+               }
+       }
+       return value;
+
+ fail:
+       logg->logError(__FILE__, __LINE__, "Unable to read %s", mPath);
+       handleException();
+}
+
+FSDriver::FSDriver() {
+}
+
+FSDriver::~FSDriver() {
+}
+
+void FSDriver::readEvents(mxml_node_t *const xml) {
+       mxml_node_t *node = xml;
+       while (true) {
+               node = mxmlFindElement(node, xml, "event", NULL, NULL, MXML_DESCEND);
+               if (node == NULL) {
+                       break;
+               }
+               const char *counter = mxmlElementGetAttr(node, "counter");
+               if (counter == NULL) {
+                       continue;
+               }
+
+               if (counter[0] == '/') {
+                       logg->logError(__FILE__, __LINE__, "Old style filesystem counter (%s) detected, please create a new unique counter value and move the filename into the path attribute, see events-Filesystem.xml for examples", counter);
+                       handleException();
+               }
+
+               if (strncmp(counter, "filesystem_", 11) != 0) {
+                       continue;
+               }
+
+               const char *path = mxmlElementGetAttr(node, "path");
+               if (path == NULL) {
+                       logg->logError(__FILE__, __LINE__, "The filesystem counter %s is missing the required path attribute", counter);
+                       handleException();
+               }
+               const char *regex = mxmlElementGetAttr(node, "regex");
+               setCounters(new FSCounter(getCounters(), strdup(counter), strdup(path), regex));
+       }
+}
+
+int FSDriver::writeCounters(mxml_node_t *root) const {
+       int count = 0;
+       for (FSCounter *counter = static_cast<FSCounter *>(getCounters()); counter != NULL; counter = static_cast<FSCounter *>(counter->getNext())) {
+               if (access(counter->getPath(), R_OK) == 0) {
+                       mxml_node_t *node = mxmlNewElement(root, "counter");
+                       mxmlElementSetAttr(node, "name", counter->getName());
+                       ++count;
+               }
+       }
+
+       return count;
+}
diff --git a/tools/gator/daemon/FSDriver.h b/tools/gator/daemon/FSDriver.h
new file mode 100644 (file)
index 0000000..a7dc8b4
--- /dev/null
@@ -0,0 +1,29 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FSDRIVER_H
+#define FSDRIVER_H
+
+#include "Driver.h"
+
+class FSDriver : public PolledDriver {
+public:
+       FSDriver();
+       ~FSDriver();
+
+       void readEvents(mxml_node_t *const xml);
+
+       int writeCounters(mxml_node_t *root) const;
+
+private:
+       // Intentionally unimplemented
+       FSDriver(const FSDriver &);
+       FSDriver &operator=(const FSDriver &);
+};
+
+#endif // FSDRIVER_H
index f672e92a6807403c43274ad3705cc16a2dd2aee6..41275fd287b8c7a803ced309fd0d5f5e30d49848 100644 (file)
@@ -9,9 +9,6 @@
 #include "Fifo.h"
 
 #include <stdlib.h>
-#ifdef WIN32
-#define valloc malloc
-#endif
 
 #include "Logging.h"
 
@@ -23,7 +20,7 @@ Fifo::Fifo(int singleBufferSize, int bufferSize, sem_t* readerSem) {
   mWrapThreshold = bufferSize;
   mSingleBufferSize = singleBufferSize;
   mReaderSem = readerSem;
-  mBuffer = (char*)valloc(bufferSize + singleBufferSize);
+  mBuffer = (char*)malloc(bufferSize + singleBufferSize);
   mEnd = false;
 
   if (mBuffer == NULL) {
index 7dd7426132d8c332c026a83879e20b20bfd7d0f5..21c8d8580391230eb992e8032b92c6d84a6d627a 100644 (file)
@@ -6,8 +6,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef        __FIFO_H__
-#define        __FIFO_H__
+#ifndef __FIFO_H__
+#define __FIFO_H__
 
 #ifdef WIN32
 #include <windows.h>
@@ -35,14 +35,14 @@ public:
 
 private:
   int mSingleBufferSize, mWrite, mRead, mReadCommit, mRaggedEnd, mWrapThreshold;
-  sem_t        mWaitForSpaceSem;
+  sem_t mWaitForSpaceSem;
   sem_t* mReaderSem;
-  char*        mBuffer;
-  bool mEnd;
+  char* mBuffer;
+  bool mEnd;
 
   // Intentionally unimplemented
   Fifo(const Fifo &);
   Fifo &operator=(const Fifo &);
 };
 
-#endif         //__FIFO_H__
+#endif //__FIFO_H__
diff --git a/tools/gator/daemon/FtraceDriver.cpp b/tools/gator/daemon/FtraceDriver.cpp
new file mode 100644 (file)
index 0000000..b156f1c
--- /dev/null
@@ -0,0 +1,118 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "FtraceDriver.h"
+
+#include <regex.h>
+
+#include "Logging.h"
+
+class FtraceCounter : public DriverCounter {
+public:
+       FtraceCounter(DriverCounter *next, char *name, const char *regex);
+       ~FtraceCounter();
+
+       int read(const char *const line, int64_t *values);
+
+private:
+       regex_t reg;
+
+       // Intentionally unimplemented
+       FtraceCounter(const FtraceCounter &);
+       FtraceCounter &operator=(const FtraceCounter &);
+};
+
+FtraceCounter::FtraceCounter(DriverCounter *next, char *name, const char *regex) : DriverCounter(next, name) {
+       int result = regcomp(&reg, regex, REG_EXTENDED);
+       if (result != 0) {
+               char buf[128];
+               regerror(result, &reg, buf, sizeof(buf));
+               logg->logError(__FILE__, __LINE__, "Invalid regex '%s': %s", regex, buf);
+               handleException();
+       }
+}
+
+FtraceCounter::~FtraceCounter() {
+       regfree(&reg);
+}
+
+int FtraceCounter::read(const char *const line, int64_t *values) {
+       regmatch_t match[2];
+       int result = regexec(&reg, line, 2, match, 0);
+       if (result != 0) {
+               // No match
+               return 0;
+       }
+
+       if (match[1].rm_so < 0) {
+               logg->logError(__FILE__, __LINE__, "Parsing %s failed", getName());
+               handleException();
+       }
+
+       errno = 0;
+       int64_t value = strtoll(line + match[1].rm_so, NULL, 0);
+       if (errno != 0) {
+               logg->logError(__FILE__, __LINE__, "Parsing %s failed: %s", getName(), strerror(errno));
+               handleException();
+       }
+
+       values[0] = getKey();
+       values[1] = value;
+
+       return 1;
+}
+
+FtraceDriver::FtraceDriver() : mValues(NULL) {
+}
+
+FtraceDriver::~FtraceDriver() {
+       delete [] mValues;
+}
+
+void FtraceDriver::readEvents(mxml_node_t *const xml) {
+       mxml_node_t *node = xml;
+       int count = 0;
+       while (true) {
+               node = mxmlFindElement(node, xml, "event", NULL, NULL, MXML_DESCEND);
+               if (node == NULL) {
+                       break;
+               }
+               const char *counter = mxmlElementGetAttr(node, "counter");
+               if (counter == NULL) {
+                       continue;
+               }
+
+               if (strncmp(counter, "ftrace_", 7) != 0) {
+                       continue;
+               }
+
+               const char *regex = mxmlElementGetAttr(node, "regex");
+               if (regex == NULL) {
+                       logg->logError(__FILE__, __LINE__, "The regex counter %s is missing the required regex attribute", counter);
+                       handleException();
+               }
+               setCounters(new FtraceCounter(getCounters(), strdup(counter), regex));
+               ++count;
+       }
+
+       mValues = new int64_t[2*count];
+}
+
+int FtraceDriver::read(const char *line, int64_t **buf) {
+       int count = 0;
+
+       for (FtraceCounter *counter = static_cast<FtraceCounter *>(getCounters()); counter != NULL; counter = static_cast<FtraceCounter *>(counter->getNext())) {
+               if (!counter->isEnabled()) {
+                       continue;
+               }
+               count += counter->read(line, mValues + 2*count);
+       }
+
+       *buf = mValues;
+       return count;
+}
diff --git a/tools/gator/daemon/FtraceDriver.h b/tools/gator/daemon/FtraceDriver.h
new file mode 100644 (file)
index 0000000..5f958be
--- /dev/null
@@ -0,0 +1,31 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FTRACEDRIVER_H
+#define FTRACEDRIVER_H
+
+#include "Driver.h"
+
+class FtraceDriver : public SimpleDriver {
+public:
+       FtraceDriver();
+       ~FtraceDriver();
+
+       void readEvents(mxml_node_t *const xml);
+
+       int read(const char *line, int64_t **buf);
+
+private:
+       int64_t *mValues;
+
+       // Intentionally unimplemented
+       FtraceDriver(const FtraceDriver &);
+       FtraceDriver &operator=(const FtraceDriver &);
+};
+
+#endif // FTRACEDRIVER_H
diff --git a/tools/gator/daemon/FtraceSource.cpp b/tools/gator/daemon/FtraceSource.cpp
new file mode 100644 (file)
index 0000000..5216333
--- /dev/null
@@ -0,0 +1,158 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "FtraceSource.h"
+
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include "DriverSource.h"
+#include "Logging.h"
+#include "SessionData.h"
+
+static void handler(int signum)
+{
+       (void)signum;
+} // Note: no trailing ';' — a file-scope empty declaration is non-standard C and a needless warning in C++
+
+FtraceSource::FtraceSource(sem_t *senderSem) : mFtraceFh(NULL), mBuffer(0, FRAME_BLOCK_COUNTER, 128*1024, senderSem), mTid(-1), mTracingOn(0) {
+}
+
+FtraceSource::~FtraceSource() {
+}
+
+bool FtraceSource::prepare() {
+       {
+               struct sigaction act;
+               act.sa_handler = handler;
+               act.sa_flags = (int)SA_RESETHAND;
+               if (sigaction(SIGUSR1, &act, NULL) != 0) {
+                       logg->logError(__FILE__, __LINE__, "sigaction failed: %s\n", strerror(errno));
+                       handleException();
+               }
+       }
+
+       if (DriverSource::readIntDriver("/sys/kernel/debug/tracing/tracing_on", &mTracingOn)) {
+               logg->logError(__FILE__, __LINE__, "Unable to read if ftrace is enabled");
+               handleException();
+       }
+
+       if (DriverSource::writeDriver("/sys/kernel/debug/tracing/tracing_on", "0") != 0) {
+               logg->logError(__FILE__, __LINE__, "Unable to turn ftrace off before truncating the buffer");
+               handleException();
+       }
+
+       {
+               int fd;
+               fd = open("/sys/kernel/debug/tracing/trace", O_WRONLY | O_TRUNC | O_CLOEXEC, 0666);
+               if (fd < 0) {
+                       logg->logError(__FILE__, __LINE__, "Unable truncate ftrace buffer: %s", strerror(errno));
+                       handleException();
+               }
+               close(fd);
+       }
+
+       if (DriverSource::writeDriver("/sys/kernel/debug/tracing/trace_clock", "perf") != 0) {
+               logg->logError(__FILE__, __LINE__, "Unable to switch ftrace to the perf clock, please ensure you are running Linux 3.10 or later");
+               handleException();
+       }
+
+       mFtraceFh = fopen_cloexec("/sys/kernel/debug/tracing/trace_pipe", "rb");
+       if (mFtraceFh == NULL) {
+               logg->logError(__FILE__, __LINE__, "Unable to open trace_pipe");
+               handleException();
+       }
+
+       return true;
+}
+
+void FtraceSource::run() {
+       prctl(PR_SET_NAME, (unsigned long)&"gatord-ftrace", 0, 0, 0);
+       mTid = syscall(__NR_gettid);
+
+       if (DriverSource::writeDriver("/sys/kernel/debug/tracing/tracing_on", "1") != 0) {
+               logg->logError(__FILE__, __LINE__, "Unable to turn ftrace on");
+               handleException();
+       }
+
+       while (gSessionData->mSessionIsActive) {
+               char buf[1<<12];
+
+               if (fgets(buf, sizeof(buf), mFtraceFh) == NULL) {
+                       if (errno == EINTR) {
+                               // Interrupted by interrupt - likely user request to terminate
+                               break;
+                       }
+                       logg->logError(__FILE__, __LINE__, "Unable read trace data: %s", strerror(errno));
+                       handleException();
+               }
+
+               const uint64_t currTime = getTime();
+
+               char *const colon = strstr(buf, ": ");
+               if (colon == NULL) {
+                       logg->logError(__FILE__, __LINE__, "Unable find colon: %s", buf);
+                       handleException();
+               }
+               *colon = '\0';
+
+               char *const space = strrchr(buf, ' ');
+               if (space == NULL) {
+                       logg->logError(__FILE__, __LINE__, "Unable find space: %s", buf);
+                       handleException();
+               }
+               *colon = ':';
+
+               int64_t *data = NULL;
+               int count = gSessionData->ftraceDriver.read(colon + 2, &data);
+               if (count > 0) {
+                       errno = 0;
+                       const long long time = strtod(space, NULL) * 1000000000;
+                       if (errno != 0) {
+                               logg->logError(__FILE__, __LINE__, "Unable to parse time: %s", strerror(errno));
+                               handleException();
+                       }
+                       mBuffer.event64(-1, time);
+
+                       for (int i = 0; i < count; ++i) {
+                               mBuffer.event64(data[2*i + 0], data[2*i + 1]);
+                       }
+
+                       mBuffer.check(currTime);
+               }
+
+       }
+
+       mBuffer.setDone();
+
+       DriverSource::writeDriver("/sys/kernel/debug/tracing/tracing_on", mTracingOn);
+       fclose(mFtraceFh);
+       DriverSource::writeDriver("/sys/kernel/debug/tracing/trace_clock", "local");
+}
+
+void FtraceSource::interrupt() {
+       // Closing the underlying file handle does not result in the read on the ftrace file handle to return, so send a signal to the thread
+       syscall(__NR_tgkill, getpid(), mTid, SIGUSR1);
+}
+
+bool FtraceSource::isDone() {
+       return mBuffer.isDone();
+}
+
+void FtraceSource::write(Sender *sender) {
+       // Don't send ftrace data until the summary packet is sent so that monotonic delta is available
+       if (!gSessionData->mSentSummary) {
+               return;
+       }
+       if (!mBuffer.isDone()) {
+               mBuffer.write(sender);
+       }
+}
diff --git a/tools/gator/daemon/FtraceSource.h b/tools/gator/daemon/FtraceSource.h
new file mode 100644 (file)
index 0000000..2391b88
--- /dev/null
@@ -0,0 +1,43 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FTRACESOURCE_H
+#define FTRACESOURCE_H
+
+#include <semaphore.h>
+#include <stdio.h>
+
+#include "Buffer.h"
+#include "Source.h"
+
+class FtraceSource : public Source {
+public:
+       FtraceSource(sem_t *senderSem);
+       ~FtraceSource();
+
+       bool prepare();
+       void run();
+       void interrupt();
+
+       bool isDone();
+       void write(Sender *sender);
+
+private:
+       void waitFor(const int bytes);
+
+       FILE *mFtraceFh;
+       Buffer mBuffer;
+       int mTid;
+       int mTracingOn;
+
+       // Intentionally unimplemented
+       FtraceSource(const FtraceSource &);
+       FtraceSource &operator=(const FtraceSource &);
+};
+
+#endif // FTRACESOURCE_H
diff --git a/tools/gator/daemon/Hwmon.cpp b/tools/gator/daemon/Hwmon.cpp
deleted file mode 100644 (file)
index 778f307..0000000
+++ /dev/null
@@ -1,342 +0,0 @@
-/**
- * Copyright (C) ARM Limited 2013-2014. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include "Hwmon.h"
-
-#include "libsensors/sensors.h"
-
-#include "Buffer.h"
-#include "Counter.h"
-#include "Logging.h"
-#include "SessionData.h"
-
-class HwmonCounter {
-public:
-       HwmonCounter(HwmonCounter *next, const sensors_chip_name *chip, const sensors_feature *feature);
-       ~HwmonCounter();
-
-       HwmonCounter *getNext() const { return next; }
-       int getKey() const { return key; }
-       bool isEnabled() const { return enabled; }
-       const char *getName() const { return name; }
-       const char *getLabel() const { return label; }
-       const char *getTitle() const { return title; }
-       bool isDuplicate() const { return duplicate; }
-       const char *getDisplay() const { return display; }
-       const char *getUnit() const { return unit; }
-       int getModifier() const { return modifier; }
-
-       void setEnabled(const bool enabled) {
-               this->enabled = enabled;
-               // canRead will clear enabled if the counter is not readable
-               canRead();
-       }
-
-       double read();
-       bool canRead();
-
-private:
-       void init(const sensors_chip_name *chip, const sensors_feature *feature);
-
-       HwmonCounter *const next;
-       const int key;
-       int polled : 1,
-               readable : 1,
-               enabled : 1,
-               monotonic: 1,
-               duplicate : 1;
-
-       const sensors_chip_name *chip;
-       const sensors_feature *feature;
-
-       char *name;
-       char *label;
-       const char *title;
-       const char *display;
-       const char *unit;
-       int modifier;
-       double previous_value;
-
-       sensors_subfeature_type input;
-
-       // Intentionally unimplemented
-       HwmonCounter(const HwmonCounter &);
-       HwmonCounter &operator=(const HwmonCounter &);
-};
-
-HwmonCounter::HwmonCounter(HwmonCounter *next, const sensors_chip_name *chip, const sensors_feature *feature) : next(next), key(getEventKey()), polled(false), readable(false), enabled(false), duplicate(false), chip(chip), feature(feature) {
-
-       int len = sensors_snprintf_chip_name(NULL, 0, chip) + 1;
-       char *chip_name = new char[len];
-       sensors_snprintf_chip_name(chip_name, len, chip);
-
-       len = snprintf(NULL, 0, "hwmon_%s_%d", chip_name, feature->number) + 1;
-       name = new char[len];
-       snprintf(name, len, "hwmon_%s_%d", chip_name, feature->number);
-
-       delete [] chip_name;
-
-       label = sensors_get_label(chip, feature);
-
-       switch (feature->type) {
-       case SENSORS_FEATURE_IN:
-               title = "Voltage";
-               input = SENSORS_SUBFEATURE_IN_INPUT;
-               display = "average";
-               unit = "V";
-               modifier = 1000;
-               monotonic = false;
-               break;
-       case SENSORS_FEATURE_FAN:
-               title = "Fan";
-               input = SENSORS_SUBFEATURE_FAN_INPUT;
-               display = "average";
-               unit = "RPM";
-               modifier = 1;
-               monotonic = false;
-               break;
-       case SENSORS_FEATURE_TEMP:
-               title = "Temperature";
-               input = SENSORS_SUBFEATURE_TEMP_INPUT;
-               display = "maximum";
-               unit = "°C";
-               modifier = 1000;
-               monotonic = false;
-               break;
-       case SENSORS_FEATURE_POWER:
-               title = "Power";
-               input = SENSORS_SUBFEATURE_POWER_INPUT;
-               display = "average";
-               unit = "W";
-               modifier = 1000000;
-               monotonic = false;
-               break;
-       case SENSORS_FEATURE_ENERGY:
-               title = "Energy";
-               input = SENSORS_SUBFEATURE_ENERGY_INPUT;
-               display = "accumulate";
-               unit = "J";
-               modifier = 1000000;
-               monotonic = true;
-               break;
-       case SENSORS_FEATURE_CURR:
-               title = "Current";
-               input = SENSORS_SUBFEATURE_CURR_INPUT;
-               display = "average";
-               unit = "A";
-               modifier = 1000;
-               monotonic = false;
-               break;
-       case SENSORS_FEATURE_HUMIDITY:
-               title = "Humidity";
-               input = SENSORS_SUBFEATURE_HUMIDITY_INPUT;
-               display = "average";
-               unit = "%";
-               modifier = 1000;
-               monotonic = false;
-               break;
-       default:
-               logg->logError(__FILE__, __LINE__, "Unsupported hwmon feature %i", feature->type);
-               handleException();
-       }
-
-       for (HwmonCounter * counter = next; counter != NULL; counter = counter->getNext()) {
-               if (strcmp(label, counter->getLabel()) == 0 && strcmp(title, counter->getTitle()) == 0) {
-                       duplicate = true;
-                       counter->duplicate = true;
-                       break;
-               }
-       }
-}
-
-HwmonCounter::~HwmonCounter() {
-       free((void *)label);
-       delete [] name;
-}
-
-double HwmonCounter::read() {
-       double value;
-       double result;
-       const sensors_subfeature *subfeature;
-
-       // Keep in sync with canRead
-       subfeature = sensors_get_subfeature(chip, feature, input);
-       if (!subfeature) {
-               logg->logError(__FILE__, __LINE__, "No input value for hwmon sensor %s", label);
-               handleException();
-       }
-
-       if (sensors_get_value(chip, subfeature->number, &value) != 0) {
-               logg->logError(__FILE__, __LINE__, "Can't get input value for hwmon sensor %s", label);
-               handleException();
-       }
-
-       result = (monotonic ? value - previous_value : value);
-       previous_value = value;
-
-       return result;
-}
-
-bool HwmonCounter::canRead() {
-       if (!polled) {
-               double value;
-               const sensors_subfeature *subfeature;
-               bool result = true;
-
-               subfeature = sensors_get_subfeature(chip, feature, input);
-               if (!subfeature) {
-                       result = false;
-               } else {
-                       result = sensors_get_value(chip, subfeature->number, &value) == 0;
-               }
-
-               polled = true;
-               readable = result;
-       }
-
-       enabled &= readable;
-
-       return readable;
-}
-
-Hwmon::Hwmon() : counters(NULL) {
-}
-
-Hwmon::~Hwmon() {
-       while (counters != NULL) {
-               HwmonCounter * counter = counters;
-               counters = counter->getNext();
-               delete counter;
-       }
-       sensors_cleanup();
-}
-
-void Hwmon::setup() {
-       // hwmon does not currently work with perf
-       if (gSessionData->perf.isSetup()) {
-               return;
-       }
-
-       int err = sensors_init(NULL);
-       if (err) {
-               logg->logMessage("Failed to initialize libsensors! (%d)", err);
-               return;
-       }
-       sensors_sysfs_no_scaling = 1;
-
-       int chip_nr = 0;
-       const sensors_chip_name *chip;
-       while ((chip = sensors_get_detected_chips(NULL, &chip_nr))) {
-               int feature_nr = 0;
-               const sensors_feature *feature;
-               while ((feature = sensors_get_features(chip, &feature_nr))) {
-                       counters = new HwmonCounter(counters, chip, feature);
-               }
-       }
-}
-
-HwmonCounter *Hwmon::findCounter(const Counter &counter) const {
-       for (HwmonCounter * hwmonCounter = counters; hwmonCounter != NULL; hwmonCounter = hwmonCounter->getNext()) {
-               if (hwmonCounter->canRead() && strcmp(hwmonCounter->getName(), counter.getType()) == 0) {
-                       return hwmonCounter;
-               }
-       }
-
-       return NULL;
-}
-
-bool Hwmon::claimCounter(const Counter &counter) const {
-       return findCounter(counter) != NULL;
-}
-
-bool Hwmon::countersEnabled() const {
-       for (HwmonCounter * counter = counters; counter != NULL; counter = counter->getNext()) {
-               if (counter->isEnabled()) {
-                       return true;
-               }
-       }
-       return false;
-}
-
-void Hwmon::resetCounters() {
-       for (HwmonCounter * counter = counters; counter != NULL; counter = counter->getNext()) {
-               counter->setEnabled(false);
-       }
-}
-
-void Hwmon::setupCounter(Counter &counter) {
-       HwmonCounter *const hwmonCounter = findCounter(counter);
-       if (hwmonCounter == NULL) {
-               counter.setEnabled(false);
-               return;
-       }
-       hwmonCounter->setEnabled(true);
-       counter.setKey(hwmonCounter->getKey());
-}
-
-int Hwmon::writeCounters(mxml_node_t *root) const {
-       int count = 0;
-       for (HwmonCounter * counter = counters; counter != NULL; counter = counter->getNext()) {
-               if (!counter->canRead()) {
-                       continue;
-               }
-               mxml_node_t *node = mxmlNewElement(root, "counter");
-               mxmlElementSetAttr(node, "name", counter->getName());
-               ++count;
-       }
-
-       return count;
-}
-
-void Hwmon::writeEvents(mxml_node_t *root) const {
-       root = mxmlNewElement(root, "category");
-       mxmlElementSetAttr(root, "name", "hwmon");
-
-       char buf[1024];
-       for (HwmonCounter * counter = counters; counter != NULL; counter = counter->getNext()) {
-               if (!counter->canRead()) {
-                       continue;
-               }
-               mxml_node_t *node = mxmlNewElement(root, "event");
-               mxmlElementSetAttr(node, "counter", counter->getName());
-               mxmlElementSetAttr(node, "title", counter->getTitle());
-               if (counter->isDuplicate()) {
-                       mxmlElementSetAttrf(node, "name", "%s (0x%x)", counter->getLabel(), counter->getKey());
-               } else {
-                       mxmlElementSetAttr(node, "name", counter->getLabel());
-               }
-               mxmlElementSetAttr(node, "display", counter->getDisplay());
-               mxmlElementSetAttr(node, "units", counter->getUnit());
-               if (counter->getModifier() != 1) {
-                       mxmlElementSetAttrf(node, "modifier", "%d", counter->getModifier());
-               }
-               if (strcmp(counter->getDisplay(), "average") == 0 || strcmp(counter->getDisplay(), "maximum") == 0) {
-                       mxmlElementSetAttr(node, "average_selection", "yes");
-               }
-               snprintf(buf, sizeof(buf), "libsensors %s sensor %s (%s)", counter->getTitle(), counter->getLabel(), counter->getName());
-               mxmlElementSetAttr(node, "description", buf);
-       }
-}
-
-void Hwmon::start() {
-       for (HwmonCounter * counter = counters; counter != NULL; counter = counter->getNext()) {
-               if (!counter->isEnabled()) {
-                       continue;
-               }
-               counter->read();
-       }
-}
-
-void Hwmon::read(Buffer * const buffer) {
-       for (HwmonCounter * counter = counters; counter != NULL; counter = counter->getNext()) {
-               if (!counter->isEnabled()) {
-                       continue;
-               }
-               buffer->event(counter->getKey(), counter->read());
-       }
-}
diff --git a/tools/gator/daemon/Hwmon.h b/tools/gator/daemon/Hwmon.h
deleted file mode 100644 (file)
index a22a360..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Copyright (C) ARM Limited 2013-2014. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef        HWMON_H
-#define        HWMON_H
-
-#include "Driver.h"
-
-class Buffer;
-class HwmonCounter;
-
-class Hwmon : public Driver {
-public:
-       Hwmon();
-       ~Hwmon();
-
-       void setup();
-
-       bool claimCounter(const Counter &counter) const;
-       bool countersEnabled() const;
-       void resetCounters();
-       void setupCounter(Counter &counter);
-
-       int writeCounters(mxml_node_t *root) const;
-       void writeEvents(mxml_node_t *root) const;
-
-       void start();
-       void read(Buffer * buffer);
-
-private:
-       HwmonCounter *findCounter(const Counter &counter) const;
-
-       HwmonCounter *counters;
-
-       // Intentionally unimplemented
-       Hwmon(const Hwmon &);
-       Hwmon &operator=(const Hwmon &);
-};
-
-#endif // HWMON_H
diff --git a/tools/gator/daemon/HwmonDriver.cpp b/tools/gator/daemon/HwmonDriver.cpp
new file mode 100644 (file)
index 0000000..9d161ae
--- /dev/null
@@ -0,0 +1,245 @@
+/**
+ * Copyright (C) ARM Limited 2013-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "HwmonDriver.h"
+
+#include "libsensors/sensors.h"
+
+#include "Logging.h"
+
+// feature->type to input map
+static sensors_subfeature_type getInput(const sensors_feature_type type) {
+       switch (type) {
+       case SENSORS_FEATURE_IN: return SENSORS_SUBFEATURE_IN_INPUT;
+       case SENSORS_FEATURE_FAN: return SENSORS_SUBFEATURE_FAN_INPUT;
+       case SENSORS_FEATURE_TEMP: return SENSORS_SUBFEATURE_TEMP_INPUT;
+       case SENSORS_FEATURE_POWER: return SENSORS_SUBFEATURE_POWER_INPUT;
+       case SENSORS_FEATURE_ENERGY: return SENSORS_SUBFEATURE_ENERGY_INPUT;
+       case SENSORS_FEATURE_CURR: return SENSORS_SUBFEATURE_CURR_INPUT;
+       case SENSORS_FEATURE_HUMIDITY: return SENSORS_SUBFEATURE_HUMIDITY_INPUT;
+       default:
+               logg->logError(__FILE__, __LINE__, "Unsupported hwmon feature %i", type);
+               handleException();
+       }
+};
+
+class HwmonCounter : public DriverCounter {
+public:
+       HwmonCounter(DriverCounter *next, char *const name, const sensors_chip_name *chip, const sensors_feature *feature);
+       ~HwmonCounter();
+
+       const char *getLabel() const { return label; }
+       const char *getTitle() const { return title; }
+       bool isDuplicate() const { return duplicate; }
+       const char *getDisplay() const { return display; }
+       const char *getCounterClass() const { return counter_class; }
+       const char *getUnit() const { return unit; }
+       int getModifier() const { return modifier; }
+
+       int64_t read();
+
+private:
+       void init(const sensors_chip_name *chip, const sensors_feature *feature);
+
+       const sensors_chip_name *chip;
+       const sensors_feature *feature;
+       char *label;
+       const char *title;
+       const char *display;
+       const char *counter_class;
+       const char *unit;
+       double previous_value;
+       int modifier;
+       int monotonic: 1,
+               duplicate : 1;
+
+       // Intentionally unimplemented
+       HwmonCounter(const HwmonCounter &);
+       HwmonCounter &operator=(const HwmonCounter &);
+};
+
+HwmonCounter::HwmonCounter(DriverCounter *next, char *const name, const sensors_chip_name *chip, const sensors_feature *feature) : DriverCounter(next, name), chip(chip), feature(feature), duplicate(false) {
+       label = sensors_get_label(chip, feature);
+
+       switch (feature->type) {
+       case SENSORS_FEATURE_IN:
+               title = "Voltage";
+               display = "maximum";
+               counter_class = "absolute";
+               unit = "V";
+               modifier = 1000;
+               monotonic = false;
+               break;
+       case SENSORS_FEATURE_FAN:
+               title = "Fan";
+               display = "average";
+               counter_class = "absolute";
+               unit = "RPM";
+               modifier = 1;
+               monotonic = false;
+               break;
+       case SENSORS_FEATURE_TEMP:
+               title = "Temperature";
+               display = "maximum";
+               counter_class = "absolute";
+               unit = "°C";
+               modifier = 1000;
+               monotonic = false;
+               break;
+       case SENSORS_FEATURE_POWER:
+               title = "Power";
+               display = "maximum";
+               counter_class = "absolute";
+               unit = "W";
+               modifier = 1000000;
+               monotonic = false;
+               break;
+       case SENSORS_FEATURE_ENERGY:
+               title = "Energy";
+               display = "accumulate";
+               counter_class = "delta";
+               unit = "J";
+               modifier = 1000000;
+               monotonic = true;
+               break;
+       case SENSORS_FEATURE_CURR:
+               title = "Current";
+               display = "maximum";
+               counter_class = "absolute";
+               unit = "A";
+               modifier = 1000;
+               monotonic = false;
+               break;
+       case SENSORS_FEATURE_HUMIDITY:
+               title = "Humidity";
+               display = "average";
+               counter_class = "absolute";
+               unit = "%";
+               modifier = 1000;
+               monotonic = false;
+               break;
+       default:
+               logg->logError(__FILE__, __LINE__, "Unsupported hwmon feature %i", feature->type);
+               handleException();
+       }
+
+       for (HwmonCounter * counter = static_cast<HwmonCounter *>(next); counter != NULL; counter = static_cast<HwmonCounter *>(counter->getNext())) {
+               if (strcmp(label, counter->getLabel()) == 0 && strcmp(title, counter->getTitle()) == 0) {
+                       duplicate = true;
+                       counter->duplicate = true;
+                       break;
+               }
+       }
+}
+
+HwmonCounter::~HwmonCounter() {
+       free((void *)label);
+}
+
+int64_t HwmonCounter::read() {
+       double value;
+       double result;
+       const sensors_subfeature *subfeature;
+
+       // Keep in sync with the read check in HwmonDriver::readEvents
+       subfeature = sensors_get_subfeature(chip, feature, getInput(feature->type));
+       if (!subfeature) {
+               logg->logError(__FILE__, __LINE__, "No input value for hwmon sensor %s", label);
+               handleException();
+       }
+
+       if (sensors_get_value(chip, subfeature->number, &value) != 0) {
+               logg->logError(__FILE__, __LINE__, "Can't get input value for hwmon sensor %s", label);
+               handleException();
+       }
+
+       result = (monotonic ? value - previous_value : value);
+       previous_value = value;
+
+       return result;
+}
+
+HwmonDriver::HwmonDriver() {
+}
+
+HwmonDriver::~HwmonDriver() {
+       sensors_cleanup();
+}
+
+void HwmonDriver::readEvents(mxml_node_t *const) {
+       int err = sensors_init(NULL);
+       if (err) {
+               logg->logMessage("Failed to initialize libsensors! (%d)", err);
+               return;
+       }
+       sensors_sysfs_no_scaling = 1;
+
+       int chip_nr = 0;
+       const sensors_chip_name *chip;
+       while ((chip = sensors_get_detected_chips(NULL, &chip_nr))) {
+               int feature_nr = 0;
+               const sensors_feature *feature;
+               while ((feature = sensors_get_features(chip, &feature_nr))) {
+                       // Keep in sync with HwmonCounter::read
+                       // Can this counter be read?
+                       double value;
+                       const sensors_subfeature *const subfeature = sensors_get_subfeature(chip, feature, getInput(feature->type));
+                       if ((subfeature == NULL) || (sensors_get_value(chip, subfeature->number, &value) != 0)) {
+                               continue;
+                       }
+
+                       // Get the name of the counter
+                       int len = sensors_snprintf_chip_name(NULL, 0, chip) + 1;
+                       char *chip_name = new char[len];
+                       sensors_snprintf_chip_name(chip_name, len, chip);
+                       len = snprintf(NULL, 0, "hwmon_%s_%d_%d", chip_name, chip_nr, feature->number) + 1;
+                       char *const name = new char[len];
+                       snprintf(name, len, "hwmon_%s_%d_%d", chip_name, chip_nr, feature->number);
+                       delete [] chip_name;
+
+                       setCounters(new HwmonCounter(getCounters(), name, chip, feature));
+               }
+       }
+}
+
+void HwmonDriver::writeEvents(mxml_node_t *root) const {
+       root = mxmlNewElement(root, "category");
+       mxmlElementSetAttr(root, "name", "hwmon");
+
+       char buf[1024];
+       for (HwmonCounter *counter = static_cast<HwmonCounter *>(getCounters()); counter != NULL; counter = static_cast<HwmonCounter *>(counter->getNext())) {
+               mxml_node_t *node = mxmlNewElement(root, "event");
+               mxmlElementSetAttr(node, "counter", counter->getName());
+               mxmlElementSetAttr(node, "title", counter->getTitle());
+               if (counter->isDuplicate()) {
+                       mxmlElementSetAttrf(node, "name", "%s (0x%x)", counter->getLabel(), counter->getKey());
+               } else {
+                       mxmlElementSetAttr(node, "name", counter->getLabel());
+               }
+               mxmlElementSetAttr(node, "display", counter->getDisplay());
+               mxmlElementSetAttr(node, "class", counter->getCounterClass());
+               mxmlElementSetAttr(node, "units", counter->getUnit());
+               if (counter->getModifier() != 1) {
+                       mxmlElementSetAttrf(node, "modifier", "%d", counter->getModifier());
+               }
+               if (strcmp(counter->getDisplay(), "average") == 0 || strcmp(counter->getDisplay(), "maximum") == 0) {
+                       mxmlElementSetAttr(node, "average_selection", "yes");
+               }
+               snprintf(buf, sizeof(buf), "libsensors %s sensor %s (%s)", counter->getTitle(), counter->getLabel(), counter->getName());
+               mxmlElementSetAttr(node, "description", buf);
+       }
+}
+
+void HwmonDriver::start() {
+       for (DriverCounter *counter = getCounters(); counter != NULL; counter = counter->getNext()) {
+               if (!counter->isEnabled()) {
+                       continue;
+               }
+               counter->read();
+       }
+}
diff --git a/tools/gator/daemon/HwmonDriver.h b/tools/gator/daemon/HwmonDriver.h
new file mode 100644 (file)
index 0000000..f28d825
--- /dev/null
@@ -0,0 +1,31 @@
+/**
+ * Copyright (C) ARM Limited 2013-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef HWMONDRIVER_H
+#define HWMONDRIVER_H
+
+#include "Driver.h"
+
+class HwmonDriver : public PolledDriver {
+public:
+       HwmonDriver();
+       ~HwmonDriver();
+
+       void readEvents(mxml_node_t *const root);
+
+       void writeEvents(mxml_node_t *root) const;
+
+       void start();
+
+private:
+       // Intentionally unimplemented
+       HwmonDriver(const HwmonDriver &);
+       HwmonDriver &operator=(const HwmonDriver &);
+};
+
+#endif // HWMONDRIVER_H
index 9300002f3fb2a41126b1c97d7f136e9b545aea2d..fe9dc6a7e4f7027919743189dd52c5a9c1905266 100644 (file)
@@ -16,6 +16,7 @@
 #include "Counter.h"
 #include "DriverSource.h"
 #include "Logging.h"
+#include "SessionData.h"
 
 // Claim all the counters in /dev/gator/events
 bool KMod::claimCounter(const Counter &counter) const {
@@ -46,11 +47,19 @@ void KMod::resetCounters() {
        }
 }
 
+static const char ARM_MALI_MIDGARD[] = "ARM_Mali-Midgard_";
+static const char ARM_MALI_T[] = "ARM_Mali-T";
+
 void KMod::setupCounter(Counter &counter) {
        char base[128];
        char text[128];
        snprintf(base, sizeof(base), "/dev/gator/events/%s", counter.getType());
 
+       if ((strncmp(counter.getType(), ARM_MALI_MIDGARD, sizeof(ARM_MALI_MIDGARD) - 1) == 0 ||
+            strncmp(counter.getType(), ARM_MALI_T, sizeof(ARM_MALI_T) - 1) == 0)) {
+               mIsMaliCapture = true;
+       }
+
        snprintf(text, sizeof(text), "%s/enabled", base);
        int enabled = true;
        if (DriverSource::writeReadDriver(text, &enabled) || !enabled) {
@@ -58,10 +67,15 @@ void KMod::setupCounter(Counter &counter) {
                return;
        }
 
+       int value = 0;
        snprintf(text, sizeof(text), "%s/key", base);
-       int key = 0;
-       DriverSource::readIntDriver(text, &key);
-       counter.setKey(key);
+       DriverSource::readIntDriver(text, &value);
+       counter.setKey(value);
+
+       snprintf(text, sizeof(text), "%s/cores", base);
+       if (DriverSource::readIntDriver(text, &value) == 0) {
+               counter.setCores(value);
+       }
 
        snprintf(text, sizeof(text), "%s/event", base);
        DriverSource::writeDriver(text, counter.getEvent());
index fb7fc8a8f9c6200fc31a9fe190d36d3d309370df..900a60e87d24d605957fc58eb4653a1ffd7840d9 100644 (file)
@@ -14,7 +14,7 @@
 // Driver for the gator kernel module
 class KMod : public Driver {
 public:
-       KMod() {}
+       KMod() : mIsMaliCapture(false) {}
        ~KMod() {}
 
        bool claimCounter(const Counter &counter) const;
@@ -22,6 +22,11 @@ public:
        void setupCounter(Counter &counter);
 
        int writeCounters(mxml_node_t *root) const;
+
+       bool isMaliCapture() const { return mIsMaliCapture; }
+
+private:
+       bool mIsMaliCapture;
 };
 
 #endif // KMOD_H
index aadeccecf0ccf28b61c57ac79cc9f3efa1283a8d..25d281f8328b4d2b433f9ea037503baf626f4cdf 100644 (file)
@@ -6,8 +6,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef        __LOCAL_CAPTURE_H__
-#define        __LOCAL_CAPTURE_H__
+#ifndef __LOCAL_CAPTURE_H__
+#define __LOCAL_CAPTURE_H__
 
 struct ImageLinkList;
 
@@ -23,4 +23,4 @@ private:
        int removeDirAndAllContents(char* path);
 };
 
-#endif         //__LOCAL_CAPTURE_H__
+#endif //__LOCAL_CAPTURE_H__
index b8d3178950d6c5a119e69ac03b0faf706608bbaf..41ffa1a45151c814aa0de13971a55fc7b17e08be 100644 (file)
 #include <string.h>
 
 #ifdef WIN32
-#define MUTEX_INIT()   mLoggingMutex = CreateMutex(NULL, false, NULL);
-#define MUTEX_LOCK()   WaitForSingleObject(mLoggingMutex, 0xFFFFFFFF);
-#define MUTEX_UNLOCK() ReleaseMutex(mLoggingMutex);
-#define snprintf               _snprintf
+#define MUTEX_INIT()    mLoggingMutex = CreateMutex(NULL, false, NULL);
+#define MUTEX_LOCK()    WaitForSingleObject(mLoggingMutex, 0xFFFFFFFF);
+#define MUTEX_UNLOCK()  ReleaseMutex(mLoggingMutex);
+#define snprintf _snprintf
 #else
 #include <pthread.h>
-#define MUTEX_INIT()   pthread_mutex_init(&mLoggingMutex, NULL)
-#define MUTEX_LOCK()   pthread_mutex_lock(&mLoggingMutex)
-#define MUTEX_UNLOCK() pthread_mutex_unlock(&mLoggingMutex)
+#define MUTEX_INIT()    pthread_mutex_init(&mLoggingMutex, NULL)
+#define MUTEX_LOCK()    pthread_mutex_lock(&mLoggingMutex)
+#define MUTEX_UNLOCK()  pthread_mutex_unlock(&mLoggingMutex)
 #endif
 
 // Global thread-safe logging
@@ -40,7 +40,7 @@ Logging::~Logging() {
 }
 
 void Logging::logError(const char* file, int line, const char* fmt, ...) {
-       va_list args;
+       va_list args;
 
        MUTEX_LOCK();
        if (mDebug) {
@@ -61,7 +61,7 @@ void Logging::logError(const char* file, int line, const char* fmt, ...) {
 
 void Logging::logMessage(const char* fmt, ...) {
        if (mDebug) {
-               va_list args;
+               va_list args;
 
                MUTEX_LOCK();
                strcpy(mLogBuf, "INFO: ");
index 6ae328046989342db0fd4270896d72cbe090fe0e..09e93ff13f7a95b9f564da4c9f92e1f720df24df 100644 (file)
@@ -6,8 +6,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef        __LOGGING_H__
-#define        __LOGGING_H__
+#ifndef __LOGGING_H__
+#define __LOGGING_H__
 
 #include <pthread.h>
 
@@ -23,14 +23,14 @@ public:
        char* getLastMessage() {return mLogBuf;}
 
 private:
-       char    mErrBuf[4096]; // Arbitrarily large buffer to hold a string
-       char    mLogBuf[4096]; // Arbitrarily large buffer to hold a string
-       bool    mDebug;
-       pthread_mutex_t mLoggingMutex;
+       char mErrBuf[4096]; // Arbitrarily large buffer to hold a string
+       char mLogBuf[4096]; // Arbitrarily large buffer to hold a string
+       bool mDebug;
+       pthread_mutex_t mLoggingMutex;
 };
 
 extern Logging* logg;
 
 extern void handleException() __attribute__ ((noreturn));
 
-#endif         //__LOGGING_H__
+#endif //__LOGGING_H__
index 24ee94045470686ea7e2e8452f53a018c1c0bd58..27531b438b6391ebd1ff08dcca57e66e090e119f 100644 (file)
@@ -8,14 +8,11 @@
 # targets run 'make SOFTFLOAT=1 SYSROOT=/path/to/sysroot', see
 # README_Streamline.txt for more details
 
-CPP = $(CROSS_COMPILE)g++
-GCC = $(CROSS_COMPILE)gcc
-
-# -mthumb-interwork is required for interworking to ARM or Thumb stdlibc
-CFLAGS += -mthumb-interwork
+CC = $(CROSS_COMPILE)gcc
+CXX = $(CROSS_COMPILE)g++
 
 ifeq ($(SOFTFLOAT),1)
-       CFLAGS += -marm -march=armv4t -mfloat-abi=soft
+       CPPFLAGS += -marm -mthumb-interwork -march=armv4t -mfloat-abi=soft
        LDFLAGS += -marm -march=armv4t -mfloat-abi=soft
 endif
 ifneq ($(SYSROOT),)
index 10b4b4a71ab1676805ed0baaf940d597918341e1..efd1fa0021829be161e019dacaed29e634088152 100644 (file)
@@ -4,12 +4,9 @@
 #
 
 # Uncomment and define CROSS_COMPILE if it is not already defined
-# CROSS_COMPILE=/path/to/cross-compiler/arm-linux-gnueabihf-
-# NOTE: This toolchain uses the hardfloat abi by default. For non-hardfloat
-# targets it is necessary to add options
-# '-marm -march=armv4t -mfloat-abi=soft'.
+# CROSS_COMPILE=/path/to/cross-compiler/aarch64-linux-gnu-
 
-CPP = $(CROSS_COMPILE)g++
-GCC = $(CROSS_COMPILE)gcc
+CC = $(CROSS_COMPILE)gcc
+CXX = $(CROSS_COMPILE)g++
 
 include common.mk
diff --git a/tools/gator/daemon/MaliVideoDriver.cpp b/tools/gator/daemon/MaliVideoDriver.cpp
new file mode 100644 (file)
index 0000000..5eef264
--- /dev/null
@@ -0,0 +1,191 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "MaliVideoDriver.h"
+
+#include <unistd.h>
+
+#include "Buffer.h"
+#include "Counter.h"
+#include "Logging.h"
+#include "SessionData.h"
+
+// From instr/src/mve_instr_comm_protocol.h
+typedef enum mve_instr_configuration_type {
+       MVE_INSTR_RAW         = 1 << 0,
+       MVE_INSTR_COUNTERS    = 1 << 1,
+       MVE_INSTR_EVENTS      = 1 << 2,
+       MVE_INSTR_ACTIVITIES  = 1 << 3,
+
+       // Raw always pushed regardless
+       MVE_INSTR_PULL        = 1 << 12,
+       // Raw always unpacked regardless
+       MVE_INSTR_PACKED_COMM = 1 << 13,
+       // Don’t send ACKt response
+       MVE_INSTR_NO_AUTO_ACK   = 1 << 14,
+} mve_instr_configuration_type_t;
+
+static const char COUNTER[] = "ARM_Mali-V500_cnt";
+static const char EVENT[] = "ARM_Mali-V500_evn";
+static const char ACTIVITY[] = "ARM_Mali-V500_act";
+
+class MaliVideoCounter : public DriverCounter {
+public:
+       MaliVideoCounter(DriverCounter *next, const char *name, const MaliVideoCounterType type, const int id) : DriverCounter(next, name), mType(type), mId(id) {
+       }
+
+       ~MaliVideoCounter() {
+       }
+
+       MaliVideoCounterType getType() const { return mType; }
+       int getId() const { return mId; }
+
+private:
+       const MaliVideoCounterType mType;
+       // Mali Video id
+       const int mId;
+};
+
+MaliVideoDriver::MaliVideoDriver() {
+}
+
+MaliVideoDriver::~MaliVideoDriver() {
+}
+
+void MaliVideoDriver::readEvents(mxml_node_t *const xml) {
+       mxml_node_t *node = xml;
+       while (true) {
+               node = mxmlFindElement(node, xml, "event", NULL, NULL, MXML_DESCEND);
+               if (node == NULL) {
+                       break;
+               }
+               const char *counter = mxmlElementGetAttr(node, "counter");
+               if (counter == NULL) {
+                       // Ignore
+               } else if (strncmp(counter, COUNTER, sizeof(COUNTER) - 1) == 0) {
+                       const int i = strtol(counter + sizeof(COUNTER) - 1, NULL, 10);
+                       setCounters(new MaliVideoCounter(getCounters(), strdup(counter), MVCT_COUNTER, i));
+               } else if (strncmp(counter, EVENT, sizeof(EVENT) - 1) == 0) {
+                       const int i = strtol(counter + sizeof(EVENT) - 1, NULL, 10);
+                       setCounters(new MaliVideoCounter(getCounters(), strdup(counter), MVCT_EVENT, i));
+               } else if (strncmp(counter, ACTIVITY, sizeof(ACTIVITY) - 1) == 0) {
+                       const int i = strtol(counter + sizeof(ACTIVITY) - 1, NULL, 10);
+                       setCounters(new MaliVideoCounter(getCounters(), strdup(counter), MVCT_ACTIVITY, i));
+               }
+       }
+}
+
+int MaliVideoDriver::writeCounters(mxml_node_t *root) const {
+       if (access("/dev/mv500", F_OK) != 0) {
+               return 0;
+       }
+
+       return super::writeCounters(root);
+}
+
+void MaliVideoDriver::marshalEnable(const MaliVideoCounterType type, char *const buf, const size_t bufsize, int &pos) {
+       // size
+       int numEnabled = 0;
+       for (MaliVideoCounter *counter = static_cast<MaliVideoCounter *>(getCounters()); counter != NULL; counter = static_cast<MaliVideoCounter *>(counter->getNext())) {
+               if (counter->isEnabled() && (counter->getType() == type)) {
+                       ++numEnabled;
+               }
+       }
+       Buffer::packInt(buf, bufsize, pos, numEnabled*sizeof(uint32_t));
+       for (MaliVideoCounter *counter = static_cast<MaliVideoCounter *>(getCounters()); counter != NULL; counter = static_cast<MaliVideoCounter *>(counter->getNext())) {
+               if (counter->isEnabled() && (counter->getType() == type)) {
+                       Buffer::packInt(buf, bufsize, pos, counter->getId());
+               }
+       }
+}
+
+static bool writeAll(const int mveUds, const char *const buf, const int pos) {
+       int written = 0;
+       while (written < pos) {
+               size_t bytes = ::write(mveUds, buf + written, pos - written);
+               if (bytes <= 0) {
+                       logg->logMessage("%s(%s:%i): write failed", __FUNCTION__, __FILE__, __LINE__);
+                       return false;
+               }
+               written += bytes;
+       }
+
+       return true;
+}
+
+bool MaliVideoDriver::start(const int mveUds) {
+       char buf[256];
+       int pos = 0;
+
+       // code - MVE_INSTR_STARTUP
+       buf[pos++] = 'C';
+       buf[pos++] = 'L';
+       buf[pos++] = 'N';
+       buf[pos++] = 'T';
+       // size
+       Buffer::packInt(buf, sizeof(buf), pos, sizeof(uint32_t));
+       // client_version_number
+       Buffer::packInt(buf, sizeof(buf), pos, 1);
+
+       // code - MVE_INSTR_CONFIGURE
+       buf[pos++] = 'C';
+       buf[pos++] = 'N';
+       buf[pos++] = 'F';
+       buf[pos++] = 'G';
+       // size
+       Buffer::packInt(buf, sizeof(buf), pos, 5*sizeof(uint32_t));
+       // configuration
+       Buffer::packInt(buf, sizeof(buf), pos, MVE_INSTR_COUNTERS | MVE_INSTR_EVENTS | MVE_INSTR_ACTIVITIES | MVE_INSTR_PACKED_COMM);
+       // communication_protocol_version
+       Buffer::packInt(buf, sizeof(buf), pos, 1);
+       // data_protocol_version
+       Buffer::packInt(buf, sizeof(buf), pos, 1);
+       // sample_rate - convert samples/second to ms/sample
+       Buffer::packInt(buf, sizeof(buf), pos, 1000/gSessionData->mSampleRate);
+       // live_rate - convert ns/flush to ms/flush
+       Buffer::packInt(buf, sizeof(buf), pos, gSessionData->mLiveRate/1000000);
+
+       // code - MVE_INSTR_ENABLE_COUNTERS
+       buf[pos++] = 'C';
+       buf[pos++] = 'F';
+       buf[pos++] = 'G';
+       buf[pos++] = 'c';
+       marshalEnable(MVCT_COUNTER, buf, sizeof(buf), pos);
+
+       // code - MVE_INSTR_ENABLE_EVENTS
+       buf[pos++] = 'C';
+       buf[pos++] = 'F';
+       buf[pos++] = 'G';
+       buf[pos++] = 'e';
+       marshalEnable(MVCT_EVENT, buf, sizeof(buf), pos);
+
+       // code - MVE_INSTR_ENABLE_ACTIVITIES
+       buf[pos++] = 'C';
+       buf[pos++] = 'F';
+       buf[pos++] = 'G';
+       buf[pos++] = 'a';
+       marshalEnable(MVCT_ACTIVITY, buf, sizeof(buf), pos);
+
+       return writeAll(mveUds, buf, pos);
+}
+
+void MaliVideoDriver::stop(const int mveUds) {
+       char buf[8];
+       int pos = 0;
+
+       // code - MVE_INSTR_STOP
+       buf[pos++] = 'S';
+       buf[pos++] = 'T';
+       buf[pos++] = 'O';
+       buf[pos++] = 'P';
+       marshalEnable(MVCT_COUNTER, buf, sizeof(buf), pos);
+
+       writeAll(mveUds, buf, pos);
+
+       close(mveUds);
+}
diff --git a/tools/gator/daemon/MaliVideoDriver.h b/tools/gator/daemon/MaliVideoDriver.h
new file mode 100644 (file)
index 0000000..204a57a
--- /dev/null
@@ -0,0 +1,45 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MALIVIDEODRIVER_H
+#define MALIVIDEODRIVER_H
+
+#include "Driver.h"
+
+class MaliVideoCounter;
+
+enum MaliVideoCounterType {
+       MVCT_COUNTER,
+       MVCT_EVENT,
+       MVCT_ACTIVITY,
+};
+
+class MaliVideoDriver : public SimpleDriver {
+private:
+       typedef SimpleDriver super;
+
+public:
+       MaliVideoDriver();
+       ~MaliVideoDriver();
+
+       void readEvents(mxml_node_t *const root);
+
+       int writeCounters(mxml_node_t *root) const;
+
+       bool start(const int mveUds);
+       void stop(const int mveUds);
+
+private:
+       void marshalEnable(const MaliVideoCounterType type, char *const buf, const size_t bufsize, int &pos);
+
+       // Intentionally unimplemented
+       MaliVideoDriver(const MaliVideoDriver &);
+       MaliVideoDriver &operator=(const MaliVideoDriver &);
+};
+
+#endif // MALIVIDEODRIVER_H
diff --git a/tools/gator/daemon/MemInfoDriver.cpp b/tools/gator/daemon/MemInfoDriver.cpp
new file mode 100644 (file)
index 0000000..cce15c1
--- /dev/null
@@ -0,0 +1,93 @@
+/**
+ * Copyright (C) ARM Limited 2013-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "MemInfoDriver.h"
+
+#include "Logging.h"
+#include "SessionData.h"
+
+class MemInfoCounter : public DriverCounter {
+public:
+       MemInfoCounter(DriverCounter *next, char *const name, int64_t *const value);
+       ~MemInfoCounter();
+
+       int64_t read();
+
+private:
+       int64_t *const mValue;
+
+       // Intentionally unimplemented
+       MemInfoCounter(const MemInfoCounter &);
+       MemInfoCounter &operator=(const MemInfoCounter &);
+};
+
+MemInfoCounter::MemInfoCounter(DriverCounter *next, char *const name, int64_t *const value) : DriverCounter(next, name), mValue(value) {
+}
+
+MemInfoCounter::~MemInfoCounter() {
+}
+
+int64_t MemInfoCounter::read() {
+       return *mValue;
+}
+
+MemInfoDriver::MemInfoDriver() : mBuf(), mMemUsed(0), mMemFree(0), mBuffers(0) {
+}
+
+MemInfoDriver::~MemInfoDriver() {
+}
+
+void MemInfoDriver::readEvents(mxml_node_t *const) {
+       // Only for use with perf
+       if (!gSessionData->perf.isSetup()) {
+               return;
+       }
+
+       setCounters(new MemInfoCounter(getCounters(), strdup("Linux_meminfo_memused2"), &mMemUsed));
+       setCounters(new MemInfoCounter(getCounters(), strdup("Linux_meminfo_memfree"), &mMemFree));
+       setCounters(new MemInfoCounter(getCounters(), strdup("Linux_meminfo_bufferram"), &mBuffers));
+}
+
+void MemInfoDriver::read(Buffer *const buffer) {
+       if (!countersEnabled()) {
+               return;
+       }
+
+       if (!mBuf.read("/proc/meminfo")) {
+               logg->logError(__FILE__, __LINE__, "Failed to read /proc/meminfo");
+               handleException();
+       }
+
+       char *key = mBuf.getBuf();
+       char *colon;
+       int64_t memTotal = 0;
+       while ((colon = strchr(key, ':')) != NULL) {
+               char *end = strchr(colon + 1, '\n');
+               if (end != NULL) {
+                       *end = '\0';
+               }
+               *colon = '\0';
+
+               if (strcmp(key, "MemTotal") == 0) {
+                       memTotal = strtoll(colon + 1, NULL, 10) << 10;
+               } else if (strcmp(key, "MemFree") == 0) {
+                       mMemFree = strtoll(colon + 1, NULL, 10) << 10;
+               } else if (strcmp(key, "Buffers") == 0) {
+                       mBuffers = strtoll(colon + 1, NULL, 10) << 10;
+               }
+
+               if (end == NULL) {
+                       break;
+               }
+               key = end + 1;
+       }
+
+       mMemUsed = memTotal - mMemFree;
+
+       super::read(buffer);
+}
diff --git a/tools/gator/daemon/MemInfoDriver.h b/tools/gator/daemon/MemInfoDriver.h
new file mode 100644 (file)
index 0000000..eb1b041
--- /dev/null
@@ -0,0 +1,37 @@
+/**
+ * Copyright (C) ARM Limited 2013-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MEMINFODRIVER_H
+#define MEMINFODRIVER_H
+
+#include "Driver.h"
+#include "DynBuf.h"
+
+class MemInfoDriver : public PolledDriver {
+private:
+       typedef PolledDriver super;
+
+public:
+       MemInfoDriver();
+       ~MemInfoDriver();
+
+       void readEvents(mxml_node_t *const root);
+       void read(Buffer *const buffer);
+
+private:
+       DynBuf mBuf;
+       int64_t mMemUsed;
+       int64_t mMemFree;
+       int64_t mBuffers;
+
+       // Intentionally unimplemented
+       MemInfoDriver(const MemInfoDriver &);
+       MemInfoDriver &operator=(const MemInfoDriver &);
+};
+
+#endif // MEMINFODRIVER_H
index 90d5c47706c73fb4e74f5a8be82d26948099ca29..74f22ee29fec7dcb776a7999e34353e1100a8a6d 100644 (file)
@@ -9,6 +9,7 @@
 #include "Monitor.h"
 
 #include <errno.h>
+#include <fcntl.h>
 #include <string.h>
 #include <unistd.h>
 
@@ -18,18 +19,38 @@ Monitor::Monitor() : mFd(-1) {
 }
 
 Monitor::~Monitor() {
-       if (mFd >= -1) {
-               close(mFd);
+       if (mFd >= 0) {
+               ::close(mFd);
+       }
+}
+
+void Monitor::close() {
+       if (mFd >= 0) {
+               ::close(mFd);
+               mFd = -1;
        }
 }
 
 bool Monitor::init() {
+#ifdef EPOLL_CLOEXEC
+       mFd = epoll_create1(EPOLL_CLOEXEC);
+#else
        mFd = epoll_create(16);
+#endif
        if (mFd < 0) {
                logg->logMessage("%s(%s:%i): epoll_create1 failed", __FUNCTION__, __FILE__, __LINE__);
                return false;
        }
 
+#ifndef EPOLL_CLOEXEC
+  int fdf = fcntl(mFd, F_GETFD);
+  if ((fdf == -1) || (fcntl(mFd, F_SETFD, fdf | FD_CLOEXEC) != 0)) {
+               logg->logMessage("%s(%s:%i): fcntl failed", __FUNCTION__, __FILE__, __LINE__);
+    ::close(mFd);
+    return -1;
+  }
+#endif
+
        return true;
 }
 
index 6e268b6e1bed2e09f624d2483aaa98cdce1b4d27..7194e0e4ca509a43782a3d17437e0c19ac7fbe8f 100644 (file)
@@ -16,6 +16,7 @@ public:
        Monitor();
        ~Monitor();
 
+       void close();
        bool init();
        bool add(const int fd);
        int wait(struct epoll_event *const events, int maxevents, int timeout);
diff --git a/tools/gator/daemon/NetDriver.cpp b/tools/gator/daemon/NetDriver.cpp
new file mode 100644 (file)
index 0000000..e75c069
--- /dev/null
@@ -0,0 +1,129 @@
+/**
+ * Copyright (C) ARM Limited 2013-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+// Define to get format macros from inttypes.h
+#define __STDC_FORMAT_MACROS
+
+#include "NetDriver.h"
+
+#include <inttypes.h>
+
+#include "Logging.h"
+#include "SessionData.h"
+
+class NetCounter : public DriverCounter {
+public:
+       NetCounter(DriverCounter *next, char *const name, int64_t *const value);
+       ~NetCounter();
+
+       int64_t read();
+
+private:
+       int64_t *const mValue;
+       int64_t mPrev;
+
+       // Intentionally unimplemented
+       NetCounter(const NetCounter &);
+       NetCounter &operator=(const NetCounter &);
+};
+
+NetCounter::NetCounter(DriverCounter *next, char *const name, int64_t *const value) : DriverCounter(next, name), mValue(value), mPrev(0) {
+}
+
+NetCounter::~NetCounter() {
+}
+
+int64_t NetCounter::read() {
+       int64_t result = *mValue - mPrev;
+       mPrev = *mValue;
+       return result;
+}
+
// Construct with zeroed totals; counters are not registered until
// readEvents() runs.
NetDriver::NetDriver() : mBuf(), mReceiveBytes(0), mTransmitBytes(0) {
}

NetDriver::~NetDriver() {
}

// Register the network rx/tx byte counters. Skipped entirely unless the
// perf backend is set up.
void NetDriver::readEvents(mxml_node_t *const) {
	// Only for use with perf
	if (!gSessionData->perf.isSetup()) {
		return;
	}

	// Names are strdup'd; presumably DriverCounter takes ownership and
	// frees them — TODO confirm against DriverCounter's destructor
	setCounters(new NetCounter(getCounters(), strdup("Linux_net_rx"), &mReceiveBytes));
	setCounters(new NetCounter(getCounters(), strdup("Linux_net_tx"), &mTransmitBytes));
}
+
+bool NetDriver::doRead() {
+       if (!countersEnabled()) {
+               return true;
+       }
+
+       if (!mBuf.read("/proc/net/dev")) {
+               return false;
+       }
+
+       // Skip the header
+       char *key;
+       if (((key = strchr(mBuf.getBuf(), '\n')) == NULL) ||
+                       ((key = strchr(key + 1, '\n')) == NULL)) {
+               return false;
+       }
+       key = key + 1;
+
+       mReceiveBytes = 0;
+       mTransmitBytes = 0;
+
+       char *colon;
+       while ((colon = strchr(key, ':')) != NULL) {
+               char *end = strchr(colon + 1, '\n');
+               if (end != NULL) {
+                       *end = '\0';
+               }
+               *colon = '\0';
+
+               int64_t receiveBytes;
+               int64_t transmitBytes;
+               const int count = sscanf(colon + 1, " %" SCNu64 " %*u %*u %*u %*u %*u %*u %*u %" SCNu64, &receiveBytes, &transmitBytes);
+               if (count != 2) {
+                       return false;
+               }
+               mReceiveBytes += receiveBytes;
+               mTransmitBytes += transmitBytes;
+
+               if (end == NULL) {
+                       break;
+               }
+               key = end + 1;
+       }
+
+       return true;
+}
+
// Take an initial snapshot so the first read() reports deltas relative
// to capture start rather than since boot. Fatal on read failure.
void NetDriver::start() {
	if (!doRead()) {
		logg->logError(__FILE__, __LINE__, "Unable to read network stats");
		handleException();
	}
	// Initialize previous values
	for (DriverCounter *counter = getCounters(); counter != NULL; counter = counter->getNext()) {
		if (!counter->isEnabled()) {
			continue;
		}
		counter->read();
	}
}

// Sample /proc/net/dev, then let the base class emit the enabled
// counters into the capture buffer. Fatal on read failure.
void NetDriver::read(Buffer *const buffer) {
	if (!doRead()) {
		logg->logError(__FILE__, __LINE__, "Unable to read network stats");
		handleException();
	}
	super::read(buffer);
}
diff --git a/tools/gator/daemon/NetDriver.h b/tools/gator/daemon/NetDriver.h
new file mode 100644 (file)
index 0000000..50ff850
--- /dev/null
@@ -0,0 +1,39 @@
+/**
+ * Copyright (C) ARM Limited 2013-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
#ifndef NETDRIVER_H
#define NETDRIVER_H

#include "Driver.h"
#include "DynBuf.h"

// Polled driver that reports network receive/transmit byte counts
// parsed from /proc/net/dev.
class NetDriver : public PolledDriver {
private:
	typedef PolledDriver super;

public:
	NetDriver();
	~NetDriver();

	// Registers the rx/tx counters under the given XML events root.
	void readEvents(mxml_node_t *const root);
	// Takes the baseline sample at capture start.
	void start();
	// Samples the stats and emits enabled counters into the buffer.
	void read(Buffer *const buffer);

private:
	// Re-reads /proc/net/dev into the totals below; false on failure.
	bool doRead();

	DynBuf mBuf;            // scratch buffer for the /proc/net/dev contents
	int64_t mReceiveBytes;  // running total across all interfaces
	int64_t mTransmitBytes; // running total across all interfaces

	// Intentionally unimplemented
	NetDriver(const NetDriver &);
	NetDriver &operator=(const NetDriver &);
};

#endif // NETDRIVER_H
index 26e4768f39342440d0c25a81a2bdd8c579d352a6..aa0ce4929916eb57d1637ff63acf5a07af87548b 100644 (file)
@@ -9,15 +9,16 @@
 #include "OlySocket.h"
 
 #include <stdio.h>
+#include <string.h>
 #ifdef WIN32
 #include <Winsock2.h>
 #include <ws2tcpip.h>
 #else
 #include <netinet/in.h>
-#include <sys/socket.h>
 #include <sys/un.h>
 #include <unistd.h>
 #include <netdb.h>
+#include <fcntl.h>
 #endif
 
 #include "Logging.h"
 #define SHUTDOWN_RX_TX SHUT_RDWR
 #endif
 
// Create a socket with the close-on-exec flag set, so the descriptor is
// not inherited across exec. Uses SOCK_CLOEXEC atomically where the
// platform provides it, otherwise falls back to fcntl(FD_CLOEXEC).
// Returns the socket fd, or -1 on failure.
int socket_cloexec(int domain, int type, int protocol) {
#ifdef SOCK_CLOEXEC
  return socket(domain, type | SOCK_CLOEXEC, protocol);
#else
  const int sock = socket(domain, type, protocol);
  if (sock < 0) {
    return -1;
  }
#ifdef FD_CLOEXEC
  const int flags = fcntl(sock, F_GETFD);
  if (flags == -1 || fcntl(sock, F_SETFD, flags | FD_CLOEXEC) != 0) {
    close(sock);
    return -1;
  }
#endif
  return sock;
#endif
}
+
// Accept a connection and mark the new descriptor close-on-exec.
// Prefers the atomic accept4(SOCK_CLOEXEC); if that fails (e.g. on an
// older kernel) it retries with plain accept and sets FD_CLOEXEC via
// fcntl. Returns the accepted fd, or -1 on failure.
int accept_cloexec(int sockfd, struct sockaddr *addr, socklen_t *addrlen) {
  int fd;
#ifdef SOCK_CLOEXEC
  fd = accept4(sockfd, addr, addrlen, SOCK_CLOEXEC);
  if (fd >= 0) {
    return fd;
  }
  // accept4 with SOCK_CLOEXEC may not work on all kernels, so fallback
#endif
  fd = accept(sockfd, addr, addrlen);
#ifdef FD_CLOEXEC
  if (fd < 0) {
    return -1;
  }
  const int flags = fcntl(fd, F_GETFD);
  if (flags == -1 || fcntl(fd, F_SETFD, flags | FD_CLOEXEC) != 0) {
    close(fd);
    return -1;
  }
#endif
  return fd;
}
+
 OlyServerSocket::OlyServerSocket(int port) {
 #ifdef WIN32
   WSADATA wsaData;
@@ -43,30 +86,30 @@ OlyServerSocket::OlyServerSocket(int port) {
   createServerSocket(port);
 }
 
-OlySocket::OlySocket(int port, const char* host) {
-  createClientSocket(host, port);
-}
-
 OlySocket::OlySocket(int socketID) : mSocketID(socketID) {
 }
 
 #ifndef WIN32
 
-OlyServerSocket::OlyServerSocket(const char* path) {
+#define MIN(A, B) ({ \
+  const __typeof__(A) __a = A; \
+  const __typeof__(B) __b = B; \
+  __a > __b ? __b : __a; \
+})
+
+OlyServerSocket::OlyServerSocket(const char* path, const size_t pathSize) {
   // Create socket
-  mFDServer = socket(PF_UNIX, SOCK_STREAM, 0);
+  mFDServer = socket_cloexec(PF_UNIX, SOCK_STREAM, 0);
   if (mFDServer < 0) {
     logg->logError(__FILE__, __LINE__, "Error creating server socket");
     handleException();
   }
 
-  unlink(path);
-
   // Create sockaddr_in structure, ensuring non-populated fields are zero
   struct sockaddr_un sockaddr;
   memset((void*)&sockaddr, 0, sizeof(sockaddr));
   sockaddr.sun_family = AF_UNIX;
-  strncpy(sockaddr.sun_path, path, sizeof(sockaddr.sun_path) - 1);
+  memcpy(sockaddr.sun_path, path, MIN(pathSize, sizeof(sockaddr.sun_path)));
   sockaddr.sun_path[sizeof(sockaddr.sun_path) - 1] = '\0';
 
   // Bind the socket to an address
@@ -82,24 +125,25 @@ OlyServerSocket::OlyServerSocket(const char* path) {
   }
 }
 
-OlySocket::OlySocket(const char* path) {
-  mSocketID = socket(PF_UNIX, SOCK_STREAM, 0);
-  if (mSocketID < 0) {
-    return;
+int OlySocket::connect(const char* path, const size_t pathSize) {
+  int fd = socket_cloexec(PF_UNIX, SOCK_STREAM, 0);
+  if (fd < 0) {
+    return -1;
   }
 
   // Create sockaddr_in structure, ensuring non-populated fields are zero
   struct sockaddr_un sockaddr;
   memset((void*)&sockaddr, 0, sizeof(sockaddr));
   sockaddr.sun_family = AF_UNIX;
-  strncpy(sockaddr.sun_path, path, sizeof(sockaddr.sun_path) - 1);
+  memcpy(sockaddr.sun_path, path, MIN(pathSize, sizeof(sockaddr.sun_path)));
   sockaddr.sun_path[sizeof(sockaddr.sun_path) - 1] = '\0';
 
-  if (connect(mSocketID, (const struct sockaddr*)&sockaddr, sizeof(sockaddr)) < 0) {
-    close(mSocketID);
-    mSocketID = -1;
-    return;
+  if (::connect(fd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr)) < 0) {
+    close(fd);
+    return -1;
   }
+
+  return fd;
 }
 
 #endif
@@ -137,55 +181,14 @@ void OlyServerSocket::closeServerSocket() {
   mFDServer = 0;
 }
 
-void OlySocket::createClientSocket(const char* hostname, int portno) {
-#ifdef WIN32
-  // TODO: Implement for Windows
-#else
-  char buf[32];
-  struct addrinfo hints, *res, *res0;
-
-  snprintf(buf, sizeof(buf), "%d", portno);
-  mSocketID = -1;
-  memset((void*)&hints, 0, sizeof(hints));
-  hints.ai_family = PF_UNSPEC;
-  hints.ai_socktype = SOCK_STREAM;
-
-  if (getaddrinfo(hostname, buf, &hints, &res0)) {
-    logg->logError(__FILE__, __LINE__, "Client socket failed to get address info for %s", hostname);
-    handleException();
-  }
-  for (res=res0; res!=NULL; res = res->ai_next) {
-    if ( res->ai_family != PF_INET || res->ai_socktype != SOCK_STREAM ) {
-      continue;
-    }
-    mSocketID = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
-    if (mSocketID < 0) {
-      continue;
-    }
-    if (connect(mSocketID, res->ai_addr, res->ai_addrlen) < 0) {
-      close(mSocketID);
-      mSocketID = -1;
-    }
-    if (mSocketID > 0) {
-      break;
-    }
-  }
-  freeaddrinfo(res0);
-  if (mSocketID <= 0) {
-    logg->logError(__FILE__, __LINE__, "Could not connect to client socket. Ensure ARM Streamline is running.");
-    handleException();
-  }
-#endif
-}
-
 void OlyServerSocket::createServerSocket(int port) {
   int family = AF_INET6;
 
   // Create socket
-  mFDServer = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP);
+  mFDServer = socket_cloexec(PF_INET6, SOCK_STREAM, IPPROTO_TCP);
   if (mFDServer < 0) {
     family = AF_INET;
-    mFDServer = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
+    mFDServer = socket_cloexec(PF_INET, SOCK_STREAM, IPPROTO_TCP);
     if (mFDServer < 0) {
       logg->logError(__FILE__, __LINE__, "Error creating server socket");
       handleException();
@@ -229,7 +232,7 @@ int OlyServerSocket::acceptConnection() {
   }
 
   // Accept a connection, note that this call blocks until a client connects
-  socketID = accept(mFDServer, NULL, NULL);
+  socketID = accept_cloexec(mFDServer, NULL, NULL);
   if (socketID < 0) {
     logg->logError(__FILE__, __LINE__, "Socket acceptance failed");
     handleException();
index eab786b304bf3907cfe361623751d8c7018126a9..6b53b01fc3ee6e6c9221e219f26aca43a28882a9 100644 (file)
@@ -9,13 +9,21 @@
 #ifndef __OLY_SOCKET_H__
 #define __OLY_SOCKET_H__
 
+#include <stddef.h>
+
#ifdef WIN32
// BUG FIX: this read 'typedef socklen_t int;', which is reversed and
// does not compile — the Windows shim must define socklen_t as an int.
typedef int socklen_t;
#else
#include <sys/socket.h>
#endif
+
 class OlySocket {
 public:
-  OlySocket(int port, const char* hostname);
-  OlySocket(int socketID);
 #ifndef WIN32
-  OlySocket(const char* path);
+  static int connect(const char* path, const size_t pathSize);
 #endif
+
+  OlySocket(int socketID);
   ~OlySocket();
 
   void closeSocket();
@@ -29,25 +37,28 @@ public:
 
 private:
   int mSocketID;
-
-  void createClientSocket(const char* hostname, int port);
 };
 
 class OlyServerSocket {
 public:
   OlyServerSocket(int port);
 #ifndef WIN32
-  OlyServerSocket(const char* path);
+  OlyServerSocket(const char* path, const size_t pathSize);
 #endif
   ~OlyServerSocket();
 
   int acceptConnection();
   void closeServerSocket();
 
+  int getFd() { return mFDServer; }
+
 private:
   int mFDServer;
 
   void createServerSocket(int port);
 };
 
+int socket_cloexec(int domain, int type, int protocol);
+int accept_cloexec(int sockfd, struct sockaddr *addr, socklen_t *addrlen);
+
 #endif //__OLY_SOCKET_H__
index 5fad583f7bd095661d213d1f3d8f113b3bf63743..f127c996d43b9c6559c1ad0fd25faec11017e2a6 100644 (file)
@@ -20,6 +20,7 @@ PerfBuffer::PerfBuffer() {
        for (int cpu = 0; cpu < ARRAY_LENGTH(mBuf); ++cpu) {
                mBuf[cpu] = MAP_FAILED;
                mDiscard[cpu] = false;
+               mFds[cpu] = -1;
        }
 }
 
@@ -31,8 +32,8 @@ PerfBuffer::~PerfBuffer() {
        }
 }
 
-bool PerfBuffer::useFd(const int cpu, const int fd, const int groupFd) {
-       if (fd == groupFd) {
+bool PerfBuffer::useFd(const int cpu, const int fd) {
+       if (mFds[cpu] < 0) {
                if (mBuf[cpu] != MAP_FAILED) {
                        logg->logMessage("%s(%s:%i): cpu %i already online or not correctly cleaned up", __FUNCTION__, __FILE__, __LINE__, cpu);
                        return false;
@@ -44,6 +45,7 @@ bool PerfBuffer::useFd(const int cpu, const int fd, const int groupFd) {
                        logg->logMessage("%s(%s:%i): mmap failed", __FUNCTION__, __FILE__, __LINE__);
                        return false;
                }
+               mFds[cpu] = fd;
 
                // Check the version
                struct perf_event_mmap_page *pemp = static_cast<struct perf_event_mmap_page *>(mBuf[cpu]);
@@ -57,7 +59,7 @@ bool PerfBuffer::useFd(const int cpu, const int fd, const int groupFd) {
                        return false;
                }
 
-               if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, groupFd) < 0) {
+               if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, mFds[cpu]) < 0) {
                        logg->logMessage("%s(%s:%i): ioctl failed", __FUNCTION__, __FILE__, __LINE__);
                        return false;
                }
@@ -89,6 +91,41 @@ bool PerfBuffer::isEmpty() {
        return true;
 }
 
// Encode the perf ring-buffer contents between tail and head as one or
// more APC_DATA frames (frame header, FRAME_PERF tag, cpu, then packed
// 64-bit words) and hand each frame to the sender. Events are packed
// whole because Streamline assumes an event never spans two frames.
static void compressAndSend(const int cpu, const __u64 head, __u64 tail, const uint8_t *const b, Sender *const sender) {
	// Pick a big size but something smaller than the chunkSize in Sender::writeData which is 100k
	char buf[1<<16];
	int writePos = 0;
	// Local captures omit the one-byte response-type prefix
	const int typeLength = gSessionData->mLocalCapture ? 0 : 1;

	while (head > tail) {
		writePos = 0;
		if (!gSessionData->mLocalCapture) {
			buf[writePos++] = RESPONSE_APC_DATA;
		}
		// Reserve space for size
		writePos += sizeof(uint32_t);
		Buffer::packInt(buf, sizeof(buf), writePos, FRAME_PERF);
		Buffer::packInt(buf, sizeof(buf), writePos, cpu);

		// Pack as many whole events into this frame as will fit
		while (head > tail) {
			const int count = reinterpret_cast<const struct perf_event_header *>(b + (tail & BUF_MASK))->size/sizeof(uint64_t);
			// Can this whole message be written as Streamline assumes events are not split between frames
			// NOTE(review): if a single event ever exceeded the frame budget this
			// would break out without advancing tail and the outer loop could
			// spin — presumably perf event sizes make that impossible; verify.
			if (sizeof(buf) <= writePos + count*Buffer::MAXSIZE_PACK64) {
				break;
			}
			for (int i = 0; i < count; ++i) {
				// Must account for message size
				Buffer::packInt64(buf, sizeof(buf), writePos, *reinterpret_cast<const uint64_t *>(b + (tail & BUF_MASK)));
				tail += sizeof(uint64_t);
			}
		}

		// Write size
		Buffer::writeLEInt(reinterpret_cast<unsigned char *>(buf + typeLength), writePos - typeLength - sizeof(uint32_t));
		sender->writeData(buf, writePos, RESPONSE_APC_DATA);
	}
}
+
 bool PerfBuffer::send(Sender *const sender) {
        for (int cpu = 0; cpu < gSessionData->mCores; ++cpu) {
                if (mBuf[cpu] == MAP_FAILED) {
@@ -102,26 +139,7 @@ bool PerfBuffer::send(Sender *const sender) {
 
                if (head > tail) {
                        const uint8_t *const b = static_cast<uint8_t *>(mBuf[cpu]) + gSessionData->mPageSize;
-                       const int offset = gSessionData->mLocalCapture ? 1 : 0;
-                       unsigned char header[7];
-                       header[0] = RESPONSE_APC_DATA;
-                       Buffer::writeLEInt(header + 1, head - tail + sizeof(header) - 5);
-                       // Should use real packing functions
-                       header[5] = FRAME_PERF;
-                       header[6] = cpu;
-
-                       // Write header
-                       sender->writeData(reinterpret_cast<const char *>(&header) + offset, sizeof(header) - offset, RESPONSE_APC_DATA);
-
-                       // Write data
-                       if ((head & ~BUF_MASK) == (tail & ~BUF_MASK)) {
-                               // Not wrapped
-                               sender->writeData(reinterpret_cast<const char *>(b + (tail & BUF_MASK)), head - tail, RESPONSE_APC_DATA);
-                       } else {
-                               // Wrapped
-                               sender->writeData(reinterpret_cast<const char *>(b + (tail & BUF_MASK)), BUF_SIZE - (tail & BUF_MASK), RESPONSE_APC_DATA);
-                               sender->writeData(reinterpret_cast<const char *>(b), head & BUF_MASK, RESPONSE_APC_DATA);
-                       }
+                       compressAndSend(cpu, head, tail, b, sender);
 
                        // Update tail with the data read
                        pemp->data_tail = head;
@@ -131,6 +149,7 @@ bool PerfBuffer::send(Sender *const sender) {
                        munmap(mBuf[cpu], gSessionData->mPageSize + BUF_SIZE);
                        mBuf[cpu] = MAP_FAILED;
                        mDiscard[cpu] = false;
+                       mFds[cpu] = -1;
                        logg->logMessage("%s(%s:%i): Unmaped cpu %i", __FUNCTION__, __FILE__, __LINE__, cpu);
                }
        }
index 278a3b9d6db7d0a117be461a42a6d1eab6374219..25a10625a9e8006b255b4c4a0183e741b0620e9e 100644 (file)
@@ -21,7 +21,7 @@ public:
        PerfBuffer();
        ~PerfBuffer();
 
-       bool useFd(const int cpu, const int fd, const int groupFd);
+       bool useFd(const int cpu, const int fd);
        void discard(const int cpu);
        bool isEmpty();
        bool send(Sender *const sender);
@@ -30,6 +30,8 @@ private:
        void *mBuf[NR_CPUS];
        // After the buffer is flushed it should be unmaped
        bool mDiscard[NR_CPUS];
+       // fd that corresponds to the mBuf
+       int mFds[NR_CPUS];
 
        // Intentionally undefined
        PerfBuffer(const PerfBuffer &);
index 8e25c22f6798534317115c12f2fa8ecccc252a2f..ee90284cee41434cef36690e753a5089039092b4 100644 (file)
@@ -11,6 +11,7 @@
 #include <dirent.h>
 #include <sys/utsname.h>
 #include <time.h>
+#include <unistd.h>
 
 #include "Buffer.h"
 #include "Config.h"
@@ -21,6 +22,7 @@
 #include "Logging.h"
 #include "PerfGroup.h"
 #include "SessionData.h"
+#include "Setup.h"
 
 #define PERF_DEVICES "/sys/bus/event_source/devices"
 
@@ -30,7 +32,7 @@
 struct gator_cpu {
        const int cpuid;
        // Human readable name
-       const char core_name[32];
+       const char *const core_name;
        // gatorfs event and Perf PMU name
        const char *const pmnc_name;
        const int pmnc_counters;
@@ -46,7 +48,6 @@ static const struct gator_cpu gator_cpus[] = {
        { 0xc07, "Cortex-A7",    "ARMv7_Cortex_A7",  4 },
        { 0xc08, "Cortex-A8",    "ARMv7_Cortex_A8",  4 },
        { 0xc09, "Cortex-A9",    "ARMv7_Cortex_A9",  6 },
-       { 0xc0d, "Cortex-A12",   "ARMv7_Cortex_A12", 6 },
        { 0xc0f, "Cortex-A15",   "ARMv7_Cortex_A15", 6 },
        { 0xc0e, "Cortex-A17",   "ARMv7_Cortex_A17", 6 },
        { 0x00f, "Scorpion",     "Scorpion",         4 },
@@ -62,85 +63,91 @@ static const struct gator_cpu gator_cpus[] = {
 static const char OLD_PMU_PREFIX[] = "ARMv7 Cortex-";
 static const char NEW_PMU_PREFIX[] = "ARMv7_Cortex_";
 
-class PerfCounter {
+struct uncore_counter {
+       // Perf PMU name
+       const char *const perfName;
+       // gatorfs event name
+       const char *const gatorName;
+       const int count;
+};
+
+static const struct uncore_counter uncore_counters[] = {
+       { "CCI_400", "CCI_400", 4 },
+       { "CCI_400-r1", "CCI_400-r1", 4 },
+       { "ccn", "ARM_CCN_5XX", 8 },
+};
+
+class PerfCounter : public DriverCounter {
 public:
-       PerfCounter(PerfCounter *next, const char *name, uint32_t type, uint64_t config) : mNext(next), mName(name), mType(type), mCount(0), mKey(getEventKey()), mConfig(config), mEnabled(false) {}
+       PerfCounter(DriverCounter *next, const char *name, uint32_t type, uint64_t config, bool perCpu) : DriverCounter(next, name), mType(type), mCount(0), mConfig(config), mPerCpu(perCpu) {}
+
        ~PerfCounter() {
-               delete [] mName;
        }
 
-       PerfCounter *getNext() const { return mNext; }
-       const char *getName() const { return mName; }
        uint32_t getType() const { return mType; }
        int getCount() const { return mCount; }
        void setCount(const int count) { mCount = count; }
-       int getKey() const { return mKey; }
        uint64_t getConfig() const { return mConfig; }
        void setConfig(const uint64_t config) { mConfig = config; }
-       bool isEnabled() const { return mEnabled; }
-       void setEnabled(const bool enabled) { mEnabled = enabled; }
+       bool isPerCpu() const { return mPerCpu; }
 
 private:
-       PerfCounter *const mNext;
-       const char *const mName;
        const uint32_t mType;
        int mCount;
-       const int mKey;
        uint64_t mConfig;
-       bool mEnabled;
+       bool mPerCpu;
 };
 
-PerfDriver::PerfDriver() : mCounters(NULL), mIsSetup(false) {
+PerfDriver::PerfDriver() : mIsSetup(false), mLegacySupport(false) {
 }
 
 PerfDriver::~PerfDriver() {
-       while (mCounters != NULL) {
-               PerfCounter *counter = mCounters;
-               mCounters = counter->getNext();
-               delete counter;
-       }
 }
 
 void PerfDriver::addCpuCounters(const char *const counterName, const int type, const int numCounters) {
        int len = snprintf(NULL, 0, "%s_ccnt", counterName) + 1;
        char *name = new char[len];
        snprintf(name, len, "%s_ccnt", counterName);
-       mCounters = new PerfCounter(mCounters, name, type, -1);
+       setCounters(new PerfCounter(getCounters(), name, type, -1, true));
 
        for (int j = 0; j < numCounters; ++j) {
                len = snprintf(NULL, 0, "%s_cnt%d", counterName, j) + 1;
                name = new char[len];
                snprintf(name, len, "%s_cnt%d", counterName, j);
-               mCounters = new PerfCounter(mCounters, name, type, -1);
+               setCounters(new PerfCounter(getCounters(), name, type, -1, true));
        }
 }
 
-// From include/generated/uapi/linux/version.h
-#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
// Register counters for an uncore (system-wide, not per-cpu) PMU: one
// "<name>_ccnt" counter plus numCounters "<name>_cntN" counters. Mirrors
// addCpuCounters but constructs the PerfCounters with perCpu == false.
void PerfDriver::addUncoreCounters(const char *const counterName, const int type, const int numCounters) {
	int len = snprintf(NULL, 0, "%s_ccnt", counterName) + 1;
	char *name = new char[len];
	snprintf(name, len, "%s_ccnt", counterName);
	setCounters(new PerfCounter(getCounters(), name, type, -1, false));

	for (int j = 0; j < numCounters; ++j) {
		len = snprintf(NULL, 0, "%s_cnt%d", counterName, j) + 1;
		name = new char[len];
		snprintf(name, len, "%s_cnt%d", counterName, j);
		setCounters(new PerfCounter(getCounters(), name, type, -1, false));
	}
}
 
 bool PerfDriver::setup() {
        // Check the kernel version
-       struct utsname utsname;
-       if (uname(&utsname) != 0) {
-               logg->logMessage("%s(%s:%i): uname failed", __FUNCTION__, __FILE__, __LINE__);
+       int release[3];
+       if (!getLinuxVersion(release)) {
+               logg->logMessage("%s(%s:%i): getLinuxVersion failed", __FUNCTION__, __FILE__, __LINE__);
                return false;
        }
 
-       int release[3] = { 0, 0, 0 };
-       int part = 0;
-       char *ch = utsname.release;
-       while (*ch >= '0' && *ch <= '9' && part < ARRAY_LENGTH(release)) {
-               release[part] = 10*release[part] + *ch - '0';
-
-               ++ch;
-               if (*ch == '.') {
-                       ++part;
-                       ++ch;
-               }
+       if (KERNEL_VERSION(release[0], release[1], release[2]) < KERNEL_VERSION(3, 4, 0)) {
+               logg->logMessage("%s(%s:%i): Unsupported kernel version", __FUNCTION__, __FILE__, __LINE__);
+               return false;
        }
+       mLegacySupport = KERNEL_VERSION(release[0], release[1], release[2]) < KERNEL_VERSION(3, 12, 0);
 
-       if (KERNEL_VERSION(release[0], release[1], release[2]) < KERNEL_VERSION(3, 12, 0)) {
-               logg->logMessage("%s(%s:%i): Unsupported kernel version", __FUNCTION__, __FILE__, __LINE__);
+       if (access(EVENTS_PATH, R_OK) != 0) {
+               logg->logMessage("%s(%s:%i): " EVENTS_PATH " does not exist, is CONFIG_TRACING and CONFIG_CONTEXT_SWITCH_TRACER enabled?", __FUNCTION__, __FILE__, __LINE__);
                return false;
        }
 
@@ -155,12 +162,14 @@ bool PerfDriver::setup() {
        struct dirent *dirent;
        while ((dirent = readdir(dir)) != NULL) {
                for (int i = 0; i < ARRAY_LENGTH(gator_cpus); ++i) {
+                       const struct gator_cpu *const gator_cpu = &gator_cpus[i];
+
                        // Do the names match exactly?
-                       if (strcmp(dirent->d_name, gator_cpus[i].pmnc_name) != 0 &&
-                                       // Do these names match but have the old vs new prefix?
-                           (strncmp(dirent->d_name, OLD_PMU_PREFIX, sizeof(OLD_PMU_PREFIX) - 1) != 0 ||
-                            strncmp(gator_cpus[i].pmnc_name, NEW_PMU_PREFIX, sizeof(NEW_PMU_PREFIX) - 1) != 0 ||
-                            strcmp(dirent->d_name + sizeof(OLD_PMU_PREFIX) - 1, gator_cpus[i].pmnc_name + sizeof(NEW_PMU_PREFIX) - 1) != 0)) {
+                       if (strcasecmp(gator_cpu->pmnc_name, dirent->d_name) != 0 &&
+                           // Do these names match but have the old vs new prefix?
+                           ((strncasecmp(dirent->d_name, OLD_PMU_PREFIX, sizeof(OLD_PMU_PREFIX) - 1) != 0 ||
+                             strncasecmp(gator_cpu->pmnc_name, NEW_PMU_PREFIX, sizeof(NEW_PMU_PREFIX) - 1) != 0 ||
+                             strcasecmp(dirent->d_name + sizeof(OLD_PMU_PREFIX) - 1, gator_cpu->pmnc_name + sizeof(NEW_PMU_PREFIX) - 1) != 0))) {
                                continue;
                        }
 
@@ -172,7 +181,24 @@ bool PerfDriver::setup() {
                        }
 
                        foundCpu = true;
-                       addCpuCounters(gator_cpus[i].pmnc_name, type, gator_cpus[i].pmnc_counters);
+                       logg->logMessage("Adding cpu counters for %s", gator_cpu->pmnc_name);
+                       addCpuCounters(gator_cpu->pmnc_name, type, gator_cpu->pmnc_counters);
+               }
+
+               for (int i = 0; i < ARRAY_LENGTH(uncore_counters); ++i) {
+                       if (strcmp(dirent->d_name, uncore_counters[i].perfName) != 0) {
+                               continue;
+                       }
+
+                       int type;
+                       char buf[256];
+                       snprintf(buf, sizeof(buf), PERF_DEVICES "/%s/type", dirent->d_name);
+                       if (DriverSource::readIntDriver(buf, &type) != 0) {
+                               continue;
+                       }
+
+                       logg->logMessage("Adding uncore counters for %s", uncore_counters[i].gatorName);
+                       addUncoreCounters(uncore_counters[i].gatorName, type, uncore_counters[i].count);
                }
        }
        closedir(dir);
@@ -185,6 +211,7 @@ bool PerfDriver::setup() {
                        }
 
                        foundCpu = true;
+                       logg->logMessage("Adding cpu counters (based on cpuid) for %s", gator_cpus[i].pmnc_name);
                        addCpuCounters(gator_cpus[i].pmnc_name, PERF_TYPE_RAW, gator_cpus[i].pmnc_counters);
                }
        }
@@ -203,31 +230,20 @@ bool PerfDriver::setup() {
 
        id = getTracepointId("irq/softirq_exit", &printb);
        if (id >= 0) {
-               mCounters = new PerfCounter(mCounters, "Linux_irq_softirq", PERF_TYPE_TRACEPOINT, id);
+               setCounters(new PerfCounter(getCounters(), "Linux_irq_softirq", PERF_TYPE_TRACEPOINT, id, true));
        }
 
        id = getTracepointId("irq/irq_handler_exit", &printb);
        if (id >= 0) {
-               mCounters = new PerfCounter(mCounters, "Linux_irq_irq", PERF_TYPE_TRACEPOINT, id);
+               setCounters(new PerfCounter(getCounters(), "Linux_irq_irq", PERF_TYPE_TRACEPOINT, id, true));
        }
 
-       //Linux_block_rq_wr
-       //Linux_block_rq_rd
-       //Linux_net_rx
-       //Linux_net_tx
-
        id = getTracepointId(SCHED_SWITCH, &printb);
        if (id >= 0) {
-               mCounters = new PerfCounter(mCounters, "Linux_sched_switch", PERF_TYPE_TRACEPOINT, id);
+               setCounters(new PerfCounter(getCounters(), "Linux_sched_switch", PERF_TYPE_TRACEPOINT, id, true));
        }
 
-       //Linux_meminfo_memused
-       //Linux_meminfo_memfree
-       //Linux_meminfo_bufferram
-       //Linux_power_cpu_freq
-       //Linux_power_cpu_idle
-
-       mCounters = new PerfCounter(mCounters, "Linux_cpu_wait_contention", TYPE_DERIVED, -1);
+       setCounters(new PerfCounter(getCounters(), "Linux_cpu_wait_contention", TYPE_DERIVED, -1, false));
 
        //Linux_cpu_wait_io
 
@@ -250,57 +266,48 @@ bool PerfDriver::summary(Buffer *const buffer) {
                logg->logMessage("%s(%s:%i): clock_gettime failed", __FUNCTION__, __FILE__, __LINE__);
                return false;
        }
-       const int64_t timestamp = (int64_t)ts.tv_sec * 1000000000L + ts.tv_nsec;
+       const int64_t timestamp = (int64_t)ts.tv_sec * NS_PER_S + ts.tv_nsec;
 
-       if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
-               logg->logMessage("%s(%s:%i): clock_gettime failed", __FUNCTION__, __FILE__, __LINE__);
-               return false;
-       }
-       const int64_t uptime = (int64_t)ts.tv_sec * 1000000000L + ts.tv_nsec;
+       const uint64_t monotonicStarted = getTime();
+       gSessionData->mMonotonicStarted = monotonicStarted;
 
-       buffer->summary(timestamp, uptime, 0, buf);
+       buffer->summary(monotonicStarted, timestamp, monotonicStarted, monotonicStarted, buf);
 
        for (int i = 0; i < gSessionData->mCores; ++i) {
-               int j;
-               for (j = 0; j < ARRAY_LENGTH(gator_cpus); ++j) {
-                       if (gator_cpus[j].cpuid == gSessionData->mCpuIds[i]) {
-                               break;
-                       }
-               }
-               if (gator_cpus[j].cpuid == gSessionData->mCpuIds[i]) {
-                       buffer->coreName(i, gSessionData->mCpuIds[i], gator_cpus[j].core_name);
-               } else {
-                       snprintf(buf, sizeof(buf), "Unknown (0x%.3x)", gSessionData->mCpuIds[i]);
-                       buffer->coreName(i, gSessionData->mCpuIds[i], buf);
-               }
+               coreName(monotonicStarted, buffer, i);
        }
-       buffer->commit(1);
+       buffer->commit(monotonicStarted);
 
        return true;
 }
 
-PerfCounter *PerfDriver::findCounter(const Counter &counter) const {
-       for (PerfCounter * perfCounter = mCounters; perfCounter != NULL; perfCounter = perfCounter->getNext()) {
-               if (strcmp(perfCounter->getName(), counter.getType()) == 0) {
-                       return perfCounter;
-               }
+void PerfDriver::coreName(const uint32_t startTime, Buffer *const buffer, const int cpu) {
+       // Don't send information on a cpu we know nothing about
+       if (gSessionData->mCpuIds[cpu] == -1) {
+               return;
        }
 
-       return NULL;
-}
-
-bool PerfDriver::claimCounter(const Counter &counter) const {
-       return findCounter(counter) != NULL;
-}
-
-void PerfDriver::resetCounters() {
-       for (PerfCounter * counter = mCounters; counter != NULL; counter = counter->getNext()) {
-               counter->setEnabled(false);
+       int j;
+       for (j = 0; j < ARRAY_LENGTH(gator_cpus); ++j) {
+               if (gator_cpus[j].cpuid == gSessionData->mCpuIds[cpu]) {
+                       break;
+               }
+       }
+       if (gator_cpus[j].cpuid == gSessionData->mCpuIds[cpu]) {
+               buffer->coreName(startTime, cpu, gSessionData->mCpuIds[cpu], gator_cpus[j].core_name);
+       } else {
+               char buf[32];
+               if (gSessionData->mCpuIds[cpu] == -1) {
+                       snprintf(buf, sizeof(buf), "Unknown");
+               } else {
+                       snprintf(buf, sizeof(buf), "Unknown (0x%.3x)", gSessionData->mCpuIds[cpu]);
+               }
+               buffer->coreName(startTime, cpu, gSessionData->mCpuIds[cpu], buf);
        }
 }
 
 void PerfDriver::setupCounter(Counter &counter) {
-       PerfCounter *const perfCounter = findCounter(counter);
+       PerfCounter *const perfCounter = static_cast<PerfCounter *>(findCounter(counter));
        if (perfCounter == NULL) {
                counter.setEnabled(false);
                return;
@@ -315,21 +322,10 @@ void PerfDriver::setupCounter(Counter &counter) {
        counter.setKey(perfCounter->getKey());
 }
 
-int PerfDriver::writeCounters(mxml_node_t *root) const {
-       int count = 0;
-       for (PerfCounter * counter = mCounters; counter != NULL; counter = counter->getNext()) {
-               mxml_node_t *node = mxmlNewElement(root, "counter");
-               mxmlElementSetAttr(node, "name", counter->getName());
-               ++count;
-       }
-
-       return count;
-}
-
-bool PerfDriver::enable(PerfGroup *group, Buffer *const buffer) const {
-       for (PerfCounter * counter = mCounters; counter != NULL; counter = counter->getNext()) {
+bool PerfDriver::enable(const uint64_t currTime, PerfGroup *const group, Buffer *const buffer) const {
+       for (PerfCounter *counter = static_cast<PerfCounter *>(getCounters()); counter != NULL; counter = static_cast<PerfCounter *>(counter->getNext())) {
                if (counter->isEnabled() && (counter->getType() != TYPE_DERIVED)) {
-                       if (!group->add(buffer, counter->getKey(), counter->getType(), counter->getConfig(), counter->getCount(), 0, 0)) {
+                       if (!group->add(currTime, buffer, counter->getKey(), counter->getType(), counter->getConfig(), counter->getCount(), counter->getCount() > 0 ? PERF_SAMPLE_TID | PERF_SAMPLE_IP : 0, counter->isPerCpu() ? PERF_GROUP_PER_CPU : 0)) {
                                logg->logMessage("%s(%s:%i): PerfGroup::add failed", __FUNCTION__, __FILE__, __LINE__);
                                return false;
                        }
index 3181b74f55705114365199142c14bd6fa48a7f3c..846203a9e18b3def479ea20c58e3cc488d384fb6 100644 (file)
@@ -9,6 +9,8 @@
 #ifndef PERFDRIVER_H
 #define PERFDRIVER_H
 
+#include <stdint.h>
+
 #include "Driver.h"
 
 // If debugfs is not mounted at /sys/kernel/debug, update DEBUGFS_PATH
 #define EVENTS_PATH DEBUGFS_PATH "/tracing/events"
 
 #define SCHED_SWITCH "sched/sched_switch"
+#define CPU_IDLE "power/cpu_idle"
 
 class Buffer;
 class DynBuf;
-class PerfCounter;
 class PerfGroup;
 
-class PerfDriver : public Driver {
+class PerfDriver : public SimpleDriver {
 public:
        PerfDriver();
        ~PerfDriver();
 
+       bool getLegacySupport() const { return mLegacySupport; }
+
        bool setup();
        bool summary(Buffer *const buffer);
+       void coreName(const uint32_t startTime, Buffer *const buffer, const int cpu);
        bool isSetup() const { return mIsSetup; }
 
-       bool claimCounter(const Counter &counter) const;
-       void resetCounters();
        void setupCounter(Counter &counter);
 
-       int writeCounters(mxml_node_t *root) const;
-
-       bool enable(PerfGroup *group, Buffer *const buffer) const;
+       bool enable(const uint64_t currTime, PerfGroup *const group, Buffer *const buffer) const;
 
        static long long getTracepointId(const char *const name, DynBuf *const printb);
 
 private:
-       PerfCounter *findCounter(const Counter &counter) const;
        void addCpuCounters(const char *const counterName, const int type, const int numCounters);
+       void addUncoreCounters(const char *const counterName, const int type, const int numCounters);
 
-       PerfCounter *mCounters;
        bool mIsSetup;
+       bool mLegacySupport;
 
        // Intentionally undefined
        PerfDriver(const PerfDriver &);
index faf5fcaf15e60280b7f3533fb37b1eb5b2806bda..4fd960a9058c5ebcd194b20fbb5f70979efbee34 100644 (file)
@@ -9,6 +9,7 @@
 #include "PerfGroup.h"
 
 #include <errno.h>
+#include <fcntl.h>
 #include <string.h>
 #include <sys/ioctl.h>
 #include <sys/syscall.h>
@@ -23,7 +24,9 @@
 #define DEFAULT_PEA_ARGS(pea, additionalSampleType) \
        pea.size = sizeof(pea); \
        /* Emit time, read_format below, group leader id, and raw tracepoint info */ \
-       pea.sample_type = PERF_SAMPLE_TIME | PERF_SAMPLE_READ | PERF_SAMPLE_IDENTIFIER | additionalSampleType; \
+       pea.sample_type = (gSessionData->perf.getLegacySupport() \
+                                                                                ? PERF_SAMPLE_TID | PERF_SAMPLE_IP | PERF_SAMPLE_TIME | PERF_SAMPLE_READ | PERF_SAMPLE_ID \
+                                                                                : PERF_SAMPLE_TIME | PERF_SAMPLE_READ | PERF_SAMPLE_IDENTIFIER ) | additionalSampleType; \
        /* Emit emit value in group format */ \
        pea.read_format = PERF_FORMAT_ID | PERF_FORMAT_GROUP; \
        /* start out disabled */ \
        /* have a sampling interrupt happen when we cross the wakeup_watermark boundary */ \
        pea.watermark = 1; \
        /* Be conservative in flush size as only one buffer set is monitored */ \
-       pea.wakeup_watermark = 3 * BUF_SIZE / 4
+       pea.wakeup_watermark = BUF_SIZE / 2
 
 static int sys_perf_event_open(struct perf_event_attr *const attr, const pid_t pid, const int cpu, const int group_fd, const unsigned long flags) {
-       return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
+       int fd = syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
+       if (fd < 0) {
+               return -1;
+       }
+       int fdf = fcntl(fd, F_GETFD);
+       if ((fdf == -1) || (fcntl(fd, F_SETFD, fdf | FD_CLOEXEC) != 0)) {
+               close(fd);
+               return -1;
+       }
+       return fd;
 }
 
 PerfGroup::PerfGroup(PerfBuffer *const pb) : mPb(pb) {
        memset(&mAttrs, 0, sizeof(mAttrs));
+       memset(&mPerCpu, 0, sizeof(mPerCpu));
        memset(&mKeys, -1, sizeof(mKeys));
        memset(&mFds, -1, sizeof(mFds));
 }
@@ -51,7 +64,7 @@ PerfGroup::~PerfGroup() {
        }
 }
 
-bool PerfGroup::add(Buffer *const buffer, const int key, const __u32 type, const __u64 config, const __u64 sample, const __u64 sampleType, const int flags) {
+bool PerfGroup::add(const uint64_t currTime, Buffer *const buffer, const int key, const __u32 type, const __u64 config, const __u64 sample, const __u64 sampleType, const int flags) {
        int i;
        for (i = 0; i < ARRAY_LENGTH(mKeys); ++i) {
                if (mKeys[i] < 0) {
@@ -75,15 +88,16 @@ bool PerfGroup::add(Buffer *const buffer, const int key, const __u32 type, const
        mAttrs[i].freq = (flags & PERF_GROUP_FREQ ? 1 : 0);
        mAttrs[i].task = (flags & PERF_GROUP_TASK ? 1 : 0);
        mAttrs[i].sample_id_all = (flags & PERF_GROUP_SAMPLE_ID_ALL ? 1 : 0);
+       mPerCpu[i] = (flags & PERF_GROUP_PER_CPU);
 
        mKeys[i] = key;
 
-       buffer->pea(&mAttrs[i], key);
+       buffer->pea(currTime, &mAttrs[i], key);
 
        return true;
 }
 
-bool PerfGroup::prepareCPU(const int cpu) {
+int PerfGroup::prepareCPU(const int cpu, Monitor *const monitor) {
        logg->logMessage("%s(%s:%i): Onlining cpu %i", __FUNCTION__, __FILE__, __LINE__, cpu);
 
        for (int i = 0; i < ARRAY_LENGTH(mKeys); ++i) {
@@ -91,29 +105,42 @@ bool PerfGroup::prepareCPU(const int cpu) {
                        continue;
                }
 
+               if ((cpu != 0) && !mPerCpu[i]) {
+                       continue;
+               }
+
                const int offset = i * gSessionData->mCores;
                if (mFds[cpu + offset] >= 0) {
                        logg->logMessage("%s(%s:%i): cpu already online or not correctly cleaned up", __FUNCTION__, __FILE__, __LINE__);
-                       return false;
+                       return PG_FAILURE;
                }
 
-               logg->logMessage("%s(%s:%i): perf_event_open cpu: %i type: %lli config: %lli sample: %lli sample_type: %lli", __FUNCTION__, __FILE__, __LINE__, cpu, (long long)mAttrs[i].type, (long long)mAttrs[i].config, (long long)mAttrs[i].sample_period, (long long)mAttrs[i].sample_type);
+               logg->logMessage("%s(%s:%i): perf_event_open cpu: %i type: %lli config: %lli sample: %lli sample_type: 0x%llx pinned: %i mmap: %i comm: %i freq: %i task: %i sample_id_all: %i", __FUNCTION__, __FILE__, __LINE__, cpu, (long long)mAttrs[i].type, (long long)mAttrs[i].config, (long long)mAttrs[i].sample_period, (long long)mAttrs[i].sample_type, mAttrs[i].pinned, mAttrs[i].mmap, mAttrs[i].comm, mAttrs[i].freq, mAttrs[i].task, mAttrs[i].sample_id_all);
                mFds[cpu + offset] = sys_perf_event_open(&mAttrs[i], -1, cpu, i == 0 ? -1 : mFds[cpu], i == 0 ? 0 : PERF_FLAG_FD_OUTPUT);
                if (mFds[cpu + offset] < 0) {
                        logg->logMessage("%s(%s:%i): failed %s", __FUNCTION__, __FILE__, __LINE__, strerror(errno));
+                       if (errno == ENODEV) {
+                               return PG_CPU_OFFLINE;
+                       }
                        continue;
                }
 
-               if (!mPb->useFd(cpu, mFds[cpu + offset], mFds[cpu])) {
+               if (!mPb->useFd(cpu, mFds[cpu + offset])) {
                        logg->logMessage("%s(%s:%i): PerfBuffer::useFd failed", __FUNCTION__, __FILE__, __LINE__);
-                       return false;
+                       return PG_FAILURE;
+               }
+
+
+               if (!monitor->add(mFds[cpu + offset])) {
+                 logg->logMessage("%s(%s:%i): Monitor::add failed", __FUNCTION__, __FILE__, __LINE__);
+                 return PG_FAILURE;
                }
        }
 
-       return true;
+       return PG_SUCCESS;
 }
 
-int PerfGroup::onlineCPU(const int cpu, const bool start, Buffer *const buffer, Monitor *const monitor) {
+int PerfGroup::onlineCPU(const uint64_t currTime, const int cpu, const bool start, Buffer *const buffer) {
        __u64 ids[ARRAY_LENGTH(mKeys)];
        int coreKeys[ARRAY_LENGTH(mKeys)];
        int idCount = 0;
@@ -125,30 +152,41 @@ int PerfGroup::onlineCPU(const int cpu, const bool start, Buffer *const buffer,
                }
 
                coreKeys[idCount] = mKeys[i];
-               if (ioctl(fd, PERF_EVENT_IOC_ID, &ids[idCount]) != 0) {
+               if (!gSessionData->perf.getLegacySupport() && ioctl(fd, PERF_EVENT_IOC_ID, &ids[idCount]) != 0 &&
+                               // Workaround for running 32-bit gatord on 64-bit systems, kernel patch in the works
+                               ioctl(fd, (PERF_EVENT_IOC_ID & ~IOCSIZE_MASK) | (8 << _IOC_SIZESHIFT), &ids[idCount]) != 0) {
                        logg->logMessage("%s(%s:%i): ioctl failed", __FUNCTION__, __FILE__, __LINE__);
-                       return false;
+                       return 0;
                }
                ++idCount;
        }
 
-       if (!monitor->add(mFds[cpu])) {
-               logg->logMessage("%s(%s:%i): Monitor::add failed", __FUNCTION__, __FILE__, __LINE__);
-               return false;
+       if (!gSessionData->perf.getLegacySupport()) {
+               buffer->keys(currTime, idCount, ids, coreKeys);
+       } else {
+               char buf[1024];
+               ssize_t bytes = read(mFds[cpu], buf, sizeof(buf));
+               if (bytes < 0) {
+                       logg->logMessage("read failed");
+                       return 0;
+               }
+               buffer->keysOld(currTime, idCount, coreKeys, bytes, buf);
        }
 
-       buffer->keys(idCount, ids, coreKeys);
-
        if (start) {
                for (int i = 0; i < ARRAY_LENGTH(mKeys); ++i) {
                        int offset = i * gSessionData->mCores + cpu;
-                       if (mFds[offset] >= 0 && ioctl(mFds[offset], PERF_EVENT_IOC_ENABLE) < 0) {
+                       if (mFds[offset] >= 0 && ioctl(mFds[offset], PERF_EVENT_IOC_ENABLE, 0) < 0) {
                                logg->logMessage("%s(%s:%i): ioctl failed", __FUNCTION__, __FILE__, __LINE__);
-                               return false;
+                               return 0;
                        }
                }
        }
 
+       if (idCount == 0) {
+               logg->logMessage("%s(%s:%i): no events came online", __FUNCTION__, __FILE__, __LINE__);
+       }
+
        return idCount;
 }
 
@@ -157,7 +195,7 @@ bool PerfGroup::offlineCPU(const int cpu) {
 
        for (int i = 0; i < ARRAY_LENGTH(mKeys); ++i) {
                int offset = i * gSessionData->mCores + cpu;
-               if (mFds[offset] >= 0 && ioctl(mFds[offset], PERF_EVENT_IOC_DISABLE) < 0) {
+               if (mFds[offset] >= 0 && ioctl(mFds[offset], PERF_EVENT_IOC_DISABLE, 0) < 0) {
                        logg->logMessage("%s(%s:%i): ioctl failed", __FUNCTION__, __FILE__, __LINE__);
                        return false;
                }
@@ -183,7 +221,7 @@ bool PerfGroup::offlineCPU(const int cpu) {
 
 bool PerfGroup::start() {
        for (int pos = 0; pos < ARRAY_LENGTH(mFds); ++pos) {
-               if (mFds[pos] >= 0 && ioctl(mFds[pos], PERF_EVENT_IOC_ENABLE) < 0) {
+               if (mFds[pos] >= 0 && ioctl(mFds[pos], PERF_EVENT_IOC_ENABLE, 0) < 0) {
                        logg->logMessage("%s(%s:%i): ioctl failed", __FUNCTION__, __FILE__, __LINE__);
                        goto fail;
                }
@@ -200,7 +238,7 @@ bool PerfGroup::start() {
 void PerfGroup::stop() {
        for (int pos = ARRAY_LENGTH(mFds) - 1; pos >= 0; --pos) {
                if (mFds[pos] >= 0) {
-                       ioctl(mFds[pos], PERF_EVENT_IOC_DISABLE);
+                       ioctl(mFds[pos], PERF_EVENT_IOC_DISABLE, 0);
                }
        }
 }
index af496d41334c311b185ece1880a1b5021468a222..f7b3d725bac71d924ca466f11549ce9ddea669b8 100644 (file)
@@ -9,6 +9,8 @@
 #ifndef PERF_GROUP
 #define PERF_GROUP
 
+#include <stdint.h>
+
 // Use a snapshot of perf_event.h as it may be more recent than what is on the target and if not newer features won't be supported anyways
 #include "k/perf_event.h"
 
@@ -24,6 +26,13 @@ enum PerfGroupFlags {
        PERF_GROUP_FREQ          = 1 << 2,
        PERF_GROUP_TASK          = 1 << 3,
        PERF_GROUP_SAMPLE_ID_ALL = 1 << 4,
+       PERF_GROUP_PER_CPU       = 1 << 5,
+};
+
+enum {
+       PG_SUCCESS = 0,
+       PG_FAILURE,
+       PG_CPU_OFFLINE,
 };
 
 class PerfGroup {
@@ -31,11 +40,11 @@ public:
        PerfGroup(PerfBuffer *const pb);
        ~PerfGroup();
 
-       bool add(Buffer *const buffer, const int key, const __u32 type, const __u64 config, const __u64 sample, const __u64 sampleType, const int flags);
+       bool add(const uint64_t currTime, Buffer *const buffer, const int key, const __u32 type, const __u64 config, const __u64 sample, const __u64 sampleType, const int flags);
        // Safe to call concurrently
-       bool prepareCPU(const int cpu);
+       int prepareCPU(const int cpu, Monitor *const monitor);
        // Not safe to call concurrently. Returns the number of events enabled
-       int onlineCPU(const int cpu, const bool start, Buffer *const buffer, Monitor *const monitor);
+       int onlineCPU(const uint64_t currTime, const int cpu, const bool start, Buffer *const buffer);
        bool offlineCPU(int cpu);
        bool start();
        void stop();
@@ -43,6 +52,7 @@ public:
 private:
        // +1 for the group leader
        struct perf_event_attr mAttrs[MAX_PERFORMANCE_COUNTERS + 1];
+       bool mPerCpu[MAX_PERFORMANCE_COUNTERS + 1];
        int mKeys[MAX_PERFORMANCE_COUNTERS + 1];
        int mFds[NR_CPUS * (MAX_PERFORMANCE_COUNTERS + 1)];
        PerfBuffer *const mPb;
index 1f1cb1988f0017dcc223eef47c6a291ff6dd5d12..193b7789a290bb1583db307568ec94a2043599ed 100644 (file)
@@ -8,8 +8,14 @@
 
 #include "PerfSource.h"
 
+#include <dirent.h>
 #include <errno.h>
+#include <signal.h>
 #include <string.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
 #include <unistd.h>
 
 #include "Child.h"
 #include "Proc.h"
 #include "SessionData.h"
 
-#define MS_PER_US 1000000
+#ifndef SCHED_RESET_ON_FORK
+#define SCHED_RESET_ON_FORK 0x40000000
+#endif
 
 extern Child *child;
 
-static bool sendTracepointFormat(Buffer *const buffer, const char *const name, DynBuf *const printb, DynBuf *const b) {
+static bool sendTracepointFormat(const uint64_t currTime, Buffer *const buffer, const char *const name, DynBuf *const printb, DynBuf *const b) {
        if (!printb->printf(EVENTS_PATH "/%s/format", name)) {
                logg->logMessage("%s(%s:%i): DynBuf::printf failed", __FUNCTION__, __FILE__, __LINE__);
                return false;
@@ -32,47 +40,115 @@ static bool sendTracepointFormat(Buffer *const buffer, const char *const name, D
                logg->logMessage("%s(%s:%i): DynBuf::read failed", __FUNCTION__, __FILE__, __LINE__);
                return false;
        }
-       buffer->format(b->getLength(), b->getBuf());
+       buffer->format(currTime, b->getLength(), b->getBuf());
 
        return true;
 }
 
-PerfSource::PerfSource(sem_t *senderSem, sem_t *startProfile) : mSummary(0, FRAME_SUMMARY, 1024, senderSem), mBuffer(0, FRAME_PERF_ATTRS, 1024*1024, senderSem), mCountersBuf(), mCountersGroup(&mCountersBuf), mMonitor(), mUEvent(), mSenderSem(senderSem), mStartProfile(startProfile), mInterruptFd(-1), mIsDone(false) {
-       long l = sysconf(_SC_PAGE_SIZE);
-       if (l < 0) {
-               logg->logError(__FILE__, __LINE__, "Unable to obtain the page size");
+static void *syncFunc(void *arg)
+{
+       struct timespec ts;
+       int64_t nextTime = gSessionData->mMonotonicStarted;
+       int err;
+       (void)arg;
+
+       prctl(PR_SET_NAME, (unsigned long)&"gatord-sync", 0, 0, 0);
+
+       // Mask all signals so that this thread will not be woken up
+       {
+               sigset_t set;
+               if (sigfillset(&set) != 0) {
+                       logg->logError(__FILE__, __LINE__, "sigfillset failed");
+                       handleException();
+               }
+               if ((err = pthread_sigmask(SIG_SETMASK, &set, NULL)) != 0) {
+                       logg->logError(__FILE__, __LINE__, "pthread_sigmask failed");
+                       handleException();
+               }
+       }
+
+       for (;;) {
+               if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts) != 0) {
+                       logg->logError(__FILE__, __LINE__, "clock_gettime failed");
+                       handleException();
+               }
+               const int64_t currTime = ts.tv_sec * NS_PER_S + ts.tv_nsec;
+
+               // Wake up once a second
+               nextTime += NS_PER_S;
+
+               // Always sleep more than 1 ms, hopefully things will line up better next time
+               const int64_t sleepTime = max(nextTime - currTime, (int64_t)(NS_PER_MS + 1));
+               ts.tv_sec = sleepTime/NS_PER_S;
+               ts.tv_nsec = sleepTime % NS_PER_S;
+
+               err = nanosleep(&ts, NULL);
+               if (err != 0) {
+                       fprintf(stderr, "clock_nanosleep failed: %s\n", strerror(err));
+                       return NULL;
+               }
+       }
+
+       return NULL;
+}
+
+static long getMaxCoreNum() {
+       DIR *dir = opendir("/sys/devices/system/cpu");
+       if (dir == NULL) {
+               logg->logError(__FILE__, __LINE__, "Unable to determine the number of cores on the target, opendir failed");
                handleException();
        }
-       gSessionData->mPageSize = static_cast<int>(l);
 
-       l = sysconf(_SC_NPROCESSORS_CONF);
-       if (l < 0) {
-               logg->logError(__FILE__, __LINE__, "Unable to obtain the number of cores");
+       long maxCoreNum = -1;
+       struct dirent *dirent;
+       while ((dirent = readdir(dir)) != NULL) {
+               if (strncmp(dirent->d_name, "cpu", 3) == 0) {
+                       char *endptr;
+                       errno = 0;
+                       long coreNum = strtol(dirent->d_name + 3, &endptr, 10);
+                       if ((errno == 0) && (*endptr == '\0') && (coreNum >= maxCoreNum)) {
+                               maxCoreNum = coreNum + 1;
+                       }
+               }
+       }
+       closedir(dir);
+
+       if (maxCoreNum < 1) {
+               logg->logError(__FILE__, __LINE__, "Unable to determine the number of cores on the target, no cpu# directories found");
                handleException();
        }
-       gSessionData->mCores = static_cast<int>(l);
-}
 
-PerfSource::~PerfSource() {
+       if (maxCoreNum >= NR_CPUS) {
+               logg->logError(__FILE__, __LINE__, "Too many cores on the target, please increase NR_CPUS in Config.h");
+               handleException();
+       }
+
+       return maxCoreNum;
 }
 
-struct PrepareParallelArgs {
-       PerfGroup *pg;
-       int cpu;
-};
+PerfSource::PerfSource(sem_t *senderSem, sem_t *startProfile) : mSummary(0, FRAME_SUMMARY, 1024, senderSem), mBuffer(0, FRAME_PERF_ATTRS, 1024*1024, senderSem), mCountersBuf(), mCountersGroup(&mCountersBuf), mIdleGroup(&mCountersBuf), mMonitor(), mUEvent(), mSenderSem(senderSem), mStartProfile(startProfile), mInterruptFd(-1), mIsDone(false) {
+       long l = sysconf(_SC_PAGE_SIZE);
+       if (l < 0) {
+               logg->logError(__FILE__, __LINE__, "Unable to obtain the page size");
+               handleException();
+       }
+       gSessionData->mPageSize = static_cast<int>(l);
+       gSessionData->mCores = static_cast<int>(getMaxCoreNum());
+}
 
-void *prepareParallel(void *arg) {
-       const PrepareParallelArgs *const args = (PrepareParallelArgs *)arg;
-       args->pg->prepareCPU(args->cpu);
-       return NULL;
+PerfSource::~PerfSource() {
 }
 
 bool PerfSource::prepare() {
        DynBuf printb;
        DynBuf b1;
-       DynBuf b2;
-       DynBuf b3;
        long long schedSwitchId;
+       long long cpuIdleId;
+
+       const uint64_t currTime = getTime();
+
+       // Reread cpuinfo since cores may have changed since startup
+       gSessionData->readCpuInfo();
 
        if (0
                        || !mMonitor.init()
@@ -80,76 +156,146 @@ bool PerfSource::prepare() {
                        || !mMonitor.add(mUEvent.getFd())
 
                        || (schedSwitchId = PerfDriver::getTracepointId(SCHED_SWITCH, &printb)) < 0
-                       || !sendTracepointFormat(&mBuffer, SCHED_SWITCH, &printb, &b1)
+                       || !sendTracepointFormat(currTime, &mBuffer, SCHED_SWITCH, &printb, &b1)
+
+                       || (cpuIdleId = PerfDriver::getTracepointId(CPU_IDLE, &printb)) < 0
+                       || !sendTracepointFormat(currTime, &mBuffer, CPU_IDLE, &printb, &b1)
 
                        // Only want RAW but not IP on sched_switch and don't want TID on SAMPLE_ID
-                       || !mCountersGroup.add(&mBuffer, 100/**/, PERF_TYPE_TRACEPOINT, schedSwitchId, 1, PERF_SAMPLE_RAW, PERF_GROUP_MMAP | PERF_GROUP_COMM | PERF_GROUP_TASK | PERF_GROUP_SAMPLE_ID_ALL)
+                       || !mCountersGroup.add(currTime, &mBuffer, 100/**/, PERF_TYPE_TRACEPOINT, schedSwitchId, 1, PERF_SAMPLE_RAW, PERF_GROUP_MMAP | PERF_GROUP_COMM | PERF_GROUP_TASK | PERF_GROUP_SAMPLE_ID_ALL | PERF_GROUP_PER_CPU)
+                       || !mIdleGroup.add(currTime, &mBuffer, 101/**/, PERF_TYPE_TRACEPOINT, cpuIdleId, 1, PERF_SAMPLE_RAW, PERF_GROUP_PER_CPU)
 
                        // Only want TID and IP but not RAW on timer
-                       || (gSessionData->mSampleRate > 0 && !gSessionData->mIsEBS && !mCountersGroup.add(&mBuffer, 99/**/, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK, 1000000000UL / gSessionData->mSampleRate, PERF_SAMPLE_TID | PERF_SAMPLE_IP, 0))
+                       || (gSessionData->mSampleRate > 0 && !gSessionData->mIsEBS && !mCountersGroup.add(currTime, &mBuffer, 102/**/, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK, 1000000000UL / gSessionData->mSampleRate, PERF_SAMPLE_TID | PERF_SAMPLE_IP, PERF_GROUP_PER_CPU))
 
-                       || !gSessionData->perf.enable(&mCountersGroup, &mBuffer)
+                       || !gSessionData->perf.enable(currTime, &mCountersGroup, &mBuffer)
                        || 0) {
-               logg->logMessage("%s(%s:%i): perf setup failed, are you running Linux 3.12 or later?", __FUNCTION__, __FILE__, __LINE__);
-               return false;
-       }
-
-       if (!gSessionData->perf.summary(&mSummary)) {
-               logg->logMessage("%s(%s:%i): PerfDriver::summary failed", __FUNCTION__, __FILE__, __LINE__);
+               logg->logMessage("%s(%s:%i): perf setup failed, are you running Linux 3.4 or later?", __FUNCTION__, __FILE__, __LINE__);
                return false;
        }
 
-       {
-               // Run prepareCPU in parallel as perf_event_open can take more than 1 sec in some cases
-               pthread_t threads[NR_CPUS];
-               PrepareParallelArgs args[NR_CPUS];
-               for (int cpu = 0; cpu < gSessionData->mCores; ++cpu) {
-                       args[cpu].pg = &mCountersGroup;
-                       args[cpu].cpu = cpu;
-                       if (pthread_create(&threads[cpu], NULL, prepareParallel, &args[cpu]) != 0) {
-                               logg->logMessage("%s(%s:%i): pthread_create failed", __FUNCTION__, __FILE__, __LINE__);
-                               return false;
-                       }
+       for (int cpu = 0; cpu < gSessionData->mCores; ++cpu) {
+               const int result = mCountersGroup.prepareCPU(cpu, &mMonitor);
+               if ((result != PG_SUCCESS) && (result != PG_CPU_OFFLINE)) {
+                       logg->logError(__FILE__, __LINE__, "PerfGroup::prepareCPU on mCountersGroup failed");
+                       handleException();
                }
-               for (int cpu = 0; cpu < gSessionData->mCores; ++cpu) {
-                       if (pthread_join(threads[cpu], NULL) != 0) {
-                               logg->logMessage("%s(%s:%i): pthread_join failed", __FUNCTION__, __FILE__, __LINE__);
-                               return false;
-                       }
+       }
+       for (int cpu = 0; cpu < gSessionData->mCores; ++cpu) {
+               const int result = mIdleGroup.prepareCPU(cpu, &mMonitor);
+               if ((result != PG_SUCCESS) && (result != PG_CPU_OFFLINE)) {
+                       logg->logError(__FILE__, __LINE__, "PerfGroup::prepareCPU on mIdleGroup failed");
+                       handleException();
                }
        }
 
        int numEvents = 0;
        for (int cpu = 0; cpu < gSessionData->mCores; ++cpu) {
-               numEvents += mCountersGroup.onlineCPU(cpu, false, &mBuffer, &mMonitor);
+               numEvents += mCountersGroup.onlineCPU(currTime, cpu, false, &mBuffer);
+       }
+       for (int cpu = 0; cpu < gSessionData->mCores; ++cpu) {
+               numEvents += mIdleGroup.onlineCPU(currTime, cpu, false, &mBuffer);
        }
        if (numEvents <= 0) {
                logg->logMessage("%s(%s:%i): PerfGroup::onlineCPU failed on all cores", __FUNCTION__, __FILE__, __LINE__);
                return false;
        }
 
-       // Start events before reading proc to avoid race conditions
-       if (!mCountersGroup.start()) {
-               logg->logMessage("%s(%s:%i): PerfGroup::start failed", __FUNCTION__, __FILE__, __LINE__);
-               return false;
+       // Send the summary right before the start so that the monotonic delta is close to the start time
+       if (!gSessionData->perf.summary(&mSummary)) {
+         logg->logError(__FILE__, __LINE__, "PerfDriver::summary failed", __FUNCTION__, __FILE__, __LINE__);
+         handleException();
        }
 
-       if (!readProc(&mBuffer, &printb, &b1, &b2, &b3)) {
-               logg->logMessage("%s(%s:%i): readProc failed", __FUNCTION__, __FILE__, __LINE__);
-               return false;
+       // Start the timer thread to used to sync perf and monotonic raw times
+       pthread_t syncThread;
+       if (pthread_create(&syncThread, NULL, syncFunc, NULL)) {
+         logg->logError(__FILE__, __LINE__, "pthread_create failed", __FUNCTION__, __FILE__, __LINE__);
+         handleException();
+       }
+       struct sched_param param;
+       param.sched_priority = sched_get_priority_max(SCHED_FIFO);
+       if (pthread_setschedparam(syncThread, SCHED_FIFO | SCHED_RESET_ON_FORK, &param) != 0) {
+         logg->logError(__FILE__, __LINE__, "pthread_setschedparam failed");
+         handleException();
        }
 
-       mBuffer.commit(1);
+       mBuffer.commit(currTime);
 
        return true;
 }
 
+struct ProcThreadArgs {
+       Buffer *mBuffer;
+       uint64_t mCurrTime;
+       bool mIsDone;
+};
+
+void *procFunc(void *arg) {
+       DynBuf printb;
+       DynBuf b;
+       const ProcThreadArgs *const args = (ProcThreadArgs *)arg;
+
+       prctl(PR_SET_NAME, (unsigned long)&"gatord-proc", 0, 0, 0);
+
+       // Gator runs at a high priority, reset the priority to the default
+       if (setpriority(PRIO_PROCESS, syscall(__NR_gettid), 0) == -1) {
+               logg->logError(__FILE__, __LINE__, "setpriority failed");
+               handleException();
+       }
+
+       if (!readProcMaps(args->mCurrTime, args->mBuffer, &printb, &b)) {
+               logg->logError(__FILE__, __LINE__, "readProcMaps failed");
+               handleException();
+       }
+       args->mBuffer->commit(args->mCurrTime);
+
+       if (!readKallsyms(args->mCurrTime, args->mBuffer, &args->mIsDone)) {
+               logg->logError(__FILE__, __LINE__, "readKallsyms failed");
+               handleException();
+       }
+       args->mBuffer->commit(args->mCurrTime);
+
+       return NULL;
+}
+
 static const char CPU_DEVPATH[] = "/devices/system/cpu/cpu";
 
 void PerfSource::run() {
        int pipefd[2];
+       pthread_t procThread;
+       ProcThreadArgs procThreadArgs;
+
+       {
+               DynBuf printb;
+               DynBuf b1;
+               DynBuf b2;
 
-       if (pipe(pipefd) != 0) {
+               const uint64_t currTime = getTime();
+
+               // Start events before reading proc to avoid race conditions
+               if (!mCountersGroup.start() || !mIdleGroup.start()) {
+                       logg->logError(__FILE__, __LINE__, "PerfGroup::start failed", __FUNCTION__, __FILE__, __LINE__);
+                       handleException();
+               }
+
+               if (!readProcComms(currTime, &mBuffer, &printb, &b1, &b2)) {
+                       logg->logError(__FILE__, __LINE__, "readProcComms failed");
+                       handleException();
+               }
+               mBuffer.commit(currTime);
+
+               // Postpone reading kallsyms as on android adb gets too backed up and data is lost
+               procThreadArgs.mBuffer = &mBuffer;
+               procThreadArgs.mCurrTime = currTime;
+               procThreadArgs.mIsDone = false;
+               if (pthread_create(&procThread, NULL, procFunc, &procThreadArgs)) {
+                       logg->logError(__FILE__, __LINE__, "pthread_create failed", __FUNCTION__, __FILE__, __LINE__);
+                       handleException();
+               }
+       }
+
+       if (pipe_cloexec(pipefd) != 0) {
                logg->logError(__FILE__, __LINE__, "pipe failed");
                handleException();
        }
@@ -162,7 +308,7 @@ void PerfSource::run() {
 
        int timeout = -1;
        if (gSessionData->mLiveRate > 0) {
-               timeout = gSessionData->mLiveRate/MS_PER_US;
+               timeout = gSessionData->mLiveRate/NS_PER_MS;
        }
 
        sem_post(mStartProfile);
@@ -175,10 +321,11 @@ void PerfSource::run() {
                        logg->logError(__FILE__, __LINE__, "Monitor::wait failed");
                        handleException();
                }
+               const uint64_t currTime = getTime();
 
                for (int i = 0; i < ready; ++i) {
                        if (events[i].data.fd == mUEvent.getFd()) {
-                               if (!handleUEvent()) {
+                               if (!handleUEvent(currTime)) {
                                        logg->logError(__FILE__, __LINE__, "PerfSource::handleUEvent failed");
                                        handleException();
                                }
@@ -197,6 +344,9 @@ void PerfSource::run() {
                }
        }
 
+       procThreadArgs.mIsDone = true;
+       pthread_join(procThread, NULL);
+       mIdleGroup.stop();
        mCountersGroup.stop();
        mBuffer.setDone();
        mIsDone = true;
@@ -209,7 +359,7 @@ void PerfSource::run() {
        close(pipefd[1]);
 }
 
-bool PerfSource::handleUEvent() {
+bool PerfSource::handleUEvent(const uint64_t currTime) {
        UEventResult result;
        if (!mUEvent.read(&result)) {
                logg->logMessage("%s(%s:%i): UEvent::Read failed", __FUNCTION__, __FILE__, __LINE__);
@@ -228,14 +378,41 @@ bool PerfSource::handleUEvent() {
                        logg->logMessage("%s(%s:%i): strtol failed", __FUNCTION__, __FILE__, __LINE__);
                        return false;
                }
+
+               if (cpu >= gSessionData->mCores) {
+                       logg->logError(__FILE__, __LINE__, "Only %i cores are expected but core %i reports %s", gSessionData->mCores, cpu, result.mAction);
+                       handleException();
+               }
+
                if (strcmp(result.mAction, "online") == 0) {
+                       mBuffer.onlineCPU(currTime, currTime - gSessionData->mMonotonicStarted, cpu);
                        // Only call onlineCPU if prepareCPU succeeded
-                       const bool result = mCountersGroup.prepareCPU(cpu) &&
-                               mCountersGroup.onlineCPU(cpu, true, &mBuffer, &mMonitor);
-                       mBuffer.commit(1);
+                       bool result = false;
+                       int err = mCountersGroup.prepareCPU(cpu, &mMonitor);
+                       if (err == PG_CPU_OFFLINE) {
+                               result = true;
+                       } else if (err == PG_SUCCESS) {
+                               if (mCountersGroup.onlineCPU(currTime, cpu, true, &mBuffer)) {
+                                       err = mIdleGroup.prepareCPU(cpu, &mMonitor);
+                                       if (err == PG_CPU_OFFLINE) {
+                                               result = true;
+                                       } else if (err == PG_SUCCESS) {
+                                               if (mIdleGroup.onlineCPU(currTime, cpu, true, &mBuffer)) {
+                                                       result = true;
+                                               }
+                                       }
+                               }
+                       }
+                       mBuffer.commit(currTime);
+
+                       gSessionData->readCpuInfo();
+                       gSessionData->perf.coreName(currTime, &mSummary, cpu);
+                       mSummary.commit(currTime);
                        return result;
                } else if (strcmp(result.mAction, "offline") == 0) {
-                       return mCountersGroup.offlineCPU(cpu);
+                       const bool result = mCountersGroup.offlineCPU(cpu) && mIdleGroup.offlineCPU(cpu);
+                       mBuffer.offlineCPU(currTime, currTime - gSessionData->mMonotonicStarted, cpu);
+                       return result;
                }
        }
 
@@ -260,6 +437,7 @@ bool PerfSource::isDone () {
 void PerfSource::write (Sender *sender) {
        if (!mSummary.isDone()) {
                mSummary.write(sender);
+               gSessionData->mSentSummary = true;
        }
        if (!mBuffer.isDone()) {
                mBuffer.write(sender);
index 3f471c8de414609129867b5c48958c2ee1bfabfe..ce1eafe8e9532091ebcfb6fc6e65d06baae36b22 100644 (file)
@@ -33,12 +33,13 @@ public:
        void write(Sender *sender);
 
 private:
-       bool handleUEvent();
+       bool handleUEvent(const uint64_t currTime);
 
        Buffer mSummary;
        Buffer mBuffer;
        PerfBuffer mCountersBuf;
        PerfGroup mCountersGroup;
+       PerfGroup mIdleGroup;
        Monitor mMonitor;
        UEvent mUEvent;
        sem_t *const mSenderSem;
index e0b9e2259cf939d5b13170b4be445b7b37d9633d..e6b26b1199fa9d997f16a5305871e5515caf7bc9 100644 (file)
 
 #include <dirent.h>
 #include <errno.h>
+#include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <unistd.h>
 
 #include "Buffer.h"
 #include "DynBuf.h"
 #include "Logging.h"
+#include "SessionData.h"
 
 struct ProcStat {
        // From linux-dev/include/linux/sched.h
@@ -57,17 +60,64 @@ static bool readProcStat(ProcStat *const ps, const char *const pathname, DynBuf
        return true;
 }
 
-static bool readProcTask(Buffer *const buffer, const int pid, const char *const image, DynBuf *const printb, DynBuf *const b) {
+static const char APP_PROCESS[] = "app_process";
+
+static const char *readProcExe(DynBuf *const printb, const int pid, const int tid, DynBuf *const b) {
+       if (tid == -1 ? !printb->printf("/proc/%i/exe", pid)
+                       : !printb->printf("/proc/%i/task/%i/exe", pid, tid)) {
+               logg->logMessage("%s(%s:%i): DynBuf::printf failed", __FUNCTION__, __FILE__, __LINE__);
+               return NULL;
+       }
+
+       const int err = b->readlink(printb->getBuf());
+       const char *image;
+       if (err == 0) {
+               image = strrchr(b->getBuf(), '/');
+               if (image == NULL) {
+                       image = b->getBuf();
+               } else {
+                       ++image;
+               }
+       } else if (err == -ENOENT) {
+               // readlink /proc/[pid]/exe returns ENOENT for kernel threads
+               image = "\0";
+       } else {
+               logg->logMessage("%s(%s:%i): DynBuf::readlink failed", __FUNCTION__, __FILE__, __LINE__);
+               return NULL;
+       }
+
+       // Android apps are run by app_process but the cmdline is changed to reference the actual app name
+       // On 64-bit android app_process can be app_process32 or app_process64
+       if (strncmp(image, APP_PROCESS, sizeof(APP_PROCESS) - 1) != 0) {
+               return image;
+       }
+
+       if (tid == -1 ? !printb->printf("/proc/%i/cmdline", pid)
+                       : !printb->printf("/proc/%i/task/%i/cmdline", pid, tid)) {
+               logg->logMessage("%s(%s:%i): DynBuf::printf failed", __FUNCTION__, __FILE__, __LINE__);
+               return NULL;
+       }
+
+       if (!b->read(printb->getBuf())) {
+               logg->logMessage("%s(%s:%i): DynBuf::read failed, likely because the thread exited", __FUNCTION__, __FILE__, __LINE__);
+               return NULL;
+       }
+
+       return b->getBuf();
+}
+
+static bool readProcTask(const uint64_t currTime, Buffer *const buffer, const int pid, DynBuf *const printb, DynBuf *const b1, DynBuf *const b2) {
        bool result = false;
 
-       if (!b->printf("/proc/%i/task", pid)) {
+       if (!b1->printf("/proc/%i/task", pid)) {
                logg->logMessage("%s(%s:%i): DynBuf::printf failed", __FUNCTION__, __FILE__, __LINE__);
                return result;
        }
-       DIR *task = opendir(b->getBuf());
+       DIR *task = opendir(b1->getBuf());
        if (task == NULL) {
                logg->logMessage("%s(%s:%i): opendir failed", __FUNCTION__, __FILE__, __LINE__);
-               return result;
+               // This is not a fatal error - the thread just doesn't exist any more
+               return true;
        }
 
        struct dirent *dirent;
@@ -84,12 +134,18 @@ static bool readProcTask(Buffer *const buffer, const int pid, const char *const
                        goto fail;
                }
                ProcStat ps;
-               if (!readProcStat(&ps, printb->getBuf(), b)) {
+               if (!readProcStat(&ps, printb->getBuf(), b1)) {
                        logg->logMessage("%s(%s:%i): readProcStat failed", __FUNCTION__, __FILE__, __LINE__);
                        goto fail;
                }
 
-               buffer->comm(pid, tid, image, ps.comm);
+               const char *const image = readProcExe(printb, pid, tid, b2);
+               if (image == NULL) {
+                       logg->logMessage("%s(%s:%i): readImage failed", __FUNCTION__, __FILE__, __LINE__);
+                       goto fail;
+               }
+
+               buffer->comm(currTime, pid, tid, image, ps.comm);
        }
 
        result = true;
@@ -100,7 +156,7 @@ static bool readProcTask(Buffer *const buffer, const int pid, const char *const
        return result;
 }
 
-bool readProc(Buffer *const buffer, DynBuf *const printb, DynBuf *const b1, DynBuf *const b2, DynBuf *const b3) {
+bool readProcComms(const uint64_t currTime, Buffer *const buffer, DynBuf *const printb, DynBuf *const b1, DynBuf *const b2) {
        bool result = false;
 
        DIR *proc = opendir("/proc");
@@ -128,46 +184,59 @@ bool readProc(Buffer *const buffer, DynBuf *const printb, DynBuf *const b1, DynB
                        goto fail;
                }
 
-               if (!printb->printf("/proc/%i/exe", pid)) {
-                       logg->logMessage("%s(%s:%i): DynBuf::printf failed", __FUNCTION__, __FILE__, __LINE__);
-                       goto fail;
-               }
-               const int err = b1->readlink(printb->getBuf());
-               const char *image;
-               if (err == 0) {
-                       image = strrchr(b1->getBuf(), '/');
+               if (ps.numThreads <= 1) {
+                       const char *const image = readProcExe(printb, pid, -1, b1);
                        if (image == NULL) {
-                               image = b1->getBuf();
-                       } else {
-                               ++image;
+                               logg->logMessage("%s(%s:%i): readImage failed", __FUNCTION__, __FILE__, __LINE__);
+                               goto fail;
                        }
-               } else if (err == -ENOENT) {
-                       // readlink /proc/[pid]/exe returns ENOENT for kernel threads
-                       image = "\0";
+
+                       buffer->comm(currTime, pid, pid, image, ps.comm);
                } else {
-                       logg->logMessage("%s(%s:%i): DynBuf::readlink failed", __FUNCTION__, __FILE__, __LINE__);
-                       goto fail;
+                       if (!readProcTask(currTime, buffer, pid, printb, b1, b2)) {
+                               logg->logMessage("%s(%s:%i): readProcTask failed", __FUNCTION__, __FILE__, __LINE__);
+                               goto fail;
+                       }
+               }
+       }
+
+       result = true;
+
+ fail:
+       closedir(proc);
+
+       return result;
+}
+
+bool readProcMaps(const uint64_t currTime, Buffer *const buffer, DynBuf *const printb, DynBuf *const b) {
+       bool result = false;
+
+       DIR *proc = opendir("/proc");
+       if (proc == NULL) {
+               logg->logMessage("%s(%s:%i): opendir failed", __FUNCTION__, __FILE__, __LINE__);
+               return result;
+       }
+
+       struct dirent *dirent;
+       while ((dirent = readdir(proc)) != NULL) {
+               char *endptr;
+               const int pid = strtol(dirent->d_name, &endptr, 10);
+               if (*endptr != '\0') {
+                       // Ignore proc items that are not integers like ., cpuinfo, etc...
+                       continue;
                }
 
                if (!printb->printf("/proc/%i/maps", pid)) {
                        logg->logMessage("%s(%s:%i): DynBuf::printf failed", __FUNCTION__, __FILE__, __LINE__);
                        goto fail;
                }
-               if (!b2->read(printb->getBuf())) {
+               if (!b->read(printb->getBuf())) {
                        logg->logMessage("%s(%s:%i): DynBuf::read failed, likely because the process exited", __FUNCTION__, __FILE__, __LINE__);
                        // This is not a fatal error - the process just doesn't exist any more
                        continue;
                }
 
-               buffer->maps(pid, pid, b2->getBuf());
-               if (ps.numThreads <= 1) {
-                       buffer->comm(pid, pid, image, ps.comm);
-               } else {
-                       if (!readProcTask(buffer, pid, image, printb, b3)) {
-                               logg->logMessage("%s(%s:%i): readProcTask failed", __FUNCTION__, __FILE__, __LINE__);
-                               goto fail;
-                       }
-               }
+               buffer->maps(currTime, pid, pid, b->getBuf());
        }
 
        result = true;
@@ -177,3 +246,67 @@ bool readProc(Buffer *const buffer, DynBuf *const printb, DynBuf *const b1, DynB
 
        return result;
 }
+
+bool readKallsyms(const uint64_t currTime, Buffer *const buffer, const bool *const isDone) {
+       int fd = ::open("/proc/kallsyms", O_RDONLY | O_CLOEXEC);
+
+       if (fd < 0) {
+               logg->logMessage("%s(%s:%i): open failed", __FUNCTION__, __FILE__, __LINE__);
+               return true;
+       };
+
+       char buf[1<<12];
+       ssize_t pos = 0;
+       while (gSessionData->mSessionIsActive && !ACCESS_ONCE(*isDone)) {
+               // Assert there is still space in the buffer
+               if (sizeof(buf) - pos - 1 == 0) {
+                       logg->logError(__FILE__, __LINE__, "no space left in buffer");
+                       handleException();
+               }
+
+               {
+                       // -1 to reserve space for \0
+                       const ssize_t bytes = ::read(fd, buf + pos, sizeof(buf) - pos - 1);
+                       if (bytes < 0) {
+                               logg->logError(__FILE__, __LINE__, "read failed", __FUNCTION__, __FILE__, __LINE__);
+                               handleException();
+                       }
+                       if (bytes == 0) {
+                               // Assert the buffer is empty
+                               if (pos != 0) {
+                                       logg->logError(__FILE__, __LINE__, "buffer not empty on eof");
+                                       handleException();
+                               }
+                               break;
+                       }
+                       pos += bytes;
+               }
+
+               ssize_t newline;
+               // Find the last '\n'
+               for (newline = pos - 1; newline >= 0; --newline) {
+                       if (buf[newline] == '\n') {
+                               const char was = buf[newline + 1];
+                               buf[newline + 1] = '\0';
+                               buffer->kallsyms(currTime, buf);
+                               // Sleep 3 ms to avoid sending out too much data too quickly
+                               usleep(3000);
+                               buf[0] = was;
+                               // Assert the memory regions do not overlap
+                               if (pos - newline >= newline + 1) {
+                                       logg->logError(__FILE__, __LINE__, "memcpy src and dst overlap");
+                                       handleException();
+                               }
+                               if (pos - newline - 2 > 0) {
+                                       memcpy(buf + 1, buf + newline + 2, pos - newline - 2);
+                               }
+                               pos -= newline + 1;
+                               break;
+                       }
+               }
+       }
+
+       close(fd);
+
+       return true;
+}
index 057b6109848a7bd224495746aa3fd74ca53ef938..2a1a7cbc1e9981829c323e1a7208b201ae8182f2 100644 (file)
@@ -9,9 +9,13 @@
 #ifndef PROC_H
 #define PROC_H
 
+#include <stdint.h>
+
 class Buffer;
 class DynBuf;
 
-bool readProc(Buffer *const buffer, DynBuf *const printb, DynBuf *const b1, DynBuf *const b2, DynBuf *const b3);
+bool readProcComms(const uint64_t currTime, Buffer *const buffer, DynBuf *const printb, DynBuf *const b1, DynBuf *const b2);
+bool readProcMaps(const uint64_t currTime, Buffer *const buffer, DynBuf *const printb, DynBuf *const b);
+bool readKallsyms(const uint64_t currTime, Buffer *const buffer, const bool *const isDone);
 
 #endif // PROC_H
index 3a981a6427be01ebbe0aa525cbb8b65ebed91184..8a54a6678974a76b9b3991c3924793301dead028 100644 (file)
@@ -65,18 +65,13 @@ void Sender::createDataFile(char* apcDir) {
 
        mDataFileName = (char*)malloc(strlen(apcDir) + 12);
        sprintf(mDataFileName, "%s/0000000000", apcDir);
-       mDataFile = fopen(mDataFileName, "wb");
+       mDataFile = fopen_cloexec(mDataFileName, "wb");
        if (!mDataFile) {
                logg->logError(__FILE__, __LINE__, "Failed to open binary file: %s", mDataFileName);
                handleException();
        }
 }
 
-template<typename T>
-inline T min(const T a, const T b) {
-       return (a < b ? a : b);
-}
-
 void Sender::writeData(const char* data, int length, int type) {
        if (length < 0 || (data == NULL && length > 0)) {
                return;
index 4c359dba82f87b79be729583ea6a5d2ccc8bc1d8..5aa911713820cf347901715430e39b8e0cf01806 100644 (file)
@@ -6,8 +6,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef        __SENDER_H__
-#define        __SENDER_H__
+#ifndef __SENDER_H__
+#define __SENDER_H__
 
 #include <stdio.h>
 #include <pthread.h>
@@ -39,4 +39,4 @@ private:
        Sender &operator=(const Sender &);
 };
 
-#endif         //__SENDER_H__
+#endif //__SENDER_H__
index c169299af8720d0c16da229a0939264a81c21500..0e65d7842647d95c48cc1f5950ed550a4a368062 100644 (file)
@@ -8,14 +8,31 @@
 
 #include "SessionData.h"
 
+#include <fcntl.h>
 #include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
 
-#include "SessionXML.h"
+#include "CPUFreqDriver.h"
+#include "DiskIODriver.h"
+#include "FSDriver.h"
+#include "HwmonDriver.h"
 #include "Logging.h"
+#include "MemInfoDriver.h"
+#include "NetDriver.h"
+#include "SessionXML.h"
+
+#define CORE_NAME_UNKNOWN "unknown"
 
 SessionData* gSessionData = NULL;
 
 SessionData::SessionData() {
+       usDrivers[0] = new HwmonDriver();
+       usDrivers[1] = new FSDriver();
+       usDrivers[2] = new MemInfoDriver();
+       usDrivers[3] = new NetDriver();
+       usDrivers[4] = new CPUFreqDriver();
+       usDrivers[5] = new DiskIODriver();
        initialize();
 }
 
@@ -27,15 +44,32 @@ void SessionData::initialize() {
        mSessionIsActive = false;
        mLocalCapture = false;
        mOneShot = false;
+       mSentSummary = false;
+       mAllowCommands = false;
+       const size_t cpuIdSize = sizeof(int)*NR_CPUS;
+       // Share mCpuIds across all instances of gatord
+       mCpuIds = (int *)mmap(NULL, cpuIdSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+       if (mCpuIds == MAP_FAILED) {
+               logg->logError(__FILE__, __LINE__, "Unable to mmap shared memory for cpuids");
+               handleException();
+       }
+       memset(mCpuIds, -1, cpuIdSize);
+       strcpy(mCoreName, CORE_NAME_UNKNOWN);
+       readModel();
        readCpuInfo();
+       mImages = NULL;
        mConfigurationXMLPath = NULL;
        mSessionXMLPath = NULL;
        mEventsXMLPath = NULL;
        mTargetPath = NULL;
        mAPCDir = NULL;
+       mCaptureWorkingDir = NULL;
+       mCaptureCommand = NULL;
+       mCaptureUser = NULL;
        mSampleRate = 0;
        mLiveRate = 0;
        mDuration = 0;
+       mMonotonicStarted = -1;
        mBacktraceDepth = 0;
        mTotalBufferSize = 0;
        // sysconf(_SC_NPROCESSORS_CONF) is unreliable on 2.6 Android, get the value from the kernel module
@@ -61,7 +95,6 @@ void SessionData::parseSessionXML(char* xmlString) {
                handleException();
        }
        mBacktraceDepth = session.parameters.call_stack_unwinding == true ? 128 : 0;
-       mDuration = session.parameters.duration;
 
        // Determine buffer size (in MB) based on buffer mode
        mOneShot = true;
@@ -79,22 +112,38 @@ void SessionData::parseSessionXML(char* xmlString) {
                handleException();
        }
 
-       mImages = session.parameters.images;
        // Convert milli- to nanoseconds
        mLiveRate = session.parameters.live_rate * (int64_t)1000000;
        if (mLiveRate > 0 && mLocalCapture) {
                logg->logMessage("Local capture is not compatable with live, disabling live");
                mLiveRate = 0;
        }
+
+       if (!mAllowCommands && (mCaptureCommand != NULL)) {
+               logg->logError(__FILE__, __LINE__, "Running a command during a capture is not currently allowed. Please restart gatord with the -a flag.");
+               handleException();
+       }
+}
+
+void SessionData::readModel() {
+       FILE *fh = fopen("/proc/device-tree/model", "rb");
+       if (fh == NULL) {
+               return;
+       }
+
+       char buf[256];
+       if (fgets(buf, sizeof(buf), fh) != NULL) {
+               strcpy(mCoreName, buf);
+       }
+
+       fclose(fh);
 }
 
 void SessionData::readCpuInfo() {
        char temp[256]; // arbitrarily large amount
-       strcpy(mCoreName, "unknown");
-       memset(&mCpuIds, -1, sizeof(mCpuIds));
        mMaxCpuId = -1;
 
-       FILE* f = fopen("/proc/cpuinfo", "r");  
+       FILE *f = fopen("/proc/cpuinfo", "r");
        if (f == NULL) {
                logg->logMessage("Error opening /proc/cpuinfo\n"
                        "The core name in the captured xml file will be 'unknown'.");
@@ -102,10 +151,19 @@ void SessionData::readCpuInfo() {
        }
 
        bool foundCoreName = false;
-       int processor = 0;
+       int processor = -1;
        while (fgets(temp, sizeof(temp), f)) {
-               if (strlen(temp) > 0) {
-                       temp[strlen(temp) - 1] = 0;     // Replace the line feed with a null
+               const size_t len = strlen(temp);
+
+               if (len == 1) {
+                       // New section, clear the processor. Streamline will not know the cpus if the pre Linux 3.8 format of cpuinfo is encountered but also that no incorrect information will be transmitted.
+                       processor = -1;
+                       continue;
+               }
+
+               if (len > 0) {
+                       // Replace the line feed with a null
+                       temp[len - 1] = '\0';
                }
 
                const bool foundHardware = strstr(temp, "Hardware") != 0;
@@ -120,17 +178,22 @@ void SessionData::readCpuInfo() {
                        }
                        position += 2;
 
-                       if (foundHardware) {
+                       if (foundHardware && (strcmp(mCoreName, CORE_NAME_UNKNOWN) == 0)) {
                                strncpy(mCoreName, position, sizeof(mCoreName));
                                mCoreName[sizeof(mCoreName) - 1] = 0; // strncpy does not guarantee a null-terminated string
                                foundCoreName = true;
                        }
 
                        if (foundCPUPart) {
-                               mCpuIds[processor] = strtol(position, NULL, 0);
+                               const int cpuId = strtol(position, NULL, 0);
                                // If this does not have the full topology in /proc/cpuinfo, mCpuIds[0] may not have the 1 CPU part emitted - this guarantees it's in mMaxCpuId
-                               if (mCpuIds[processor] > mMaxCpuId) {
-                                       mMaxCpuId = mCpuIds[processor];
+                               if (cpuId > mMaxCpuId) {
+                                       mMaxCpuId = cpuId;
+                               }
+                               if (processor >= NR_CPUS) {
+                                       logg->logMessage("Too many processors, please increase NR_CPUS");
+                               } else if (processor >= 0) {
+                                       mCpuIds[processor] = cpuId;
                                }
                        }
 
@@ -142,18 +205,57 @@ void SessionData::readCpuInfo() {
 
        if (!foundCoreName) {
                logg->logMessage("Could not determine core name from /proc/cpuinfo\n"
-                                                "The core name in the captured xml file will be 'unknown'.");
+                                "The core name in the captured xml file will be 'unknown'.");
        }
        fclose(f);
- }
+}
+
+uint64_t getTime() {
+       struct timespec ts;
+       if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts) != 0) {
+               logg->logError(__FILE__, __LINE__, "Failed to get uptime");
+               handleException();
+       }
+       return (NS_PER_S*ts.tv_sec + ts.tv_nsec);
+}
 
 int getEventKey() {
        // key 0 is reserved as a timestamp
        // key 1 is reserved as the marker for thread specific counters
+       // key 2 is reserved as the marker for core
        // Odd keys are assigned by the driver, even keys by the daemon
-       static int key = 2;
+       static int key = 4;
 
        const int ret = key;
        key += 2;
        return ret;
 }
+
+int pipe_cloexec(int pipefd[2]) {
+       if (pipe(pipefd) != 0) {
+               return -1;
+       }
+
+       int fdf;
+       if (((fdf = fcntl(pipefd[0], F_GETFD)) == -1) || (fcntl(pipefd[0], F_SETFD, fdf | FD_CLOEXEC) != 0) ||
+                       ((fdf = fcntl(pipefd[1], F_GETFD)) == -1) || (fcntl(pipefd[1], F_SETFD, fdf | FD_CLOEXEC) != 0)) {
+               close(pipefd[0]);
+               close(pipefd[1]);
+               return -1;
+       }
+       return 0;
+}
+
+FILE *fopen_cloexec(const char *path, const char *mode) {
+       FILE *fh = fopen(path, mode);
+       if (fh == NULL) {
+               return NULL;
+       }
+       int fd = fileno(fh);
+       int fdf = fcntl(fd, F_GETFD);
+       if ((fdf == -1) || (fcntl(fd, F_SETFD, fdf | FD_CLOEXEC) != 0)) {
+               fclose(fh);
+               return NULL;
+       }
+       return fh;
+}
index ea34240e2df7c3ec271bf9b6917d7c45ba4037f8..ed282af4a869d9b34913dfec4f6fa61c095d3eec 100644 (file)
 
 #include <stdint.h>
 
+#include "AnnotateListener.h"
 #include "Config.h"
 #include "Counter.h"
-#include "Hwmon.h"
+#include "FtraceDriver.h"
+#include "KMod.h"
+#include "MaliVideoDriver.h"
 #include "PerfDriver.h"
 
-#define PROTOCOL_VERSION       18
-#define PROTOCOL_DEV           1000    // Differentiates development versions (timestamp) from release versions
+#define PROTOCOL_VERSION 20
+// Differentiates development versions (timestamp) from release versions
+#define PROTOCOL_DEV 1000
+
+#define NS_PER_S 1000000000LL
+#define NS_PER_MS 1000000LL
+#define NS_PER_US 1000LL
 
 struct ImageLinkList {
        char* path;
@@ -32,32 +40,46 @@ public:
        ~SessionData();
        void initialize();
        void parseSessionXML(char* xmlString);
+       void readModel();
+       void readCpuInfo();
 
-       Hwmon hwmon;
+       PolledDriver *usDrivers[6];
+       KMod kmod;
        PerfDriver perf;
+       MaliVideoDriver maliVideo;
+       FtraceDriver ftraceDriver;
+       AnnotateListener annotateListener;
 
        char mCoreName[MAX_STRING_LEN];
        struct ImageLinkList *mImages;
-       char* mConfigurationXMLPath;
-       char* mSessionXMLPath;
-       char* mEventsXMLPath;
-       char* mTargetPath;
-       char* mAPCDir;
+       char *mConfigurationXMLPath;
+       char *mSessionXMLPath;
+       char *mEventsXMLPath;
+       char *mTargetPath;
+       char *mAPCDir;
+       char *mCaptureWorkingDir;
+       char *mCaptureCommand;
+       char *mCaptureUser;
 
        bool mWaitingOnCommand;
        bool mSessionIsActive;
        bool mLocalCapture;
-       bool mOneShot;          // halt processing of the driver data until profiling is complete or the buffer is filled
+       // halt processing of the driver data until profiling is complete or the buffer is filled
+       bool mOneShot;
        bool mIsEBS;
-       
+       bool mSentSummary;
+       bool mAllowCommands;
+
+       int64_t mMonotonicStarted;
        int mBacktraceDepth;
-       int mTotalBufferSize;   // number of MB to use for the entire collection buffer
+       // number of MB to use for the entire collection buffer
+       int mTotalBufferSize;
        int mSampleRate;
        int64_t mLiveRate;
        int mDuration;
        int mCores;
        int mPageSize;
-       int mCpuIds[NR_CPUS];
+       int *mCpuIds;
        int mMaxCpuId;
 
        // PMU Counters
@@ -65,8 +87,6 @@ public:
        Counter mCounters[MAX_PERFORMANCE_COUNTERS];
 
 private:
-       void readCpuInfo();
-
        // Intentionally unimplemented
        SessionData(const SessionData &);
        SessionData &operator=(const SessionData &);
@@ -74,6 +94,9 @@ private:
 
 extern SessionData* gSessionData;
 
+uint64_t getTime();
 int getEventKey();
+int pipe_cloexec(int pipefd[2]);
+FILE *fopen_cloexec(const char *path, const char *mode);
 
 #endif // SESSION_DATA_H
index 55b2f92807092ee071a2cfdbd8c36b43f435b590..dea4c8f299ecc1d8661654b41153b67533a74fae 100644 (file)
 #include "OlyUtility.h"
 #include "SessionData.h"
 
-static const char*     TAG_SESSION = "session";
-static const char*     TAG_IMAGE       = "image";
-
-static const char*     ATTR_VERSION            = "version";            
-static const char*     ATTR_CALL_STACK_UNWINDING = "call_stack_unwinding";
-static const char*     ATTR_BUFFER_MODE        = "buffer_mode";
-static const char*     ATTR_SAMPLE_RATE        = "sample_rate";        
-static const char*     ATTR_DURATION           = "duration";
-static const char*     ATTR_PATH               = "path";
-static const char*     ATTR_LIVE_RATE      = "live_rate";
+static const char *TAG_SESSION = "session";
+static const char *TAG_IMAGE   = "image";
+
+static const char *ATTR_VERSION              = "version";
+static const char *ATTR_CALL_STACK_UNWINDING = "call_stack_unwinding";
+static const char *ATTR_BUFFER_MODE          = "buffer_mode";
+static const char *ATTR_SAMPLE_RATE          = "sample_rate";
+static const char *ATTR_DURATION             = "duration";
+static const char *ATTR_PATH                 = "path";
+static const char *ATTR_LIVE_RATE            = "live_rate";
+static const char *ATTR_CAPTURE_WORKING_DIR  = "capture_working_dir";
+static const char *ATTR_CAPTURE_COMMAND      = "capture_command";
+static const char *ATTR_CAPTURE_USER         = "capture_user";
 
 SessionXML::SessionXML(const char *str) {
        parameters.buffer_mode[0] = 0;
        parameters.sample_rate[0] = 0;
-       parameters.duration = 0;
        parameters.call_stack_unwinding = false;
        parameters.live_rate = 0;
-       parameters.images = NULL;
-       mPath = 0;
-       mSessionXML = (const char *)str;
+       mSessionXML = str;
        logg->logMessage(mSessionXML);
 }
 
 SessionXML::~SessionXML() {
-       if (mPath != 0) {
-               free((char *)mSessionXML);
-       }
 }
 
 void SessionXML::parse() {
@@ -79,10 +76,13 @@ void SessionXML::sessionTag(mxml_node_t *tree, mxml_node_t *node) {
                strncpy(parameters.sample_rate, mxmlElementGetAttr(node, ATTR_SAMPLE_RATE), sizeof(parameters.sample_rate));
                parameters.sample_rate[sizeof(parameters.sample_rate) - 1] = 0; // strncpy does not guarantee a null-terminated string
        }
+       if (mxmlElementGetAttr(node, ATTR_CAPTURE_WORKING_DIR)) gSessionData->mCaptureWorkingDir = strdup(mxmlElementGetAttr(node, ATTR_CAPTURE_WORKING_DIR));
+       if (mxmlElementGetAttr(node, ATTR_CAPTURE_COMMAND)) gSessionData->mCaptureCommand = strdup(mxmlElementGetAttr(node, ATTR_CAPTURE_COMMAND));
+       if (mxmlElementGetAttr(node, ATTR_CAPTURE_USER)) gSessionData->mCaptureUser = strdup(mxmlElementGetAttr(node, ATTR_CAPTURE_USER));
 
        // integers/bools
        parameters.call_stack_unwinding = util->stringToBool(mxmlElementGetAttr(node, ATTR_CALL_STACK_UNWINDING), false);
-       if (mxmlElementGetAttr(node, ATTR_DURATION)) parameters.duration = strtol(mxmlElementGetAttr(node, ATTR_DURATION), NULL, 10);
+       if (mxmlElementGetAttr(node, ATTR_DURATION)) gSessionData->mDuration = strtol(mxmlElementGetAttr(node, ATTR_DURATION), NULL, 10);
        if (mxmlElementGetAttr(node, ATTR_LIVE_RATE)) parameters.live_rate = strtol(mxmlElementGetAttr(node, ATTR_LIVE_RATE), NULL, 10);
 
        // parse subtags
@@ -106,6 +106,6 @@ void SessionXML::sessionImage(mxml_node_t *node) {
        image = (struct ImageLinkList *)malloc(sizeof(struct ImageLinkList));
        image->path = (char*)malloc(length + 1);
        image->path = strdup(mxmlElementGetAttr(node, ATTR_PATH));
-       image->next = parameters.images;
-       parameters.images = image;
+       image->next = gSessionData->mImages;
+       gSessionData->mImages = image;
 }
index e146094a4d1751c260e356075fd36b322b1db849..53965749c74b054005e7cee0ff17676aeb8696ba 100644 (file)
 struct ImageLinkList;
 
 struct ConfigParameters {
-       char buffer_mode[64];   // buffer mode, "streaming", "low", "normal", "high" defines oneshot and buffer size
-       char sample_rate[64];   // capture mode, "high", "normal", or "low"
-       int duration;           // length of profile in seconds
-       bool call_stack_unwinding;      // whether stack unwinding is performed
+       // buffer mode, "streaming", "low", "normal", "high" defines oneshot and buffer size
+       char buffer_mode[64];
+       // capture mode, "high", "normal", or "low"
+       char sample_rate[64];
+       // whether stack unwinding is performed
+       bool call_stack_unwinding;
        int live_rate;
-       struct ImageLinkList *images;   // linked list of image strings
 };
 
 class SessionXML {
@@ -30,7 +31,6 @@ public:
        ConfigParameters parameters;
 private:
        const char *mSessionXML;
-       const char *mPath;
        void sessionTag(mxml_node_t *tree, mxml_node_t *node);
        void sessionImage(mxml_node_t *node);
 
diff --git a/tools/gator/daemon/Setup.cpp b/tools/gator/daemon/Setup.cpp
new file mode 100644 (file)
index 0000000..d4ce032
--- /dev/null
@@ -0,0 +1,232 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "Setup.h"
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "Config.h"
+#include "DynBuf.h"
+#include "Logging.h"
+
+bool getLinuxVersion(int version[3]) {
+       // Check the kernel version
+       struct utsname utsname;
+       if (uname(&utsname) != 0) {
+               logg->logMessage("%s(%s:%i): uname failed", __FUNCTION__, __FILE__, __LINE__);
+               return false;
+       }
+
+       version[0] = 0;
+       version[1] = 0;
+       version[2] = 0;
+
+       int part = 0;
+       char *ch = utsname.release;
+       while (*ch >= '0' && *ch <= '9' && part < 3) {
+               version[part] = 10*version[part] + *ch - '0';
+
+               ++ch;
+               if (*ch == '.') {
+                       ++part;
+                       ++ch;
+               }
+       }
+
+       return true;
+}
+
+static int pgrep_gator(DynBuf *const printb) {
+       DynBuf b;
+
+       DIR *proc = opendir("/proc");
+       if (proc == NULL) {
+               logg->logError(__FILE__, __LINE__, "gator: error: opendir failed");
+               handleException();
+       }
+
+       int self = getpid();
+
+       struct dirent *dirent;
+       while ((dirent = readdir(proc)) != NULL) {
+               char *endptr;
+               const int pid = strtol(dirent->d_name, &endptr, 10);
+               if (*endptr != '\0' || (pid == self)) {
+                       // Ignore proc items that are not integers like ., cpuinfo, etc...
+                       continue;
+               }
+
+               if (!printb->printf("/proc/%i/stat", pid)) {
+                       logg->logError(__FILE__, __LINE__, "gator: error: DynBuf::printf failed");
+                       handleException();
+               }
+
+               if (!b.read(printb->getBuf())) {
+                       // This is not a fatal error - the thread just doesn't exist any more
+                       continue;
+               }
+
+               char *comm = strchr(b.getBuf(), '(');
+               if (comm == NULL) {
+                       logg->logError(__FILE__, __LINE__, "gator: error: parsing stat begin failed");
+                       handleException();
+               }
+               ++comm;
+               char *const str = strrchr(comm, ')');
+               if (str == NULL) {
+                       logg->logError(__FILE__, __LINE__, "gator: error: parsing stat end failed");
+                       handleException();
+               }
+               *str = '\0';
+
+               if (strncmp(comm, "gator", 5) == 0) {
+                       // Assume there is only one gator process
+                       return pid;
+               }
+       }
+
+       closedir(proc);
+
+       return -1;
+}
+
+int update(const char *const gatorPath) {
+       printf("gator: starting\n");
+
+       int version[3];
+       if (!getLinuxVersion(version)) {
+               logg->logError(__FILE__, __LINE__, "gator: error: getLinuxVersion failed");
+               handleException();
+       }
+
+       if (KERNEL_VERSION(version[0], version[1], version[2]) < KERNEL_VERSION(2, 6, 32)) {
+               logg->logError(__FILE__, __LINE__, "gator: error: Streamline can't automatically setup gator as this kernel version is not supported. Please upgrade the kernel on your device.");
+               handleException();
+       }
+
+       if (KERNEL_VERSION(version[0], version[1], version[2]) < KERNEL_VERSION(3, 4, 0)) {
+               logg->logError(__FILE__, __LINE__, "gator: error: Streamline can't automatically setup gator as gator.ko is required for this version of Linux. Please build gator.ko and gatord and install them on your device.");
+               handleException();
+       }
+
+       if (access("/sys/module/gator", F_OK) == 0) {
+               logg->logError(__FILE__, __LINE__, "gator: error: Streamline has detected that the gator kernel module is loaded on your device. Please build an updated version of gator.ko and gatord and install them on your device.");
+               handleException();
+       }
+
+       if (geteuid() != 0) {
+               printf("gator: trying sudo\n");
+               execlp("sudo", "sudo", gatorPath, "-u", NULL);
+               // Streamline will provide the password if needed
+
+               printf("gator: trying su\n");
+               char buf[1<<10];
+               snprintf(buf, sizeof(buf), "%s -u", gatorPath);
+               execlp("su", "su", "-", "-c", buf, NULL);
+               // Streamline will provide the password if needed
+
+               logg->logError(__FILE__, __LINE__, "gator: error: Streamline was unable to sudo to root on your device. Please double check passwords, ensure sudo or su work with this user or try a different username.");
+               handleException();
+       }
+       printf("gator: now root\n");
+
+       // setenforce 0 not needed for userspace gator
+
+       // Kill existing gator
+       DynBuf gatorStatPath;
+       int gator_main = pgrep_gator(&gatorStatPath);
+       if (gator_main > 0) {
+               if (kill(gator_main, SIGTERM) != 0) {
+                       logg->logError(__FILE__, __LINE__, "gator: error: kill SIGTERM failed");
+                       handleException();
+               }
+               for (int i = 0; ; ++i) {
+                       if (access(gatorStatPath.getBuf(), F_OK) != 0) {
+                               break;
+                       }
+                       if (i == 5) {
+                               if (kill(gator_main, SIGKILL) != 0) {
+                                       logg->logError(__FILE__, __LINE__, "gator: error: kill SIGKILL failed");
+                                       handleException();
+                               }
+                       } else if (i >= 10) {
+                               logg->logError(__FILE__, __LINE__, "gator: error: unable to kill running gator");
+                               handleException();
+                       }
+                       sleep(1);
+               }
+       }
+       printf("gator: no gatord running\n");
+
+       rename("gatord", "gatord.old");
+       rename("gator.ko", "gator.ko.old");
+
+       // Rename gatord.YYYYMMDDHHMMSSMMMM to gatord
+       char *newGatorPath = strdup(gatorPath);
+       char *dot = strrchr(newGatorPath, '.');
+       if (dot != NULL) {
+               *dot = '\0';
+               if (rename(gatorPath, newGatorPath) != 0) {
+                       logg->logError(__FILE__, __LINE__, "gator: error: rename failed");
+                       handleException();
+               }
+       }
+
+       // Fork and start gatord (redirect stdout and stderr)
+       int child = fork();
+       if (child < 0) {
+               logg->logError(__FILE__, __LINE__, "gator: error: fork failed");
+               handleException();
+       } else if (child == 0) {
+               int inFd = open("/dev/null", O_RDONLY | O_CLOEXEC);
+               if (inFd < 0) {
+                       logg->logError(__FILE__, __LINE__, "gator: error: open of /dev/null failed");
+                       handleException();
+               }
+               int outFd = open("gatord.out", O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0600);
+               if (outFd < 0) {
+                       logg->logError(__FILE__, __LINE__, "gator: error: open of gatord.out failed");
+                       handleException();
+               }
+               int errFd = open("gatord.err", O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0600);
+               if (errFd < 0) {
+                       logg->logError(__FILE__, __LINE__, "gator: error: open of gatord.err failed");
+                       handleException();
+               }
+               if (dup2(inFd, STDIN_FILENO) < 0) {
+                       logg->logError(__FILE__, __LINE__, "gator: error: dup2 for stdin failed");
+                       handleException();
+               }
+               if (dup2(outFd, STDOUT_FILENO) < 0) {
+                       logg->logError(__FILE__, __LINE__, "gator: error: dup2 for stdout failed");
+                       handleException();
+               }
+               if (dup2(errFd, STDERR_FILENO) < 0) {
+                       logg->logError(__FILE__, __LINE__, "gator: error: dup2 for stderr failed");
+                       handleException();
+               }
+               execlp(newGatorPath, newGatorPath, "-a", NULL);
+               logg->logError(__FILE__, __LINE__, "gator: error: execlp failed");
+               handleException();
+       }
+
+       printf("gator: done\n");
+
+       return 0;
+}
diff --git a/tools/gator/daemon/Setup.h b/tools/gator/daemon/Setup.h
new file mode 100644 (file)
index 0000000..280d611
--- /dev/null
@@ -0,0 +1,18 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SETUP_H
+#define SETUP_H
+
+// From include/generated/uapi/linux/version.h
+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+
+bool getLinuxVersion(int version[3]);
+int update(const char *const gatorPath);
+
+#endif // SETUP_H
index caa665e67193a9f88f378c290ce9b74d3aad3f91..2b61eaeb290d620888c46a2d5db14be0a801ebd2 100644 (file)
@@ -266,7 +266,7 @@ void StreamlineSetup::writeConfiguration(char* xml) {
        { ConfigurationXML configuration; }
 
        if (gSessionData->mCounterOverflow > 0) {
-               logg->logError(__FILE__, __LINE__, "Only %i performance counters counters are permitted, %i are selected", MAX_PERFORMANCE_COUNTERS, gSessionData->mCounterOverflow);
+               logg->logError(__FILE__, __LINE__, "Only %i performance counters are permitted, %i are selected", MAX_PERFORMANCE_COUNTERS, gSessionData->mCounterOverflow);
                handleException();
        }
 }
index 74bb197e35ff2a563b10b53397ad5c47d8ac4b1e..623e14f2b64a7c1bb68521464fc2f8e89dd8f8c5 100644 (file)
@@ -6,8 +6,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef        __STREAMLINE_SETUP_H__
-#define        __STREAMLINE_SETUP_H__
+#ifndef __STREAMLINE_SETUP_H__
+#define __STREAMLINE_SETUP_H__
 
 #include <stdint.h>
 #include <string.h>
@@ -21,7 +21,7 @@ enum {
        COMMAND_APC_START   = 2,
        COMMAND_APC_STOP    = 3,
        COMMAND_DISCONNECT  = 4,
-       COMMAND_PING            = 5
+       COMMAND_PING        = 5
 };
 
 class StreamlineSetup {
@@ -47,4 +47,4 @@ private:
        StreamlineSetup &operator=(const StreamlineSetup &);
 };
 
-#endif         //__STREAMLINE_SETUP_H__
+#endif //__STREAMLINE_SETUP_H__
index d977cd080b4080e9fd091edb60625bf715b0656e..f94a995393e844f5675ac1e67b160855281033fb 100644 (file)
@@ -8,13 +8,14 @@
 
 #include "UEvent.h"
 
-#include <sys/socket.h>
-#include <linux/netlink.h>
 #include <string.h>
-
+#include <sys/socket.h>
 #include <unistd.h>
 
+#include <linux/netlink.h>
+
 #include "Logging.h"
+#include "OlySocket.h"
 
 static const char EMPTY[] = "";
 static const char ACTION[] = "ACTION=";
@@ -31,7 +32,7 @@ UEvent::~UEvent() {
 }
 
 bool UEvent::init() {
-       mFd = socket(PF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);
+       mFd = socket_cloexec(PF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);
        if (mFd < 0) {
                logg->logMessage("%s(%s:%i): socket failed", __FUNCTION__, __FILE__, __LINE__);
                return false;
index debe69636cffbd5d083a3e2b7c5ad56c2453f750..4a9b22f4b555d66e39057004fe6d3ebdb6188b93 100644 (file)
@@ -16,9 +16,6 @@
 #include "Logging.h"
 #include "SessionData.h"
 
-#define NS_PER_S ((uint64_t)1000000000)
-#define NS_PER_US 1000
-
 extern Child *child;
 
 UserSpaceSource::UserSpaceSource(sem_t *senderSem) : mBuffer(0, FRAME_BLOCK_COUNTER, gSessionData->mTotalBufferSize*1024*1024, senderSem) {
@@ -34,30 +31,28 @@ bool UserSpaceSource::prepare() {
 void UserSpaceSource::run() {
        prctl(PR_SET_NAME, (unsigned long)&"gatord-counters", 0, 0, 0);
 
-       gSessionData->hwmon.start();
+       for (int i = 0; i < ARRAY_LENGTH(gSessionData->usDrivers); ++i) {
+               gSessionData->usDrivers[i]->start();
+       }
 
        int64_t monotonic_started = 0;
        while (monotonic_started <= 0) {
                usleep(10);
 
-               if (DriverSource::readInt64Driver("/dev/gator/started", &monotonic_started) == -1) {
-                       logg->logError(__FILE__, __LINE__, "Error reading gator driver start time");
-                       handleException();
+               if (gSessionData->perf.isSetup()) {
+                       monotonic_started = gSessionData->mMonotonicStarted;
+               } else {
+                       if (DriverSource::readInt64Driver("/dev/gator/started", &monotonic_started) == -1) {
+                               logg->logError(__FILE__, __LINE__, "Error reading gator driver start time");
+                               handleException();
+                       }
+                       gSessionData->mMonotonicStarted = monotonic_started;
                }
        }
 
        uint64_t next_time = 0;
        while (gSessionData->mSessionIsActive) {
-               struct timespec ts;
-#ifndef CLOCK_MONOTONIC_RAW
-               // Android doesn't have this defined but it was added in Linux 2.6.28
-#define CLOCK_MONOTONIC_RAW 4
-#endif
-               if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts) != 0) {
-                       logg->logError(__FILE__, __LINE__, "Failed to get uptime");
-                       handleException();
-               }
-               const uint64_t curr_time = (NS_PER_S*ts.tv_sec + ts.tv_nsec) - monotonic_started;
+               const uint64_t curr_time = getTime() - monotonic_started;
                // Sample ten times a second ignoring gSessionData->mSampleRate
                next_time += NS_PER_S/10;//gSessionData->mSampleRate;
                if (next_time < curr_time) {
@@ -66,7 +61,9 @@ void UserSpaceSource::run() {
                }
 
                if (mBuffer.eventHeader(curr_time)) {
-                       gSessionData->hwmon.read(&mBuffer);
+                       for (int i = 0; i < ARRAY_LENGTH(gSessionData->usDrivers); ++i) {
+                               gSessionData->usDrivers[i]->read(&mBuffer);
+                       }
                        // Only check after writing all counters so that time and corresponding counters appear in the same frame
                        mBuffer.check(curr_time);
                }
index fb5889d26ffbe9d4c7dcd431d34a3bfcaee11e6f..9b3666016dc5b5836c6c4bdf9bf933c4af82e53a 100644 (file)
@@ -14,7 +14,7 @@
 #include "Buffer.h"
 #include "Source.h"
 
-// User space counters - currently just hwmon
+// User space counters
 class UserSpaceSource : public Source {
 public:
        UserSpaceSource(sem_t *senderSem);
diff --git a/tools/gator/daemon/c++.cpp b/tools/gator/daemon/c++.cpp
new file mode 100644 (file)
index 0000000..6041e5e
--- /dev/null
@@ -0,0 +1,40 @@
+/**
+ * Minimal set of C++ functions so that libstdc++ is not required
+ *
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+void operator delete(void *ptr) {
+  if (ptr != NULL) {
+    free(ptr);
+  }
+}
+
+void operator delete[](void *ptr) {
+  operator delete(ptr);
+}
+
+void *operator new(size_t size) {
+  void *ptr = malloc(size == 0 ? 1 : size);
+  if (ptr == NULL) {
+    abort();
+  }
+  return ptr;
+}
+
+void *operator new[](size_t size) {
+  return operator new(size);
+}
+
+extern "C"
+void __cxa_pure_virtual() {
+  printf("pure virtual method called\n");
+  abort();
+}
index d9dc14606b0716082cddfc4d5e95dd1d18dd9a39..769a92e51a35d002ebaa9bb82e50e112b87ac81e 100644 (file)
@@ -5,16 +5,17 @@
 # -Werror treats warnings as errors
 # -std=c++0x is the planned new c++ standard
 # -std=c++98 is the 1998 c++ standard
-CFLAGS += -O3 -Wall -fno-exceptions -pthread -MMD -DETCDIR=\"/etc\" -Ilibsensors
+CPPFLAGS += -O3 -Wall -fno-exceptions -pthread -MMD -DETCDIR=\"/etc\" -Ilibsensors
 CXXFLAGS += -fno-rtti -Wextra # -Weffc++
 ifeq ($(WERROR),1)
-       CFLAGS += -Werror
+       CPPFLAGS += -Werror
 endif
 # -s strips the binary of debug info
 LDFLAGS += -s
+LDLIBS += -lrt -lm -pthread
 TARGET = gatord
 C_SRC = $(wildcard mxml/*.c) $(wildcard libsensors/*.c)
-CPP_SRC = $(wildcard *.cpp)
+CXX_SRC = $(wildcard *.cpp)
 
 all: $(TARGET)
 
@@ -35,14 +36,15 @@ libsensors/conf-parse.c: ;
        ./escape $< > $@
 
 %.o: %.c
-       $(GCC) -c $(CFLAGS) -o $@ $<
+       $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
 
 %.o: %.cpp
-       $(CPP) -c $(CFLAGS) $(CXXFLAGS) -o $@ $<
+       $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c -o $@ $<
 
-$(TARGET): $(CPP_SRC:%.cpp=%.o) $(C_SRC:%.c=%.o)
-       $(CPP) $(LDFLAGS) -o $@ $^ -lrt -pthread
+$(TARGET): $(CXX_SRC:%.cpp=%.o) $(C_SRC:%.c=%.o)
+       $(CC) $(LDFLAGS) $^ $(LDLIBS) -o $@
 
+# Intentionally ignore CC as a native binary is required
 escape: escape.c
        gcc $^ -o $@
 
index 5bf096cb2a459ab586fede46931231406a97e976..086eca1e804e805059660350a55809e6d0f7b625 100644 (file)
   <configuration counter="ARMv7_Cortex_A9_cnt2" event="0x07"/>
   <configuration counter="ARMv7_Cortex_A9_cnt3" event="0x03"/>
   <configuration counter="ARMv7_Cortex_A9_cnt4" event="0x04"/>
-  <configuration counter="ARMv7_Cortex_A12_ccnt" event="0xff"/>
-  <configuration counter="ARMv7_Cortex_A12_cnt0" event="0x08"/>
-  <configuration counter="ARMv7_Cortex_A12_cnt1" event="0x16"/>
-  <configuration counter="ARMv7_Cortex_A12_cnt2" event="0x10"/>
-  <configuration counter="ARMv7_Cortex_A12_cnt3" event="0x19"/>
   <configuration counter="ARMv7_Cortex_A15_ccnt" event="0xff"/>
   <configuration counter="ARMv7_Cortex_A15_cnt0" event="0x8"/>
   <configuration counter="ARMv7_Cortex_A15_cnt1" event="0x16"/>
   <configuration counter="ARMv7_Cortex_A15_cnt2" event="0x10"/>
   <configuration counter="ARMv7_Cortex_A15_cnt3" event="0x19"/>
+  <configuration counter="ARMv7_Cortex_A17_ccnt" event="0xff"/>
+  <configuration counter="ARMv7_Cortex_A17_cnt0" event="0x08"/>
+  <configuration counter="ARMv7_Cortex_A17_cnt1" event="0x16"/>
+  <configuration counter="ARMv7_Cortex_A17_cnt2" event="0x10"/>
+  <configuration counter="ARMv7_Cortex_A17_cnt3" event="0x19"/>
   <configuration counter="ARM_Cortex-A53_ccnt" event="0x11"/>
   <configuration counter="ARM_Cortex-A53_cnt0" event="0x8"/>
   <configuration counter="ARM_Cortex-A53_cnt1" event="0x16"/>
   <configuration counter="Linux_block_rq_wr"/>
   <configuration counter="Linux_block_rq_rd"/>
   <configuration counter="Linux_meminfo_memused"/>
+  <configuration counter="Linux_meminfo_memused2"/>
   <configuration counter="Linux_meminfo_memfree"/>
   <configuration counter="Linux_power_cpu_freq"/>
+  <configuration counter="ARM_Mali-4xx_fragment"/>
+  <configuration counter="ARM_Mali-4xx_vertex"/>
+  <configuration counter="ARM_Mali-Midgard_fragment" cores="1"/>
+  <configuration counter="ARM_Mali-Midgard_vertex" cores="1"/>
+  <configuration counter="ARM_Mali-Midgard_opencl" cores="1"/>
+  <configuration counter="ARM_Mali-T60x_GPU_ACTIVE"/>
+  <configuration counter="ARM_Mali-T60x_JS0_ACTIVE"/>
+  <configuration counter="ARM_Mali-T60x_JS1_ACTIVE"/>
+  <configuration counter="ARM_Mali-T60x_JS2_ACTIVE"/>
+  <configuration counter="ARM_Mali-T62x_GPU_ACTIVE"/>
+  <configuration counter="ARM_Mali-T62x_JS0_ACTIVE"/>
+  <configuration counter="ARM_Mali-T62x_JS1_ACTIVE"/>
+  <configuration counter="ARM_Mali-T62x_JS2_ACTIVE"/>
+  <configuration counter="ARM_Mali-T72x_GPU_ACTIVE"/>
+  <configuration counter="ARM_Mali-T72x_JS0_ACTIVE"/>
+  <configuration counter="ARM_Mali-T72x_JS1_ACTIVE"/>
+  <configuration counter="ARM_Mali-T72x_JS2_ACTIVE"/>
+  <configuration counter="ARM_Mali-T76x_GPU_ACTIVE"/>
+  <configuration counter="ARM_Mali-T76x_JS0_ACTIVE"/>
+  <configuration counter="ARM_Mali-T76x_JS1_ACTIVE"/>
+  <configuration counter="ARM_Mali-T76x_JS2_ACTIVE"/>
   <configuration counter="L2C-310_cnt0" event="0x1"/>
 </configurations>
index c54aa1c3e75d9be24d0febffefbb8f13e80d13a9..2b0863aaf4250058edc835e45fe84208900bb99f 100644 (file)
@@ -6,7 +6,7 @@
  * published by the Free Software Foundation.
  */
 
-/* 
+/*
  * The Makefile in the daemon folder builds and executes 'escape'
  * 'escape' creates configuration_xml.h from configuration.xml and events_xml.h from events-*.xml
  * these genereated xml files are then #included and built as part of the gatord binary
index 4fa77117d2d89e37de41a0e11c1d259133df4e3b..20002efd1543f6210ce10aea20ab01bd0e85bdb9 100644 (file)
@@ -1,7 +1,6 @@
-  <counter_set name="cci-400_cnt" count="4"/>
-  <category name="CCI-400" counter_set="cci-400_cnt" per_cpu="no" supports_event_based_sampling="yes">
-    <event counter="cci-400_ccnt" event="0xff" title="CCI-400 Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
-
+  <counter_set name="CCI_400_cnt" count="4"/>
+  <category name="CCI-400" counter_set="CCI_400_cnt" per_cpu="no" supports_event_based_sampling="yes">
+    <event counter="CCI_400_ccnt" event="0xff" title="CCI-400 Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
     <option_set name="Slave">
       <option event_delta="0x00" name="S0" description="Slave interface 0"/>
       <option event_delta="0x20" name="S1" description="Slave interface 1"/>
@@ -9,7 +8,6 @@
       <option event_delta="0x60" name="S3" description="Slave interface 3"/>
       <option event_delta="0x80" name="S4" description="Slave interface 4"/>
     </option_set>
-
     <event event="0x00" option_set="Slave" title="CCI-400" name="Read: any" description="Read request handshake: any"/>
     <event event="0x01" option_set="Slave" title="CCI-400" name="Read: transaction" description="Read request handshake: device transaction"/>
     <event event="0x02" option_set="Slave" title="CCI-400" name="Read: normal" description="Read request handshake: normal, non-shareable or system-shareable, but not barrier or cache maintenance operation"/>
     <event event="0x11" option_set="Slave" title="CCI-400" name="Write: WriteLineUnique" description="Write request handshake: WriteLineUnique"/>
     <event event="0x12" option_set="Slave" title="CCI-400" name="Write: Evict" description="Write request handshake: Evict"/>
     <event event="0x13" option_set="Slave" title="CCI-400" name="Write stall: tracker full" description="Write request stall cycle because the transaction tracker is full. Increase SIx_W_MAX to avoid this stall"/>
-
     <option_set name="Master">
       <option event_delta="0xa0" name="M0" description="Master interface 0"/>
       <option event_delta="0xc0" name="M1" description="Master interface 1"/>
       <option event_delta="0xe0" name="M2" description="Master interface 2"/>
     </option_set>
-
     <event event="0x14" option_set="Master" title="CCI-400" name="Retry fetch" description="RETRY of speculative fetch transaction"/>
     <event event="0x15" option_set="Master" title="CCI-400" name="Read stall: address hazard" description="Read request stall cycle because of an address hazard"/>
     <event event="0x16" option_set="Master" title="CCI-400" name="Read stall: ID hazard" description="Read request stall cycle because of an ID hazard"/>
     <event event="0x19" option_set="Master" title="CCI-400" name="Write stall: barrier hazard" description="Write request stall cycle because of a barrier hazard"/>
     <event event="0x1a" option_set="Master" title="CCI-400" name="Write stall: tracker full" description="Write request stall cycle because the transaction tracker is full. Increase MIx_W_MAX to avoid this stall. See the CoreLink CCI-400 Cache Coherent Interconnect Integration Manual"/>
   </category>
-
-  <counter_set name="cci-400-r1_cnt" count="4"/>
-  <category name="CCI-400" counter_set="cci-400-r1_cnt" per_cpu="no" supports_event_based_sampling="yes">
-    <event counter="cci-400-r1_ccnt" event="0xff" title="CCI-400 Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
-
+  <counter_set name="CCI_400-r1_cnt" count="4"/>
+  <category name="CCI-400" counter_set="CCI_400-r1_cnt" per_cpu="no" supports_event_based_sampling="yes">
+    <event counter="CCI_400-r1_ccnt" event="0xff" title="CCI-400 Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
     <option_set name="Slave">
       <option event_delta="0x00" name="S0" description="Slave interface 0"/>
       <option event_delta="0x20" name="S1" description="Slave interface 1"/>
@@ -57,7 +51,6 @@
       <option event_delta="0x60" name="S3" description="Slave interface 3"/>
       <option event_delta="0x80" name="S4" description="Slave interface 4"/>
     </option_set>
-
     <event event="0x00" option_set="Slave" title="CCI-400" name="Read: any" description="Read request handshake: any"/>
     <event event="0x01" option_set="Slave" title="CCI-400" name="Read: transaction" description="Read request handshake: device transaction"/>
     <event event="0x02" option_set="Slave" title="CCI-400" name="Read: normal" description="Read request handshake: normal, non-shareable or system-shareable, but not barrier or cache maintenance operation"/>
     <event event="0x12" option_set="Slave" title="CCI-400" name="Write: Evict" description="Write request handshake: Evict"/>
     <event event="0x13" option_set="Slave" title="CCI-400" name="Write stall: tracker full" description="Write request stall cycle because the transaction tracker is full. Increase SIx_W_MAX to avoid this stall"/>
     <event event="0x14" option_set="Slave" title="CCI-400" name="Read stall: slave hazard" description="Read request stall cycle because of a slave interface ID hazard"/>
-
     <option_set name="Master">
       <option event_delta="0xa0" name="M0" description="Master interface 0"/>
       <option event_delta="0xc0" name="M1" description="Master interface 1"/>
       <option event_delta="0xe0" name="M2" description="Master interface 2"/>
     </option_set>
-
     <event event="0x00" option_set="Master" title="CCI-400" name="Retry fetch" description="RETRY of speculative fetch transaction"/>
     <event event="0x01" option_set="Master" title="CCI-400" name="Read stall: address hazard" description="Stall cycle because of an address hazard. A read or write invalidation is stalled because of an outstanding transaction to an overlapping address"/>
     <event event="0x02" option_set="Master" title="CCI-400" name="Read stall: ID hazard" description="Read request stall cycle because of a master interface ID hazard"/>
index cfabf65949ed7925043eac8158ae51f02d6a8923..6ef3e6483717b7c65b6da0c7d001900febcaaa5c 100644 (file)
@@ -1,7 +1,6 @@
   <counter_set name="CCN-504_cnt" count="4"/>
   <category name="CCN-504" counter_set="CCN-504_cnt">
     <event counter="CCN-504_ccnt" title="CCN-504 Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
-
     <option_set name="XP_Region">
       <option event_delta="0x400000" name="XP 0" description="Crosspoint 0"/>
       <option event_delta="0x410000" name="XP 1" description="Crosspoint 1"/>
@@ -15,7 +14,6 @@
       <option event_delta="0x490000" name="XP 9" description="Crosspoint 9"/>
       <option event_delta="0x4A0000" name="XP 10" description="Crosspoint 10"/>
     </option_set>
-
     <event event="0x0801" option_set="XP_Region" title="CCN-504" name="Bus 0: REQ: H-bit" description="Bus 0: REQ: Set H-bit, signaled when this XP sets the H-bit."/>
     <event event="0x0802" option_set="XP_Region" title="CCN-504" name="Bus 0: REQ: S-bit" description="Bus 0: REQ: Set S-bit, signaled when this XP sets the S-bit."/>
     <event event="0x0803" option_set="XP_Region" title="CCN-504" name="Bus 0: REQ: P-Cnt" description="Bus 0: REQ: Set P-Cnt, signaled when this XP sets the P-Cnt. This is not applicable for the SNP VC."/>
@@ -56,7 +54,6 @@
     <event event="0x087A" option_set="XP_Region" title="CCN-504" name="Bus 1: DATB: S-bit" description="Bus 1: DATB: Set S-bit, signaled when this XP sets the S-bit."/>
     <event event="0x087B" option_set="XP_Region" title="CCN-504" name="Bus 1: DATB: P-Cnt" description="Bus 1: DATB: Set P-Cnt, signaled when this XP sets the P-Cnt. This is not applicable for the SNP VC."/>
     <event event="0x087C" option_set="XP_Region" title="CCN-504" name="Bus 1: DATB: TknV" description="Bus 1: DATB: No TknV, signaled when this XP transmits a valid packet."/>
-
     <option_set name="HN-F_Region">
       <option event_delta="0x200000" name="HN-F 3" description="Fully-coherent Home Node 3"/>
       <option event_delta="0x210000" name="HN-F 5" description="Fully-coherent Home Node 5"/>
@@ -67,7 +64,6 @@
       <option event_delta="0x260000" name="HN-F 17" description="Fully-coherent Home Node 17"/>
       <option event_delta="0x270000" name="HN-F 18" description="Fully-coherent Home Node 18"/>
     </option_set>
-
     <event event="0x0401" option_set="HN-F_Region" title="CCN-504" name="Cache Miss" description="Counts the total cache misses. This is the first time lookup result, and is high priority."/>
     <event event="0x0402" option_set="HN-F_Region" title="CCN-504" name="L3 SF Cache Access" description="Counts the number of cache accesses. This is the first time access, and is high priority."/>
     <event event="0x0403" option_set="HN-F_Region" title="CCN-504" name="Cache Fill" description="Counts the total allocations in the HN L3 cache, and all cache line allocations to the L3 cache."/>
@@ -82,7 +78,6 @@
     <event event="0x040C" option_set="HN-F_Region" title="CCN-504" name="MC Retries" description="Counts the number of transactions retried by the memory controller."/>
     <event event="0x040D" option_set="HN-F_Region" title="CCN-504" name="MC Reqs" description="Counts the number of requests to the memory controller."/>
     <event event="0x040E" option_set="HN-F_Region" title="CCN-504" name="QOS HH Retry" description="Counts the number of times a highest-priority QoS class was retried at the HN-F."/>
-
     <option_set name="RN-I_Region">
       <option event_delta="0x800000" name="RN-I 0" description="I/O-coherent Requesting Node 0"/>
       <option event_delta="0x820000" name="RN-I 2" description="I/O-coherent Requesting Node 2"/>
@@ -91,7 +86,6 @@
       <option event_delta="0x900000" name="RN-I 16" description="I/O-coherent Requesting Node 16"/>
       <option event_delta="0x940000" name="RN-I 20" description="I/O-coherent Requesting Node 20"/>
     </option_set>
-
     <event event="0x1601" option_set="RN-I_Region" title="CCN-504" name="S0 RDataBeats" description="S0 RDataBeats."/>
     <event event="0x1602" option_set="RN-I_Region" title="CCN-504" name="S1 RDataBeats" description="S1 RDataBeats."/>
     <event event="0x1603" option_set="RN-I_Region" title="CCN-504" name="S2 RDataBeats" description="S2 RDataBeats."/>
     <event event="0x1608" option_set="RN-I_Region" title="CCN-504" name="RRT full" description="RRT full."/>
     <event event="0x1609" option_set="RN-I_Region" title="CCN-504" name="WRT full" description="WRT full."/>
     <event event="0x160A" option_set="RN-I_Region" title="CCN-504" name="Replayed TXREQ Flits" description="Replayed TXREQ Flits."/>
-
     <option_set name="SBAS_Region">
       <option event_delta="0x810000" name="SBAS 1" description="ACE master to CHI protocol bridge 1"/>
       <option event_delta="0x890000" name="SBAS 9" description="ACE master to CHI protocol bridge 9"/>
       <option event_delta="0x8B0000" name="SBAS 11" description="ACE master to CHI protocol bridge 11"/>
       <option event_delta="0x930000" name="SBAS 19" description="ACE master to CHI protocol bridge 19"/>
     </option_set>
-
     <event event="0x1001" option_set="SBAS_Region" title="CCN-504" name="S0 RDataBeats" description="S0 RDataBeats."/>
     <event event="0x1004" option_set="SBAS_Region" title="CCN-504" name="RXDAT Flits received" description="RXDAT Flits received."/>
     <event event="0x1005" option_set="SBAS_Region" title="CCN-504" name="TXDAT Flits sent" description="TXDAT Flits sent."/>
     <event event="0x1008" option_set="SBAS_Region" title="CCN-504" name="RRT full" description="RRT full."/>
     <event event="0x1009" option_set="SBAS_Region" title="CCN-504" name="WRT full" description="WRT full."/>
     <event event="0x100A" option_set="SBAS_Region" title="CCN-504" name="Replayed TXREQ Flits" description="Replayed TXREQ Flits."/>
-
   </category>
diff --git a/tools/gator/daemon/events-Cortex-A12.xml b/tools/gator/daemon/events-Cortex-A12.xml
deleted file mode 100644 (file)
index 9c04354..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-  <counter_set name="ARMv7_Cortex_A12_cnt" count="6"/>
-  <category name="Cortex-A12" counter_set="ARMv7_Cortex_A12_cnt" per_cpu="yes" supports_event_based_sampling="yes">
-    <event counter="ARMv7_Cortex_A12_ccnt" event="0xff" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" average_cores="yes" description="The number of core clock cycles"/>
-    <event event="0x01" title="Cache" name="Instruction refill" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
-    <event event="0x02" title="Cache" name="Inst TLB refill" description="Instruction fetch that causes a TLB refill of at least the level of TLB closest to the processor"/>
-    <event event="0x03" title="Cache" name="Data refill" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
-    <event event="0x04" title="Cache" name="Data access" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
-    <event event="0x05" title="Cache" name="Data TLB refill" description="Memory Read or Write operation that causes a TLB refill of at least the level of TLB closest to the processor"/>
-    <event event="0x08" title="Instruction" name="Executed" description="Instruction architecturally executed"/>
-    <event event="0x09" title="Exception" name="Taken" description="Exceptions taken"/>
-    <event event="0x0a" title="Exception" name="Return" description="Exception return architecturally executed"/>
-    <event event="0x0b" title="Instruction" name="CONTEXTIDR" description="Instruction that writes to the CONTEXTIDR architecturally executed"/>
-    <event event="0x10" title="Branch" name="Mispredicted" description="Branch mispredicted or not predicted"/>
-    <event event="0x12" title="Branch" name="Potential prediction" description="Branch or other change in program flow that could have been predicted by the branch prediction resources of the processor"/>
-    <event event="0x13" title="Memory" name="Memory access" description="Data memory access"/>
-    <event event="0x14" title="Cache" name="L1 inst access" description="Instruction cache access"/>
-    <event event="0x15" title="Cache" name="L1 data write" description="Level 1 data cache Write-Back"/>
-    <event event="0x16" title="Cache" name="L2 data access" description="Level 2 data cache access"/>
-    <event event="0x17" title="Cache" name="L2 data refill" description="Level 2 data cache refill"/>
-    <event event="0x18" title="Cache" name="L2 data write" description="Level 2 data cache Write-Back"/>
-    <event event="0x19" title="Bus" name="Access" description="Bus - Access"/>
-    <event event="0x1b" title="Instruction" name="Speculative" description="Instruction speculatively executed"/>
-    <event event="0x1c" title="Memory" name="Translation table" description="Write to translation table base architecturally executed"/>
-    <event event="0x1d" title="Bus" name="Cycle" description="Bus - Cycle"/>
-    <event event="0x40" title="Cache" name="L1 data read" description="Level 1 data cache access - Read"/>
-    <event event="0x41" title="Cache" name="L1 data access write" description="Level 1 data cache access - Write"/>
-    <event event="0x50" title="Cache" name="L2 data read" description="Level 2 data cache access - Read"/>
-    <event event="0x51" title="Cache" name="L2 data access write" description="Level 2 data cache access - Write"/>
-    <event event="0x56" title="Cache" name="L2 data victim" description="Level 2 data cache Write-Back - Victim"/>
-    <event event="0x57" title="Cache" name="L2 data clean" description="Level 2 data cache Write-Back - Cleaning and coherency"/>
-    <event event="0x58" title="Cache" name="L2 data invalidate" description="Level 2 data cache invalidate"/>
-    <event event="0x60" title="Bus" name="Read" description="Bus access - Read"/>
-    <event event="0x62" title="Bus" name="Access shared" description="Bus access - Normal"/>
-    <event event="0x63" title="Bus" name="Access not shared" description="Bus access - Not normal"/>
-    <event event="0x64" title="Bus" name="Access normal" description="Bus access - Normal"/>
-    <event event="0x65" title="Bus" name="Peripheral" description="Bus access - Peripheral"/>
-    <event event="0x66" title="Memory" name="Read" description="Data memory access - Read"/>
-    <event event="0x67" title="Memory" name="Write" description="Data memory access - Write"/>
-    <event event="0x68" title="Memory" name="Unaligned Read" description="Unaligned access - Read"/>
-    <event event="0x69" title="Memory" name="Unaligned Write" description="Unaligned access - Write"/>
-    <event event="0x6a" title="Memory" name="Unaligned" description="Unaligned access"/>
-    <event event="0x6c" title="Intrinsic" name="LDREX" description="Exclusive instruction speculatively executed - LDREX"/>
-    <event event="0x6e" title="Intrinsic" name="STREX fail" description="Exclusive instruction speculatively executed - STREX fail"/>
-    <event event="0x6f" title="Intrinsic" name="STREX" description="Exclusive instruction speculatively executed - STREX"/>
-    <event event="0x70" title="Instruction" name="Load" description="Instruction speculatively executed - Load"/>
-    <event event="0x71" title="Instruction" name="Store" description="Instruction speculatively executed - Store"/>
-    <event event="0x72" title="Instruction" name="Load/Store" description="Instruction speculatively executed - Load or store"/>
-    <event event="0x73" title="Instruction" name="Integer" description="Instruction speculatively executed - Integer data processing"/>
-    <event event="0x74" title="Instruction" name="Advanced SIMD" description="Instruction speculatively executed - Advanced SIMD"/>
-    <event event="0x75" title="Instruction" name="VFP" description="Instruction speculatively executed - VFP"/>
-    <event event="0x76" title="Instruction" name="Software change" description="Instruction speculatively executed - Software change of the PC"/>
-    <event event="0x78" title="Instruction" name="Immediate branch" description="Branch speculatively executed - Immediate branch"/>
-    <event event="0x79" title="Instruction" name="Procedure return" description="Branch speculatively executed - Procedure return"/>
-    <event event="0x7a" title="Instruction" name="Indirect branch" description="Branch speculatively executed - Indirect branch"/>
-    <event event="0x7c" title="Instruction" name="ISB" description="Barrier speculatively executed - ISB"/>
-    <event event="0x7d" title="Instruction" name="DSB" description="Barrier speculatively executed - DSB"/>
-    <event event="0x7e" title="Instruction" name="DMB" description="Barrier speculatively executed - DMB"/>
-    <event event="0x81" title="Exception" name="Undefined" description="Exception taken, other synchronous"/>
-    <event event="0x8a" title="Exception" name="Hypervisor call" description="Exception taken, Hypervisor Call"/>
-    <event event="0xc0" title="Instruction" name="Stalled Linefill" description="Instruction side stalled due to a Linefill"/>
-    <event event="0xc1" title="Instruction" name="Stalled Page Table Walk" description="Instruction Side stalled due to a Page Table Walk"/>
-    <event event="0xc2" title="Cache" name="4 Ways Read" description="Number of set of 4 ways read in the instruction cache - Tag RAM"/>
-    <event event="0xc3" title="Cache" name="Ways Read" description="Number of ways read in the instruction cache - Data RAM"/>
-    <event event="0xc4" title="Cache" name="BATC Read" description="Number of ways read in the instruction BTAC RAM"/>
-    <event event="0xca" title="Memory" name="Snoop" description="Data snooped from other processor. This event counts memory-read operations that read data from another processor within the local Cortex-A12 cluster, rather than accessing the L2 cache or issuing an external read. It increments on each transaction, rather than on each beat of data"/>
-    <event event="0xd3" title="Slots" name="Load-Store Unit" description="Duration during which all slots in the Load-Store Unit are busy"/>
-    <event event="0xd8" title="Slots" name="Load-Store Issue Queue" description="Duration during which all slots in the Load-Store Issue queue are busy"/>
-    <event event="0xd9" title="Slots" name="Data Processing Issue Queue" description="Duration during which all slots in the Data Processing issue queue are busy"/>
-    <event event="0xda" title="Slots" name="Data Engine Issue Queue" description="Duration during which all slots in the Data Engine issue queue are busy"/>
-    <event event="0xdb" title="NEON" name="Flush" description="Number of NEON instruction which fail their condition code and lead to a flush of the DE pipe"/>
-    <event event="0xdc" title="Hypervisor" name="Traps" description="Number of Trap to hypervisor"/>
-    <event event="0xde" title="PTM" name="EXTOUT 0" description="PTM EXTOUT 0"/>
-    <event event="0xdf" title="PTM" name="EXTOUT 1" description="PTM EXTOUT 1"/>
-    <event event="0xe0" title="MMU" name="Table Walk" description="Duration during which the MMU handle a Page table walk"/>
-    <event event="0xe1" title="MMU" name="Stage1 Table Walk" description="Duration during which the MMU handle a Stage1 Page table walk"/>
-    <event event="0xe2" title="MMU" name="Stage2 Table Walk" description="Duration during which the MMU handle a Stage2 Page table walk"/>
-    <event event="0xe3" title="MMU" name="LSU Table Walk" description="Duration during which the MMU handle a Page table walk requested by the Load Store Unit"/>
-    <event event="0xe4" title="MMU" name="Instruction Table Walk" description="Duration during which the MMU handle a Page table walk requested by the Instruction side"/>
-    <event event="0xe5" title="MMU" name="Preload Table Walk" description="Duration during which the MMU handle a Page table walk requested by a Preload instruction or Prefetch request"/>
-    <event event="0xe6" title="MMU" name="cp15 Table Walk" description="Duration during which the MMU handle a Page table walk requested by a cp15 operation (maintenance by MVA and VA-to-PA operation)"/>
-    <event event="0xe7" title="Cache" name="L1 PLD TLB refill" description="Level 1 PLD TLB refill"/>
-    <event event="0xe8" title="Cache" name="L1 CP15 TLB refill" description="Level 1 CP15 TLB refill"/>
-    <event event="0xe9" title="Cache" name="L1 TLB flush" description="Level 1 TLB flush"/>
-    <event event="0xea" title="Cache" name="L2 TLB access" description="Level 2 TLB access"/>
-    <event event="0xeb" title="Cache" name="L2 TLB miss" description="Level 2 TLB miss"/>
-  </category>
diff --git a/tools/gator/daemon/events-Cortex-A17.xml b/tools/gator/daemon/events-Cortex-A17.xml
new file mode 100644 (file)
index 0000000..4dd08c1
--- /dev/null
@@ -0,0 +1,86 @@
+  <counter_set name="ARMv7_Cortex_A17_cnt" count="6"/>
+  <category name="Cortex-A17" counter_set="ARMv7_Cortex_A17_cnt" per_cpu="yes" supports_event_based_sampling="yes">
+    <event counter="ARMv7_Cortex_A17_ccnt" event="0xff" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" average_cores="yes" description="The number of core clock cycles"/>
+    <event event="0x01" title="Cache" name="Instruction refill" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
+    <event event="0x02" title="Cache" name="Inst TLB refill" description="Instruction fetch that causes a TLB refill of at least the level of TLB closest to the processor"/>
+    <event event="0x03" title="Cache" name="Data refill" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
+    <event event="0x04" title="Cache" name="Data access" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
+    <event event="0x05" title="Cache" name="Data TLB refill" description="Memory Read or Write operation that causes a TLB refill of at least the level of TLB closest to the processor"/>
+    <event event="0x08" title="Instruction" name="Executed" description="Instruction architecturally executed"/>
+    <event event="0x09" title="Exception" name="Taken" description="Exceptions taken"/>
+    <event event="0x0a" title="Exception" name="Return" description="Exception return architecturally executed"/>
+    <event event="0x0b" title="Instruction" name="CONTEXTIDR" description="Instruction that writes to the CONTEXTIDR architecturally executed"/>
+    <event event="0x10" title="Branch" name="Mispredicted" description="Branch mispredicted or not predicted"/>
+    <event event="0x12" title="Branch" name="Potential prediction" description="Branch or other change in program flow that could have been predicted by the branch prediction resources of the processor"/>
+    <event event="0x13" title="Memory" name="Memory access" description="Data memory access"/>
+    <event event="0x14" title="Cache" name="L1 inst access" description="Instruction cache access"/>
+    <event event="0x15" title="Cache" name="L1 data write" description="Level 1 data cache Write-Back"/>
+    <event event="0x16" title="Cache" name="L2 data access" description="Level 2 data cache access"/>
+    <event event="0x17" title="Cache" name="L2 data refill" description="Level 2 data cache refill"/>
+    <event event="0x18" title="Cache" name="L2 data write" description="Level 2 data cache Write-Back"/>
+    <event event="0x19" title="Bus" name="Access" description="Bus - Access"/>
+    <event event="0x1b" title="Instruction" name="Speculative" description="Instruction speculatively executed"/>
+    <event event="0x1c" title="Memory" name="Translation table" description="Write to translation table base architecturally executed"/>
+    <event event="0x1d" title="Bus" name="Cycle" description="Bus - Cycle"/>
+    <event event="0x40" title="Cache" name="L1 data read" description="Level 1 data cache access - Read"/>
+    <event event="0x41" title="Cache" name="L1 data access write" description="Level 1 data cache access - Write"/>
+    <event event="0x50" title="Cache" name="L2 data read" description="Level 2 data cache access - Read"/>
+    <event event="0x51" title="Cache" name="L2 data access write" description="Level 2 data cache access - Write"/>
+    <event event="0x56" title="Cache" name="L2 data victim" description="Level 2 data cache Write-Back - Victim"/>
+    <event event="0x57" title="Cache" name="L2 data clean" description="Level 2 data cache Write-Back - Cleaning and coherency"/>
+    <event event="0x58" title="Cache" name="L2 data invalidate" description="Level 2 data cache invalidate"/>
+    <event event="0x60" title="Bus" name="Read" description="Bus access - Read"/>
+    <event event="0x62" title="Bus" name="Access shared" description="Bus access - Normal"/>
+    <event event="0x63" title="Bus" name="Access not shared" description="Bus access - Not normal"/>
+    <event event="0x64" title="Bus" name="Access normal" description="Bus access - Normal"/>
+    <event event="0x65" title="Bus" name="Peripheral" description="Bus access - Peripheral"/>
+    <event event="0x66" title="Memory" name="Read" description="Data memory access - Read"/>
+    <event event="0x67" title="Memory" name="Write" description="Data memory access - Write"/>
+    <event event="0x68" title="Memory" name="Unaligned Read" description="Unaligned access - Read"/>
+    <event event="0x69" title="Memory" name="Unaligned Write" description="Unaligned access - Write"/>
+    <event event="0x6a" title="Memory" name="Unaligned" description="Unaligned access"/>
+    <event event="0x6c" title="Intrinsic" name="LDREX" description="Exclusive instruction speculatively executed - LDREX"/>
+    <event event="0x6e" title="Intrinsic" name="STREX fail" description="Exclusive instruction speculatively executed - STREX fail"/>
+    <event event="0x6f" title="Intrinsic" name="STREX" description="Exclusive instruction speculatively executed - STREX"/>
+    <event event="0x70" title="Instruction" name="Load" description="Instruction speculatively executed - Load"/>
+    <event event="0x71" title="Instruction" name="Store" description="Instruction speculatively executed - Store"/>
+    <event event="0x72" title="Instruction" name="Load/Store" description="Instruction speculatively executed - Load or store"/>
+    <event event="0x73" title="Instruction" name="Integer" description="Instruction speculatively executed - Integer data processing"/>
+    <event event="0x74" title="Instruction" name="Advanced SIMD" description="Instruction speculatively executed - Advanced SIMD"/>
+    <event event="0x75" title="Instruction" name="VFP" description="Instruction speculatively executed - VFP"/>
+    <event event="0x76" title="Instruction" name="Software change" description="Instruction speculatively executed - Software change of the PC"/>
+    <event event="0x78" title="Instruction" name="Immediate branch" description="Branch speculatively executed - Immediate branch"/>
+    <event event="0x79" title="Instruction" name="Procedure return" description="Branch speculatively executed - Procedure return"/>
+    <event event="0x7a" title="Instruction" name="Indirect branch" description="Branch speculatively executed - Indirect branch"/>
+    <event event="0x7c" title="Instruction" name="ISB" description="Barrier speculatively executed - ISB"/>
+    <event event="0x7d" title="Instruction" name="DSB" description="Barrier speculatively executed - DSB"/>
+    <event event="0x7e" title="Instruction" name="DMB" description="Barrier speculatively executed - DMB"/>
+    <event event="0x81" title="Exception" name="Undefined" description="Exception taken, other synchronous"/>
+    <event event="0x8a" title="Exception" name="Hypervisor call" description="Exception taken, Hypervisor Call"/>
+    <event event="0xc0" title="Instruction" name="Stalled Linefill" description="Instruction side stalled due to a Linefill"/>
+    <event event="0xc1" title="Instruction" name="Stalled Page Table Walk" description="Instruction Side stalled due to a Page Table Walk"/>
+    <event event="0xc2" title="Cache" name="4 Ways Read" description="Number of set of 4 ways read in the instruction cache - Tag RAM"/>
+    <event event="0xc3" title="Cache" name="Ways Read" description="Number of ways read in the instruction cache - Data RAM"/>
+    <event event="0xc4" title="Cache" name="BATC Read" description="Number of ways read in the instruction BTAC RAM"/>
+    <event event="0xca" title="Memory" name="Snoop" description="Data snooped from other processor. This event counts memory-read operations that read data from another processor within the local Cortex-A17 cluster, rather than accessing the L2 cache or issuing an external read. It increments on each transaction, rather than on each beat of data"/>
+    <event event="0xd3" title="Slots" name="Load-Store Unit" description="Duration during which all slots in the Load-Store Unit are busy"/>
+    <event event="0xd8" title="Slots" name="Load-Store Issue Queue" description="Duration during which all slots in the Load-Store Issue queue are busy"/>
+    <event event="0xd9" title="Slots" name="Data Processing Issue Queue" description="Duration during which all slots in the Data Processing issue queue are busy"/>
+    <event event="0xda" title="Slots" name="Data Engine Issue Queue" description="Duration during which all slots in the Data Engine issue queue are busy"/>
+    <event event="0xdb" title="NEON" name="Flush" description="Number of NEON instruction which fail their condition code and lead to a flush of the DE pipe"/>
+    <event event="0xdc" title="Hypervisor" name="Traps" description="Number of Trap to hypervisor"/>
+    <event event="0xde" title="PTM" name="EXTOUT 0" description="PTM EXTOUT 0"/>
+    <event event="0xdf" title="PTM" name="EXTOUT 1" description="PTM EXTOUT 1"/>
+    <event event="0xe0" title="MMU" name="Table Walk" description="Duration during which the MMU handle a Page table walk"/>
+    <event event="0xe1" title="MMU" name="Stage1 Table Walk" description="Duration during which the MMU handle a Stage1 Page table walk"/>
+    <event event="0xe2" title="MMU" name="Stage2 Table Walk" description="Duration during which the MMU handle a Stage2 Page table walk"/>
+    <event event="0xe3" title="MMU" name="LSU Table Walk" description="Duration during which the MMU handle a Page table walk requested by the Load Store Unit"/>
+    <event event="0xe4" title="MMU" name="Instruction Table Walk" description="Duration during which the MMU handle a Page table walk requested by the Instruction side"/>
+    <event event="0xe5" title="MMU" name="Preload Table Walk" description="Duration during which the MMU handle a Page table walk requested by a Preload instruction or Prefetch request"/>
+    <event event="0xe6" title="MMU" name="cp15 Table Walk" description="Duration during which the MMU handle a Page table walk requested by a cp15 operation (maintenance by MVA and VA-to-PA operation)"/>
+    <event event="0xe7" title="Cache" name="L1 PLD TLB refill" description="Level 1 PLD TLB refill"/>
+    <event event="0xe8" title="Cache" name="L1 CP15 TLB refill" description="Level 1 CP15 TLB refill"/>
+    <event event="0xe9" title="Cache" name="L1 TLB flush" description="Level 1 TLB flush"/>
+    <event event="0xea" title="Cache" name="L2 TLB access" description="Level 2 TLB access"/>
+    <event event="0xeb" title="Cache" name="L2 TLB miss" description="Level 2 TLB miss"/>
+  </category>
index 577dcd94185e32777c8687504a669239b66b7ebe..5ba17907d5ab81cdb98d16f8667aecbe5c670d0e 100644 (file)
   <counter_set name="ARM_Cortex-A53_cnt" count="6"/>
   <category name="Cortex-A53" counter_set="ARM_Cortex-A53_cnt" per_cpu="yes" supports_event_based_sampling="yes">
-    <!-- 0x11 CPU_CYCLES - Cycle -->
     <event counter="ARM_Cortex-A53_ccnt" event="0x11" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" average_cores="yes" description="The number of core clock cycles"/>
-    <!-- 0x00 SW_INCR - Instruction architecturally executed (condition check pass) - Software increment -->
     <event event="0x00" title="Software" name="Increment" description="Incremented only on writes to the Software Increment Register"/>
-    <!-- 0x01 L1I_CACHE_REFILL - Level 1 instruction cache refill -->
     <event event="0x01" title="Cache" name="Instruction refill" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
-    <!-- 0x02 L1I_TLB_REFILL - Level 1 instruction TLB refill -->
     <event event="0x02" title="Cache" name="Inst TLB refill" description="Instruction fetch that causes a TLB refill of at least the level of TLB closest to the processor"/>
-    <!-- 0x03 L1D_CACHE_REFILL - Level 1 data cache refill -->
     <event event="0x03" title="Cache" name="Data refill" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
-    <!-- 0x04 L1D_CACHE - Level 1 data cache access -->
     <event event="0x04" title="Cache" name="Data access" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
-    <!-- 0x05 L1D_TLB_REFILL - Level 1 data TLB refill -->
     <event event="0x05" title="Cache" name="Data TLB refill" description="Memory Read or Write operation that causes a TLB refill of at least the level of TLB closest to the processor"/>
-    <!-- 0x08 INST_RETIRED - Instruction architecturally executed -->
     <event event="0x08" title="Instruction" name="Executed" description="Instruction architecturally executed"/>
-    <!-- 0x09 EXC_TAKEN - Exception taken -->
     <event event="0x09" title="Exception" name="Taken" description="Exceptions taken"/>
-    <!-- 0x0A EXC_RETURN - Instruction architecturally executed (condition check pass) - Exception return -->
     <event event="0x0a" title="Exception" name="Return" description="Exception return architecturally executed"/>
-    <!-- 0x0B CID_WRITE_RETIRED - Instruction architecturally executed (condition check pass) - Write to CONTEXTIDR -->
     <event event="0x0b" title="Instruction" name="CONTEXTIDR" description="Instruction that writes to the CONTEXTIDR architecturally executed"/>
-    <!-- 0x10 BR_MIS_PRED - Mispredicted or not predicted branch speculatively executed -->
     <event event="0x10" title="Branch" name="Mispredicted" description="Branch mispredicted or not predicted"/>
-    <!-- 0x12 BR_PRED - Predictable branch speculatively executed -->
     <event event="0x12" title="Branch" name="Potential prediction" description="Branch or other change in program flow that could have been predicted by the branch prediction resources of the processor"/>
-    <!-- 0x13 MEM_ACCESS - Data memory access -->
     <event event="0x13" title="Memory" name="Memory access" description="Data memory access"/>
-    <!-- 0x14 L1I_CACHE - Level 1 instruction cache access -->
     <event event="0x14" title="Cache" name="L1 inst access" description="Level 1 instruction cache access"/>
-    <!-- 0x15 L1D_CACHE_WB - Level 1 data cache Write-Back -->
     <event event="0x15" title="Cache" name="L1 data write" description="Level 1 data cache Write-Back"/>
-    <!-- 0x16 L2D_CACHE - Level 2 data cache access -->
     <event event="0x16" title="Cache" name="L2 data access" description="Level 2 data cache access"/>
-    <!-- 0x17 L2D_CACHE_REFILL - Level 2 data cache refill -->
     <event event="0x17" title="Cache" name="L2 data refill" description="Level 2 data cache refill"/>
-    <!-- 0x18 L2D_CACHE_WB - Level 2 data cache Write-Back -->
     <event event="0x18" title="Cache" name="L2 data write" description="Level 2 data cache Write-Back"/>
-    <!-- 0x19 BUS_ACCESS - Bus access -->
     <event event="0x19" title="Bus" name="Access" description="Bus access"/>
-    <!-- 0x1A MEMORY_ERROR - Local memory error -->
     <event event="0x1A" title="Memory" name="Error" description="Local memory error"/>
-    <!-- 0x1B INST_SPEC - Operation speculatively executed -->
     <event event="0x1B" title="Instruction" name="Speculative" description="Operation speculatively executed"/>
-    <!-- 0x1C TTBR_WRITE_RETIRED - Instruction architecturally executed (condition check pass) - Write to translation table base -->
     <event event="0x1C" title="Memory" name="Translation table" description="Instruction architecturally executed (condition check pass) - Write to translation table base"/>
-    <!-- 0x1D BUS_CYCLES - Bus cycle -->
     <event event="0x1D" title="Bus" name="Cycle" description="Bus cycle"/>
-    <!-- 0x1E CHAIN - Odd performance counter chain mode -->
     <event event="0x1E" title="Counter chain" name="Odd Performance" description="Odd performance counter chain mode"/>
-    <!-- 0x40 L1D_CACHE_LD - Level 1 data cache access - Read -->
     <event event="0x40" title="Cache" name="L1 data read" description="Level 1 data cache access - Read"/>
-    <!-- 0x41 L1D_CACHE_ST - Level 1 data cache access - Write -->
     <event event="0x41" title="Cache" name="L1 data access write" description="Level 1 data cache access - Write"/>
-    <!-- 0x42 L1D_CACHE_REFILL_LD - Level 1 data cache refill - Read -->
     <event event="0x42" title="Cache" name="L1 data refill read" description="Level 1 data cache refill - Read"/>
-    <!-- 0x43 L1D_CACHE_REFILL_ST - Level 1 data cache refill - Write -->
     <event event="0x43" title="Cache" name="L1 data refill write" description="Level 1 data cache refill - Write"/>
-    <!-- 0x46 L1D_CACHE_WB_VICTIM - Level 1 data cache Write-back - Victim -->
     <event event="0x46" title="Cache" name="L1 data victim" description="Level 1 data cache Write-back - Victim"/>
-    <!-- 0x47 L1D_CACHE_WB_CLEAN - Level 1 data cache Write-back - Cleaning and coherency -->
     <event event="0x47" title="Cache" name="L1 data clean" description="Level 1 data cache Write-back - Cleaning and coherency"/>
-    <!-- 0x48 L1D_CACHE_INVAL - Level 1 data cache invalidate -->
     <event event="0x48" title="Cache" name="L1 data invalidate" description="Level 1 data cache invalidate"/>
-    <!-- 0x4C L1D_TLB_REFILL_LD - Level 1 data TLB refill - Read -->
     <event event="0x4C" title="Cache" name="L1 data refill read" description="Level 1 data TLB refill - Read"/>
-    <!-- 0x4D L1D_TLB_REFILL_ST - Level 1 data TLB refill - Write -->
     <event event="0x4D" title="Cache" name="L1 data refill write" description="Level 1 data TLB refill - Write"/>
-    <!-- 0x50 L2D_CACHE_LD - Level 2 data cache access - Read -->
     <event event="0x50" title="Cache" name="L2 data read" description="Level 2 data cache access - Read"/>
-    <!-- 0x51 L2D_CACHE_ST - Level 2 data cache access - Write -->
     <event event="0x51" title="Cache" name="L2 data access write" description="Level 2 data cache access - Write"/>
-    <!-- 0x52 L2D_CACHE_REFILL_LD - Level 2 data cache refill - Read -->
     <event event="0x52" title="Cache" name="L2 data refill read" description="Level 2 data cache refill - Read"/>
-    <!-- 0x53 L2D_CACHE_REFILL_ST - Level 2 data cache refill - Write -->
     <event event="0x53" title="Cache" name="L2 data refill write" description="Level 2 data cache refill - Write"/>
-    <!-- 0x56 L2D_CACHE_WB_VICTIM - Level 2 data cache Write-back - Victim -->
     <event event="0x56" title="Cache" name="L2 data victim" description="Level 2 data cache Write-back - Victim"/>
-    <!-- 0x57 L2D_CACHE_WB_CLEAN - Level 2 data cache Write-back - Cleaning and coherency -->
     <event event="0x57" title="Cache" name="L2 data clean" description="Level 2 data cache Write-back - Cleaning and coherency"/>
-    <!-- 0x58 L2D_CACHE_INVAL - Level 2 data cache invalidate -->
     <event event="0x58" title="Cache" name="L2 data invalidate" description="Level 2 data cache invalidate"/>
-    <!-- 0x60 BUS_ACCESS_LD - Bus access - Read -->
     <event event="0x60" title="Bus" name="Read" description="Bus access - Read"/>
-    <!-- 0x61 BUS_ACCESS_ST - Bus access - Write -->
     <event event="0x61" title="Bus" name="Write" description="Bus access - Write"/>
-    <!-- 0x62 BUS_ACCESS_SHARED - Bus access - Normal -->
     <event event="0x62" title="Bus" name="Access shared" description="Bus access - Normal"/>
-    <!-- 0x63 BUS_ACCESS_NOT_SHARED - Bus access - Not normal -->
     <event event="0x63" title="Bus" name="Access not shared" description="Bus access - Not normal"/>
-    <!-- 0x64 BUS_ACCESS_NORMAL - Bus access - Normal -->
     <event event="0x64" title="Bus" name="Access normal" description="Bus access - Normal"/>
-    <!-- 0x65 BUS_ACCESS_PERIPH - Bus access - Peripheral -->
     <event event="0x65" title="Bus" name="Peripheral" description="Bus access - Peripheral"/>
-    <!-- 0x66 MEM_ACCESS_LD - Data memory access - Read -->
     <event event="0x66" title="Memory" name="Read" description="Data memory access - Read"/>
-    <!-- 0x67 MEM_ACCESS_ST - Data memory access - Write -->
     <event event="0x67" title="Memory" name="Write" description="Data memory access - Write"/>
-    <!-- 0x68 UNALIGNED_LD_SPEC - Unaligned access - Read -->
     <event event="0x68" title="Memory" name="Unaligned Read" description="Unaligned access - Read"/>
-    <!-- 0x69 UNALIGNED_ST_SPEC - Unaligned access - Write -->
     <event event="0x69" title="Memory" name="Unaligned Write" description="Unaligned access - Write"/>
-    <!-- 0x6A UNALIGNED_LDST_SPEC - Unaligned access -->
     <event event="0x6A" title="Memory" name="Unaligned" description="Unaligned access"/>
-    <!-- 0x6C LDREX_SPEC - Exclusive operation speculatively executed - LDREX -->
     <event event="0x6C" title="Intrinsic" name="LDREX" description="Exclusive operation speculatively executed - LDREX"/>
-    <!-- 0x6D STREX_PASS_SPEC - Exclusive instruction speculatively executed - STREX pass -->
     <event event="0x6D" title="Intrinsic" name="STREX pass" description="Exclusive instruction speculatively executed - STREX pass"/>
-    <!-- 0x6E STREX_FAIL_SPEC - Exclusive operation speculatively executed - STREX fail -->
     <event event="0x6E" title="Intrinsic" name="STREX fail" description="Exclusive operation speculatively executed - STREX fail"/>
-    <!-- 0x70 LD_SPEC - Operation speculatively executed - Load -->
     <event event="0x70" title="Instruction" name="Load" description="Operation speculatively executed - Load"/>
-    <!-- 0x71 ST_SPEC - Operation speculatively executed - Store -->
     <event event="0x71" title="Instruction" name="Store" description="Operation speculatively executed - Store"/>
-    <!-- 0x72 LDST_SPEC - Operation speculatively executed - Load or store -->
     <event event="0x72" title="Instruction" name="Load/Store" description="Operation speculatively executed - Load or store"/>
-    <!-- 0x73 DP_SPEC - Operation speculatively executed - Integer data processing -->
     <event event="0x73" title="Instruction" name="Integer" description="Operation speculatively executed - Integer data processing"/>
-    <!-- 0x74 ASE_SPEC - Operation speculatively executed - Advanced SIMD -->
     <event event="0x74" title="Instruction" name="Advanced SIMD" description="Operation speculatively executed - Advanced SIMD"/>
-    <!-- 0x75 VFP_SPEC - Operation speculatively executed - VFP -->
     <event event="0x75" title="Instruction" name="VFP" description="Operation speculatively executed - VFP"/>
-    <!-- 0x76 PC_WRITE_SPEC - Operation speculatively executed - Software change of the PC -->
     <event event="0x76" title="Instruction" name="Software change" description="Operation speculatively executed - Software change of the PC"/>
-    <!-- 0x77 CRYPTO_SPEC - Operation speculatively executed, crypto data processing -->
     <event event="0x77" title="Instruction" name="Crypto" description="Operation speculatively executed, crypto data processing"/>
-    <!-- 0x78 BR_IMMED_SPEC - Branch speculatively executed - Immediate branch -->
     <event event="0x78" title="Instruction" name="Immediate branch" description="Branch speculatively executed - Immediate branch"/>
-    <!-- 0x79 BR_RETURN_SPEC - Branch speculatively executed - Procedure return -->
     <event event="0x79" title="Instruction" name="Procedure return" description="Branch speculatively executed - Procedure return"/>
-    <!-- 0x7A BR_INDIRECT_SPEC - Branch speculatively executed - Indirect branch -->
     <event event="0x7A" title="Instruction" name="Indirect branch" description="Branch speculatively executed - Indirect branch"/>
-    <!-- 0x7C ISB_SPEC - Barrier speculatively executed - ISB -->
     <event event="0x7C" title="Instruction" name="ISB" description="Barrier speculatively executed - ISB"/>
-    <!-- 0x7D DSB_SPEC - Barrier speculatively executed - DSB -->
     <event event="0x7D" title="Instruction" name="DSB" description="Barrier speculatively executed - DSB"/>
-    <!-- 0x7E DMB_SPEC - Barrier speculatively executed - DMB -->
     <event event="0x7E" title="Instruction" name="DMB" description="Barrier speculatively executed - DMB"/>
-    <!-- 0x81 EXC_UNDEF - Exception taken, other synchronous -->
     <event event="0x81" title="Exception" name="Undefined" description="Exception taken, other synchronous"/>
-    <!-- 0x82 EXC_SVC - Exception taken, Supervisor Call -->
     <event event="0x82" title="Exception" name="Supervisor" description="Exception taken, Supervisor Call"/>
-    <!-- 0x83 EXC_PABORT - Exception taken, Instruction Abort -->
     <event event="0x83" title="Exception" name="Instruction abort" description="Exception taken, Instruction Abort"/>
-    <!-- 0x84 EXC_DABORT - Exception taken, Data Abort or SError -->
     <event event="0x84" title="Exception" name="Data abort" description="Exception taken, Data Abort or SError"/>
-    <!-- 0x86 EXC_IRQ - Exception taken, IRQ -->
     <event event="0x86" title="Interrupts" name="IRQ" description="Exception taken, IRQ"/>
-    <!-- 0x87 EXC_FIQ - Exception taken, FIQ -->
     <event event="0x87" title="Interrupts" name="FIQ" description="Exception taken, FIQ"/>
-    <!-- 0x88 EXC_SMC - Exception taken, Secure Monitor Call -->
     <event event="0x88" title="Exception" name="Secure monitor call" description="Exception taken, Secure Monitor Call"/>
-    <!-- 0x8A EXC_HVC - Exception taken, Hypervisor Call -->
     <event event="0x8A" title="Exception" name="Hypervisor call" description="Exception taken, Hypervisor Call"/>
-    <!-- 0x8B EXC_TRAP_PABORT - Exception taken, Instruction Abort not taken locally -->
     <event event="0x8B" title="Exception" name="Instruction abort non-local" description="Exception taken, Instruction Abort not taken locally"/>
-    <!-- 0x8C EXC_TRAP_DABORT - Exception taken, Data Abort or SError not taken locally -->
     <event event="0x8C" title="Exception" name="Data abort non-local" description="Exception taken, Data Abort or SError not taken locally"/>
-    <!-- 0x8D EXC_TRAP_OTHER - Exception taken - Other traps not taken locally -->
     <event event="0x8D" title="Exception" name="Other non-local" description="Exception taken - Other traps not taken locally"/>
-    <!-- 0x8E EXC_TRAP_IRQ - Exception taken, IRQ not taken locally -->
     <event event="0x8E" title="Exception" name="IRQ non-local" description="Exception taken, IRQ not taken locally"/>
-    <!-- 0x8F EXC_TRAP_FIQ - Exception taken, FIQ not taken locally -->
     <event event="0x8F" title="Exception" name="FIQ non-local" description="Exception taken, FIQ not taken locally"/>
-    <!-- 0x90 RC_LD_SPEC - Release consistency instruction speculatively executed - Load Acquire -->
     <event event="0x90" title="Release Consistency" name="Load" description="Release consistency instruction speculatively executed - Load Acquire"/>
-    <!-- 0x91 RC_ST_SPEC - Release consistency instruction speculatively executed - Store Release -->
     <event event="0x91" title="Release Consistency" name="Store" description="Release consistency instruction speculatively executed - Store Release"/>
   </category>
index b7178c0c742701adf63f9d1cdafa2456a9ae3d51..fbe96c2d4eb2935f7cb05af6216ba62022350298 100644 (file)
   <counter_set name="ARM_Cortex-A57_cnt" count="6"/>
   <category name="Cortex-A57" counter_set="ARM_Cortex-A57_cnt" per_cpu="yes" supports_event_based_sampling="yes">
-    <!-- 0x11 CPU_CYCLES - Cycle -->
     <event counter="ARM_Cortex-A57_ccnt" event="0x11" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" average_cores="yes" description="The number of core clock cycles"/>
-    <!-- 0x00 SW_INCR - Instruction architecturally executed (condition check pass) - Software increment -->
     <event event="0x00" title="Software" name="Increment" description="Incremented only on writes to the Software Increment Register"/>
-    <!-- 0x01 L1I_CACHE_REFILL - Level 1 instruction cache refill -->
     <event event="0x01" title="Cache" name="Instruction refill" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
-    <!-- 0x02 L1I_TLB_REFILL - Level 1 instruction TLB refill -->
     <event event="0x02" title="Cache" name="Inst TLB refill" description="Instruction fetch that causes a TLB refill of at least the level of TLB closest to the processor"/>
-    <!-- 0x03 L1D_CACHE_REFILL - Level 1 data cache refill -->
     <event event="0x03" title="Cache" name="Data refill" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
-    <!-- 0x04 L1D_CACHE - Level 1 data cache access -->
     <event event="0x04" title="Cache" name="Data access" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
-    <!-- 0x05 L1D_TLB_REFILL - Level 1 data TLB refill -->
     <event event="0x05" title="Cache" name="Data TLB refill" description="Memory Read or Write operation that causes a TLB refill of at least the level of TLB closest to the processor"/>
-    <!-- 0x08 INST_RETIRED - Instruction architecturally executed -->
     <event event="0x08" title="Instruction" name="Executed" description="Instruction architecturally executed"/>
-    <!-- 0x09 EXC_TAKEN - Exception taken -->
     <event event="0x09" title="Exception" name="Taken" description="Exceptions taken"/>
-    <!-- 0x0A EXC_RETURN - Instruction architecturally executed (condition check pass) - Exception return -->
     <event event="0x0a" title="Exception" name="Return" description="Exception return architecturally executed"/>
-    <!-- 0x0B CID_WRITE_RETIRED - Instruction architecturally executed (condition check pass) - Write to CONTEXTIDR -->
     <event event="0x0b" title="Instruction" name="CONTEXTIDR" description="Instruction that writes to the CONTEXTIDR architecturally executed"/>
-    <!-- 0x10 BR_MIS_PRED - Mispredicted or not predicted branch speculatively executed -->
     <event event="0x10" title="Branch" name="Mispredicted" description="Branch mispredicted or not predicted"/>
-    <!-- 0x12 BR_PRED - Predictable branch speculatively executed -->
     <event event="0x12" title="Branch" name="Potential prediction" description="Branch or other change in program flow that could have been predicted by the branch prediction resources of the processor"/>
-    <!-- 0x13 MEM_ACCESS - Data memory access -->
     <event event="0x13" title="Memory" name="Memory access" description="Data memory access"/>
-    <!-- 0x14 L1I_CACHE - Level 1 instruction cache access -->
     <event event="0x14" title="Cache" name="L1 inst access" description="Level 1 instruction cache access"/>
-    <!-- 0x15 L1D_CACHE_WB - Level 1 data cache Write-Back -->
     <event event="0x15" title="Cache" name="L1 data write" description="Level 1 data cache Write-Back"/>
-    <!-- 0x16 L2D_CACHE - Level 2 data cache access -->
     <event event="0x16" title="Cache" name="L2 data access" description="Level 2 data cache access"/>
-    <!-- 0x17 L2D_CACHE_REFILL - Level 2 data cache refill -->
     <event event="0x17" title="Cache" name="L2 data refill" description="Level 2 data cache refill"/>
-    <!-- 0x18 L2D_CACHE_WB - Level 2 data cache Write-Back -->
     <event event="0x18" title="Cache" name="L2 data write" description="Level 2 data cache Write-Back"/>
-    <!-- 0x19 BUS_ACCESS - Bus access -->
     <event event="0x19" title="Bus" name="Access" description="Bus access"/>
-    <!-- 0x1A MEMORY_ERROR - Local memory error -->
     <event event="0x1A" title="Memory" name="Error" description="Local memory error"/>
-    <!-- 0x1B INST_SPEC - Operation speculatively executed -->
     <event event="0x1B" title="Instruction" name="Speculative" description="Operation speculatively executed"/>
-    <!-- 0x1C TTBR_WRITE_RETIRED - Instruction architecturally executed (condition check pass) - Write to translation table base -->
     <event event="0x1C" title="Memory" name="Translation table" description="Instruction architecturally executed (condition check pass) - Write to translation table base"/>
-    <!-- 0x1D BUS_CYCLES - Bus cycle -->
     <event event="0x1D" title="Bus" name="Cycle" description="Bus cycle"/>
-    <!-- 0x1E CHAIN - Odd performance counter chain mode -->
     <event event="0x1E" title="Counter chain" name="Odd Performance" description="Odd performance counter chain mode"/>
-    <!-- 0x40 L1D_CACHE_LD - Level 1 data cache access - Read -->
     <event event="0x40" title="Cache" name="L1 data read" description="Level 1 data cache access - Read"/>
-    <!-- 0x41 L1D_CACHE_ST - Level 1 data cache access - Write -->
     <event event="0x41" title="Cache" name="L1 data access write" description="Level 1 data cache access - Write"/>
-    <!-- 0x42 L1D_CACHE_REFILL_LD - Level 1 data cache refill - Read -->
     <event event="0x42" title="Cache" name="L1 data refill read" description="Level 1 data cache refill - Read"/>
-    <!-- 0x43 L1D_CACHE_REFILL_ST - Level 1 data cache refill - Write -->
     <event event="0x43" title="Cache" name="L1 data refill write" description="Level 1 data cache refill - Write"/>
-    <!-- 0x46 L1D_CACHE_WB_VICTIM - Level 1 data cache Write-back - Victim -->
     <event event="0x46" title="Cache" name="L1 data victim" description="Level 1 data cache Write-back - Victim"/>
-    <!-- 0x47 L1D_CACHE_WB_CLEAN - Level 1 data cache Write-back - Cleaning and coherency -->
     <event event="0x47" title="Cache" name="L1 data clean" description="Level 1 data cache Write-back - Cleaning and coherency"/>
-    <!-- 0x48 L1D_CACHE_INVAL - Level 1 data cache invalidate -->
     <event event="0x48" title="Cache" name="L1 data invalidate" description="Level 1 data cache invalidate"/>
-    <!-- 0x4C L1D_TLB_REFILL_LD - Level 1 data TLB refill - Read -->
     <event event="0x4C" title="Cache" name="L1 data TLB refill read" description="Level 1 data TLB refill - Read"/>
-    <!-- 0x4D L1D_TLB_REFILL_ST - Level 1 data TLB refill - Write -->
     <event event="0x4D" title="Cache" name="L1 data TLB refill write" description="Level 1 data TLB refill - Write"/>
-    <!-- 0x50 L2D_CACHE_LD - Level 2 data cache access - Read -->
     <event event="0x50" title="Cache" name="L2 data read" description="Level 2 data cache access - Read"/>
-    <!-- 0x51 L2D_CACHE_ST - Level 2 data cache access - Write -->
     <event event="0x51" title="Cache" name="L2 data access write" description="Level 2 data cache access - Write"/>
-    <!-- 0x52 L2D_CACHE_REFILL_LD - Level 2 data cache refill - Read -->
     <event event="0x52" title="Cache" name="L2 data refill read" description="Level 2 data cache refill - Read"/>
-    <!-- 0x53 L2D_CACHE_REFILL_ST - Level 2 data cache refill - Write -->
     <event event="0x53" title="Cache" name="L2 data refill write" description="Level 2 data cache refill - Write"/>
-    <!-- 0x56 L2D_CACHE_WB_VICTIM - Level 2 data cache Write-back - Victim -->
     <event event="0x56" title="Cache" name="L2 data victim" description="Level 2 data cache Write-back - Victim"/>
-    <!-- 0x57 L2D_CACHE_WB_CLEAN - Level 2 data cache Write-back - Cleaning and coherency -->
     <event event="0x57" title="Cache" name="L2 data clean" description="Level 2 data cache Write-back - Cleaning and coherency"/>
-    <!-- 0x58 L2D_CACHE_INVAL - Level 2 data cache invalidate -->
     <event event="0x58" title="Cache" name="L2 data invalidate" description="Level 2 data cache invalidate"/>
-    <!-- 0x60 BUS_ACCESS_LD - Bus access - Read -->
     <event event="0x60" title="Bus" name="Read" description="Bus access - Read"/>
-    <!-- 0x61 BUS_ACCESS_ST - Bus access - Write -->
     <event event="0x61" title="Bus" name="Write" description="Bus access - Write"/>
-    <!-- 0x62 BUS_ACCESS_SHARED - Bus access - Normal -->
     <event event="0x62" title="Bus" name="Access shared" description="Bus access - Normal"/>
-    <!-- 0x63 BUS_ACCESS_NOT_SHARED - Bus access - Not normal -->
     <event event="0x63" title="Bus" name="Access not shared" description="Bus access - Not normal"/>
-    <!-- 0x64 BUS_ACCESS_NORMAL - Bus access - Normal -->
     <event event="0x64" title="Bus" name="Access normal" description="Bus access - Normal"/>
-    <!-- 0x65 BUS_ACCESS_PERIPH - Bus access - Peripheral -->
     <event event="0x65" title="Bus" name="Peripheral" description="Bus access - Peripheral"/>
-    <!-- 0x66 MEM_ACCESS_LD - Data memory access - Read -->
     <event event="0x66" title="Memory" name="Read" description="Data memory access - Read"/>
-    <!-- 0x67 MEM_ACCESS_ST - Data memory access - Write -->
     <event event="0x67" title="Memory" name="Write" description="Data memory access - Write"/>
-    <!-- 0x68 UNALIGNED_LD_SPEC - Unaligned access - Read -->
     <event event="0x68" title="Memory" name="Unaligned Read" description="Unaligned access - Read"/>
-    <!-- 0x69 UNALIGNED_ST_SPEC - Unaligned access - Write -->
     <event event="0x69" title="Memory" name="Unaligned Write" description="Unaligned access - Write"/>
-    <!-- 0x6A UNALIGNED_LDST_SPEC - Unaligned access -->
     <event event="0x6A" title="Memory" name="Unaligned" description="Unaligned access"/>
-    <!-- 0x6C LDREX_SPEC - Exclusive operation speculatively executed - LDREX -->
     <event event="0x6C" title="Intrinsic" name="LDREX" description="Exclusive operation speculatively executed - LDREX"/>
-    <!-- 0x6D STREX_PASS_SPEC - Exclusive instruction speculatively executed - STREX pass -->
     <event event="0x6D" title="Intrinsic" name="STREX pass" description="Exclusive instruction speculatively executed - STREX pass"/>
-    <!-- 0x6E STREX_FAIL_SPEC - Exclusive operation speculatively executed - STREX fail -->
     <event event="0x6E" title="Intrinsic" name="STREX fail" description="Exclusive operation speculatively executed - STREX fail"/>
-    <!-- 0x70 LD_SPEC - Operation speculatively executed - Load -->
     <event event="0x70" title="Instruction" name="Load" description="Operation speculatively executed - Load"/>
-    <!-- 0x71 ST_SPEC - Operation speculatively executed - Store -->
     <event event="0x71" title="Instruction" name="Store" description="Operation speculatively executed - Store"/>
-    <!-- 0x72 LDST_SPEC - Operation speculatively executed - Load or store -->
     <event event="0x72" title="Instruction" name="Load/Store" description="Operation speculatively executed - Load or store"/>
-    <!-- 0x73 DP_SPEC - Operation speculatively executed - Integer data processing -->
     <event event="0x73" title="Instruction" name="Integer" description="Operation speculatively executed - Integer data processing"/>
-    <!-- 0x74 ASE_SPEC - Operation speculatively executed - Advanced SIMD -->
     <event event="0x74" title="Instruction" name="Advanced SIMD" description="Operation speculatively executed - Advanced SIMD"/>
-    <!-- 0x75 VFP_SPEC - Operation speculatively executed - VFP -->
     <event event="0x75" title="Instruction" name="VFP" description="Operation speculatively executed - VFP"/>
-    <!-- 0x76 PC_WRITE_SPEC - Operation speculatively executed - Software change of the PC -->
     <event event="0x76" title="Instruction" name="Software change" description="Operation speculatively executed - Software change of the PC"/>
-    <!-- 0x77 CRYPTO_SPEC - Operation speculatively executed, crypto data processing -->
     <event event="0x77" title="Instruction" name="Crypto" description="Operation speculatively executed, crypto data processing"/>
-    <!-- 0x78 BR_IMMED_SPEC - Branch speculatively executed - Immediate branch -->
     <event event="0x78" title="Instruction" name="Immediate branch" description="Branch speculatively executed - Immediate branch"/>
-    <!-- 0x79 BR_RETURN_SPEC - Branch speculatively executed - Procedure return -->
     <event event="0x79" title="Instruction" name="Procedure return" description="Branch speculatively executed - Procedure return"/>
-    <!-- 0x7A BR_INDIRECT_SPEC - Branch speculatively executed - Indirect branch -->
     <event event="0x7A" title="Instruction" name="Indirect branch" description="Branch speculatively executed - Indirect branch"/>
-    <!-- 0x7C ISB_SPEC - Barrier speculatively executed - ISB -->
     <event event="0x7C" title="Instruction" name="ISB" description="Barrier speculatively executed - ISB"/>
-    <!-- 0x7D DSB_SPEC - Barrier speculatively executed - DSB -->
     <event event="0x7D" title="Instruction" name="DSB" description="Barrier speculatively executed - DSB"/>
-    <!-- 0x7E DMB_SPEC - Barrier speculatively executed - DMB -->
     <event event="0x7E" title="Instruction" name="DMB" description="Barrier speculatively executed - DMB"/>
-    <!-- 0x81 EXC_UNDEF - Exception taken, other synchronous -->
     <event event="0x81" title="Exception" name="Undefined" description="Exception taken, other synchronous"/>
-    <!-- 0x82 EXC_SVC - Exception taken, Supervisor Call -->
     <event event="0x82" title="Exception" name="Supervisor" description="Exception taken, Supervisor Call"/>
-    <!-- 0x83 EXC_PABORT - Exception taken, Instruction Abort -->
     <event event="0x83" title="Exception" name="Instruction abort" description="Exception taken, Instruction Abort"/>
-    <!-- 0x84 EXC_DABORT - Exception taken, Data Abort or SError -->
     <event event="0x84" title="Exception" name="Data abort" description="Exception taken, Data Abort or SError"/>
-    <!-- 0x86 EXC_IRQ - Exception taken, IRQ -->
     <event event="0x86" title="Interrupts" name="IRQ" description="Exception taken, IRQ"/>
-    <!-- 0x87 EXC_FIQ - Exception taken, FIQ -->
     <event event="0x87" title="Interrupts" name="FIQ" description="Exception taken, FIQ"/>
-    <!-- 0x88 EXC_SMC - Exception taken, Secure Monitor Call -->
     <event event="0x88" title="Exception" name="Secure monitor call" description="Exception taken, Secure Monitor Call"/>
-    <!-- 0x8A EXC_HVC - Exception taken, Hypervisor Call -->
     <event event="0x8A" title="Exception" name="Hypervisor call" description="Exception taken, Hypervisor Call"/>
-    <!-- 0x8B EXC_TRAP_PABORT - Exception taken, Instruction Abort not taken locally -->
     <event event="0x8B" title="Exception" name="Instruction abort non-local" description="Exception taken, Instruction Abort not taken locally"/>
-    <!-- 0x8C EXC_TRAP_DABORT - Exception taken, Data Abort or SError not taken locally -->
     <event event="0x8C" title="Exception" name="Data abort non-local" description="Exception taken, Data Abort or SError not taken locally"/>
-    <!-- 0x8D EXC_TRAP_OTHER - Exception taken - Other traps not taken locally -->
     <event event="0x8D" title="Exception" name="Other non-local" description="Exception taken - Other traps not taken locally"/>
-    <!-- 0x8E EXC_TRAP_IRQ - Exception taken, IRQ not taken locally -->
     <event event="0x8E" title="Exception" name="IRQ non-local" description="Exception taken, IRQ not taken locally"/>
-    <!-- 0x8F EXC_TRAP_FIQ - Exception taken, FIQ not taken locally -->
     <event event="0x8F" title="Exception" name="FIQ non-local" description="Exception taken, FIQ not taken locally"/>
-    <!-- 0x90 RC_LD_SPEC - Release consistency instruction speculatively executed - Load Acquire -->
     <event event="0x90" title="Release Consistency" name="Load" description="Release consistency instruction speculatively executed - Load Acquire"/>
-    <!-- 0x91 RC_ST_SPEC - Release consistency instruction speculatively executed - Store Release -->
     <event event="0x91" title="Release Consistency" name="Store" description="Release consistency instruction speculatively executed - Store Release"/>
   </category>
diff --git a/tools/gator/daemon/events-Filesystem.xml b/tools/gator/daemon/events-Filesystem.xml
new file mode 100644 (file)
index 0000000..9ef61dd
--- /dev/null
@@ -0,0 +1,11 @@
+  <category name="Filesystem">
+    <!-- counter attribute must start with filesystem_ and be unique -->
+    <!-- regex item in () is the value shown -->
+    <!--
+    <event counter="filesystem_cpu1_online" path="/sys/devices/system/cpu/cpu1/online" title="online" name="cpu 1" class="absolute" description="If cpu 1 is online"/>
+    <event counter="filesystem_loginuid" path="/proc/self/loginuid" title="loginuid" name="loginuid" class="absolute" description="loginuid"/>
+    <event counter="filesystem_gatord_rss" path="/proc/self/stat" title="stat" name="rss" class="absolute" regex="-?[0-9]+ \(.*\) . -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ -?[0-9]+ (-?[0-9]+)" units="pages" description="resident set size"/>
+    <event counter="filesystem_processes" path="/proc/stat" title="proc-stat" name="processes" class="absolute" regex="processes ([0-9]+)" description="Number of processes and threads created"/>
+    <event counter="filesystem_context_switches" path="/proc/stat" title="proc-stat" name="context switches" class="absolute" regex="ctxt ([0-9]+)" description="Number of context switches across all CPUs"/>
+    -->
+  </category>
index 4da4d1d6343142b09e01e605a79f6bd1a00a362c..923fb90334d042a29b828a033b93f33d537640f7 100644 (file)
@@ -1,18 +1,18 @@
   <counter_set name="L2C-310_cnt" count="2"/>
   <category name="L2C-310" counter_set="L2C-310_cnt" per_cpu="no">
-    <event event="0x1" title="L2 Cache" name="CO" description="Eviction, CastOUT, of a line from the L2 cache"/>
-    <event event="0x2" title="L2 Cache" name="DRH" description="Data read hit"/>
-    <event event="0x3" title="L2 Cache" name="DRREQ" description="Data read request"/>
-    <event event="0x4" title="L2 Cache" name="DWHIT" description="Data write hit"/>
-    <event event="0x5" title="L2 Cache" name="DWREQ" description="Data write request"/>
-    <event event="0x6" title="L2 Cache" name="DWTREQ" description="Data write request with write-through attribute"/>
-    <event event="0x7" title="L2 Cache" name="IRHIT" description="Instruction read hit"/>
-    <event event="0x8" title="L2 Cache" name="IRREQ" description="Instruction read request"/>
-    <event event="0x9" title="L2 Cache" name="WA" description="Write allocate"/>
-    <event event="0xa" title="L2 Cache" name="IPFALLOC" description="Allocation of a prefetch generated by L2C-310 into the L2 cache"/>
-    <event event="0xb" title="L2 Cache" name="EPFHIT" description="Prefetch hint hits in the L2 cache"/>
-    <event event="0xc" title="L2 Cache" name="EPFALLOC" description="Prefetch hint allocated into the L2 cache"/>
-    <event event="0xd" title="L2 Cache" name="SRRCVD" description="Speculative read received"/>
-    <event event="0xe" title="L2 Cache" name="SRCONF" description="Speculative read confirmed"/>
-    <event event="0xf" title="L2 Cache" name="EPFRCVD" description="Prefetch hint received"/>
+    <event event="0x1" title="L2 Cache" name="CastOUT" description="Eviction, CastOUT, of a line from the L2 cache"/>
+    <event event="0x2" title="L2 Cache" name="Data Read Hit" description="Data read hit in the L2 cache"/>
+    <event event="0x3" title="L2 Cache" name="Data Read Request" description="Data read lookup to the L2 cache. Subsequently results in a hit or miss"/>
+    <event event="0x4" title="L2 Cache" name="Data Write Hit" description="Data write hit in the L2 cache"/>
+    <event event="0x5" title="L2 Cache" name="Data Write Request" description="Data write lookup to the L2 cache. Subsequently results in a hit or miss"/>
+    <event event="0x6" title="L2 Cache" name="Data Write-Through Request" description="Data write lookup to the L2 cache with Write-Through attribute. Subsequently results in a hit or miss"/>
+    <event event="0x7" title="L2 Cache" name="Instruction Read Hit" description="Instruction read hit in the L2 cache"/>
+    <event event="0x8" title="L2 Cache" name="Instruction Read Request" description="Instruction read lookup to the L2 cache. Subsequently results in a hit or miss"/>
+    <event event="0x9" title="L2 Cache" name="Write Allocate Miss" description="Allocation into the L2 cache caused by a write, with Write-Allocate attribute, miss"/>
+    <event event="0xa" title="L2 Cache" name="Internal Prefetch Allocate" description="Allocation of a prefetch generated by L2C-310 into the L2 cache"/>
+    <event event="0xb" title="L2 Cache" name="Prefetch Hit" description="Prefetch hint hits in the L2 cache"/>
+    <event event="0xc" title="L2 Cache" name="Prefetch Allocate" description="Prefetch hint allocated into the L2 cache"/>
+    <event event="0xd" title="L2 Cache" name="Speculative Read Received" description="Speculative read received"/>
+    <event event="0xe" title="L2 Cache" name="Speculative Read Confirmed" description="Speculative read confirmed"/>
+    <event event="0xf" title="L2 Cache" name="Prefetch Hint Received" description="Prefetch hint received"/>
   </category>
index 4d677e15db7e8546c0db6b4052c77575e5931f7c..62a7018d038f733ee9e67a56828318ca10a59815 100644 (file)
@@ -1,17 +1,17 @@
   <category name="Linux">
     <event counter="Linux_irq_softirq" title="Interrupts" name="SoftIRQ" per_cpu="yes" description="Linux SoftIRQ taken"/>
     <event counter="Linux_irq_irq" title="Interrupts" name="IRQ" per_cpu="yes" description="Linux IRQ taken"/>
-    <event counter="Linux_block_rq_wr" title="Disk IO" name="Write" units="B" description="Disk IO Bytes Written"/>
-    <event counter="Linux_block_rq_rd" title="Disk IO" name="Read" units="B" description="Disk IO Bytes Read"/>
+    <event counter="Linux_block_rq_wr" title="Disk I/O" name="Write" units="B" description="Disk I/O Bytes Written"/>
+    <event counter="Linux_block_rq_rd" title="Disk I/O" name="Read" units="B" description="Disk I/O Bytes Read"/>
     <event counter="Linux_net_rx" title="Network" name="Receive" units="B" description="Receive network traffic, including effect from Streamline"/>
     <event counter="Linux_net_tx" title="Network" name="Transmit" units="B" description="Transmit network traffic, including effect from Streamline"/>
     <event counter="Linux_sched_switch" title="Scheduler" name="Switch" per_cpu="yes" description="Context switch events"/>
     <event counter="Linux_meminfo_memused" title="Memory" name="Used" class="absolute" units="B" proc="yes" description="Total used memory size. Note: a process' used memory includes shared memory that may be counted more than once (equivalent to RES from top). Kernel threads are not filterable."/>
+    <event counter="Linux_meminfo_memused2" title="Memory" name="Used" class="absolute" units="B" description="Total used memory size"/>
     <event counter="Linux_meminfo_memfree" title="Memory" name="Free" class="absolute" display="minimum" units="B" description="Available memory size"/>
     <event counter="Linux_meminfo_bufferram" title="Memory" name="Buffer" class="absolute" units="B" description="Memory used by OS disk buffers"/>
     <event counter="Linux_power_cpu_freq" title="Clock" name="Frequency" per_cpu="yes" class="absolute" units="Hz" series_composition="overlay" average_cores="yes" description="Frequency setting of the CPU"/>
-    <event counter="Linux_power_cpu_idle" title="Idle" name="State" per_cpu="yes" class="absolute" description="CPU Idle State + 1, set the Sample Rate to None to prevent the hrtimer from interrupting the system"/>
-    <event counter="Linux_cpu_wait_contention" title="CPU Contention" name="Wait" per_cpu="no" class="activity" derived="yes" rendering_type="bar" average_selection="yes" percentage="yes" modifier="10000" description="Thread waiting on contended resource"/>
-    <event counter="Linux_cpu_wait_io" title="CPU I/O" name="Wait" per_cpu="no" class="activity" derived="yes" rendering_type="bar" average_selection="yes" percentage="yes" modifier="10000" description="Thread waiting on I/O resource"/>
+    <event counter="Linux_cpu_wait_contention" title="CPU Contention" name="Wait" per_cpu="no" class="activity" derived="yes" rendering_type="bar" average_selection="yes" percentage="yes" modifier="10000" color="0x003c96fb" description="One or more threads are runnable but waiting due to CPU contention"/>
+    <event counter="Linux_cpu_wait_io" title="CPU I/O" name="Wait" per_cpu="no" class="activity" derived="yes" rendering_type="bar" average_selection="yes" percentage="yes" modifier="10000" color="0x00b30000" description="One or more threads are blocked on an I/O resource"/>
+    <event counter="Linux_power_cpu" title="CPU Status" name="Activity" class="activity" activity1="Off" activity_color1="0x0000ff00" activity2="WFI" activity_color2="0x000000ff" rendering_type="bar" average_selection="yes" average_cores="yes" percentage="yes" description="CPU Status"/>
   </category>
-
index 5a71386830ba1ff627ca30f3051826c54c79e3e2..0a95dfeb64855910097c3c186a02fcd71e7d166b 100644 (file)
@@ -1,34 +1,33 @@
   <counter_set name="ARM_Mali-4xx_VP_0_cnt" count="2"/>
   <counter_set name="ARM_Mali-4xx_SW_cnt" count="0"/>
-  <counter_set name="ARM_Mali-4xx_Filmstrip_cnt" count="1"/>
-  <category name="Mali-4xx-VP" counter_set="ARM_Mali-4xx_VP_0_cnt" per_cpu="no">
-    <event event="0x01" title="Mali GPU Vertex Processor" name="Active cycles" description="Number of cycles per frame the MaliGP2 was active."/>
-    <event event="0x02" title="Mali GPU Vertex Processor" name="Active cycles, vertex shader" description="Number of cycles per frame the vertex shader unit was active."/>
-    <event event="0x03" title="Mali GPU Vertex Processor" name="Active cycles, vertex storer" description="Number of cycles per frame the vertex storer unit was active."/>
-    <event event="0x04" title="Mali GPU Vertex Processor" name="Active cycles, vertex loader" description="Number of cycles per frame the vertex loader unit was active."/>
-    <event event="0x05" title="Mali GPU Vertex Processor" name="Cycles vertex loader waiting for vertex shader" description="Number of cycles per frame the vertex loader was idle while waiting on the vertex shader."/>
-    <event event="0x06" title="Mali GPU Vertex Processor" name="Words read, system bus" description="Total number of 64 bit words read by the GP2 from the system bus per frame."/>
-    <event event="0x07" title="Mali GPU Vertex Processor" name="Words written, system bus" description="Total number of 64 bit words written by the GP2 to the system bus per frame."/>
-    <event event="0x08" title="Mali GPU Vertex Processor" name="Read bursts, system bus" description="Number of read bursts by the GP2 from the system bus per frame."/>
-    <event event="0x09" title="Mali GPU Vertex Processor" name="Write bursts, system bus" description="Number of write bursts from the MaliGP2 to the system bus per frame."/>
-    <event event="0x0a" title="Mali GPU Vertex Processor" name="Vertices processed" description="Number of vertices processed by the MaliGP2 per frame."/>
-    <event event="0x0b" title="Mali GPU Vertex Processor" name="Vertices fetched" description="Number of vertices fetched by the MaliGP2 per frame."/>
-    <event event="0x0c" title="Mali GPU Vertex Processor" name="Primitives fetched" description="Number of graphics primitives fetched by the MaliGP2 per frame."/>
-    <event event="0x0e" title="Mali GPU Vertex Processor" name="Primitives culled" description="Number of graphics primitives discarded per frame, because they were seen from the back or were offscreen."/>
-    <event event="0x0f" title="Mali GPU Vertex Processor" name="Commands written to tiles" description="Number of commands (8 Bytes, mainly primitives) written by GP2 to the PP input data structure per frame."/>
-    <event event="0x10" title="Mali GPU Vertex Processor" name="Memory blocks allocated" description="Number of overflow data blocks needed for outputting the PP input data structure per frame ."/>
-    <event event="0x13" title="Mali GPU Vertex Processor" name="Vertex loader cache misses" description="Number of cache misses for the vertex shader's vertex input unit per frame."/>
-    <event event="0x16" title="Mali GPU Vertex Processor" name="Active cycles, vertex shader command processor" description="Number of cycles per frame the GP2 vertex shader command processor was active. This includes time waiting for semaphores."/>
-    <event event="0x17" title="Mali GPU Vertex Processor" name="Active cycles, PLBU command processor" description="Number of cycles per frame the MaliGP2 PLBU command processor was active. This includes time waiting for semaphores."/>
-    <event event="0x18" title="Mali GPU Vertex Processor" name="MaliGP2 PLBU cycles per frame" description="Number of cycles per frame the MaliGP2 PLBU output unit was active. This includes time spent waiting on the bus."/>
-    <event event="0x19" title="Mali GPU Vertex Processor" name="Active cycles, PLBU geometry processing" description="Number of cycles per frame the MaliGP2 PLBU was active, excepting final data output. In other words: active cycles through the prepare list commands. This includes time spent waiting on the bus."/>
-    <event event="0x1b" title="Mali GPU Vertex Processor" name="Active cycles, PLBU primitive assembly" description="Number of active cycles per frame spent by the MaliGP2 PLBU doing primitive assembly. This does not include scissoring or final output. This includes time spent waiting on the bus."/>
-    <event event="0x1c" title="Mali GPU Vertex Processor" name="Active cycles, PLBU vertex fetcher" description="Number of active cycles per frame spent by the MaliGP2 PLBU fetching vertex data. This includes time spent waiting on the bus."/>
-    <event event="0x1e" title="Mali GPU Vertex Processor" name="Active cycles, Bounding-box and command generator" description="Number of active cycles per frame spent by the MaliGP2 PLBU setting up bounding boxes and commands (mainly graphics primitives). This includes time spent waiting on the bus."/>
-    <event event="0x20" title="Mali GPU Vertex Processor" name="Active cycles, Scissor tile iterator" description="Number of active cycles per frame spent by the MaliGP2 PLBU iterating over tiles to perform scissoring. This includes time spent waiting on the bus."/>
-    <event event="0x21" title="Mali GPU Vertex Processor" name="Active cycles, PLBU tile iterator" description="Number of active cycles per frame spent by the MaliGP2 PLBU iterating over the tiles in the bounding box generating commands (mainly graphics primitives). This includes time spent waiting on the bus."/>
+  <category name="Mali Vertex Processor" counter_set="ARM_Mali-4xx_VP_0_cnt" per_cpu="no">
+    <event event="0x01" title="Mali-4xx VP" name="Active cycles" description="Number of cycles per frame the MaliGP2 was active."/>
+    <event event="0x02" title="Mali-4xx VP" name="Active cycles, vertex shader" description="Number of cycles per frame the vertex shader unit was active."/>
+    <event event="0x03" title="Mali-4xx VP" name="Active cycles, vertex storer" description="Number of cycles per frame the vertex storer unit was active."/>
+    <event event="0x04" title="Mali-4xx VP" name="Active cycles, vertex loader" description="Number of cycles per frame the vertex loader unit was active."/>
+    <event event="0x05" title="Mali-4xx VP" name="Cycles vertex loader waiting for vertex shader" description="Number of cycles per frame the vertex loader was idle while waiting on the vertex shader."/>
+    <event event="0x06" title="Mali-4xx VP" name="Words read, system bus" description="Total number of 64 bit words read by the GP2 from the system bus per frame."/>
+    <event event="0x07" title="Mali-4xx VP" name="Words written, system bus" description="Total number of 64 bit words written by the GP2 to the system bus per frame."/>
+    <event event="0x08" title="Mali-4xx VP" name="Read bursts, system bus" description="Number of read bursts by the GP2 from the system bus per frame."/>
+    <event event="0x09" title="Mali-4xx VP" name="Write bursts, system bus" description="Number of write bursts from the MaliGP2 to the system bus per frame."/>
+    <event event="0x0a" title="Mali-4xx VP" name="Vertices processed" description="Number of vertices processed by the MaliGP2 per frame."/>
+    <event event="0x0b" title="Mali-4xx VP" name="Vertices fetched" description="Number of vertices fetched by the MaliGP2 per frame."/>
+    <event event="0x0c" title="Mali-4xx VP" name="Primitives fetched" description="Number of graphics primitives fetched by the MaliGP2 per frame."/>
+    <event event="0x0e" title="Mali-4xx VP" name="Primitives culled" description="Number of graphics primitives discarded per frame, because they were seen from the back or were offscreen."/>
+    <event event="0x0f" title="Mali-4xx VP" name="Commands written to tiles" description="Number of commands (8 Bytes, mainly primitives) written by GP2 to the PP input data structure per frame."/>
+    <event event="0x10" title="Mali-4xx VP" name="Memory blocks allocated" description="Number of overflow data blocks needed for outputting the PP input data structure per frame ."/>
+    <event event="0x13" title="Mali-4xx VP" name="Vertex loader cache misses" description="Number of cache misses for the vertex shader's vertex input unit per frame."/>
+    <event event="0x16" title="Mali-4xx VP" name="Active cycles, vertex shader command processor" description="Number of cycles per frame the GP2 vertex shader command processor was active. This includes time waiting for semaphores."/>
+    <event event="0x17" title="Mali-4xx VP" name="Active cycles, PLBU command processor" description="Number of cycles per frame the MaliGP2 PLBU command processor was active. This includes time waiting for semaphores."/>
+    <event event="0x18" title="Mali-4xx VP" name="Active Cycles, PLBU list writer" description="Number of cycles per frame the MaliGP2 PLBU output unit was active. This includes time spent waiting on the bus."/>
+    <event event="0x19" title="Mali-4xx VP" name="Active cycles, PLBU geometry processing" description="Number of cycles per frame the MaliGP2 PLBU was active, excepting final data output. In other words: active cycles through the prepare list commands. This includes time spent waiting on the bus."/>
+    <event event="0x1b" title="Mali-4xx VP" name="Active cycles, PLBU primitive assembly" description="Number of active cycles per frame spent by the MaliGP2 PLBU doing primitive assembly. This does not include scissoring or final output. This includes time spent waiting on the bus."/>
+    <event event="0x1c" title="Mali-4xx VP" name="Active cycles, PLBU vertex fetcher" description="Number of active cycles per frame spent by the MaliGP2 PLBU fetching vertex data. This includes time spent waiting on the bus."/>
+    <event event="0x1e" title="Mali-4xx VP" name="Active cycles, Bounding-box and command generator" description="Number of active cycles per frame spent by the MaliGP2 PLBU setting up bounding boxes and commands (mainly graphics primitives). This includes time spent waiting on the bus."/>
+    <event event="0x20" title="Mali-4xx VP" name="Active cycles, Scissor tile iterator" description="Number of active cycles per frame spent by the MaliGP2 PLBU iterating over tiles to perform scissoring. This includes time spent waiting on the bus."/>
+    <event event="0x21" title="Mali-4xx VP" name="Active cycles, PLBU tile iterator" description="Number of active cycles per frame spent by the MaliGP2 PLBU iterating over the tiles in the bounding box generating commands (mainly graphics primitives). This includes time spent waiting on the bus."/>
   </category>
-  <category name="Mali GPU Fragment Processor" per_cpu="no">
+  <category name="Mali Fragment Processor" per_cpu="no">
     <counter_set name="ARM_Mali-4xx_FP_0_cnt" title="Mali-4xx FP0" description="Mali GPU Fragment Processor 0" count="2"/>
     <counter_set name="ARM_Mali-4xx_FP_1_cnt" title="Mali-4xx FP1" description="Mali GPU Fragment Processor 1" count="2"/>
     <counter_set name="ARM_Mali-4xx_FP_2_cnt" title="Mali-4xx FP2" description="Mali GPU Fragment Processor 2" count="2"/>
@@ -37,7 +36,6 @@
     <counter_set name="ARM_Mali-4xx_FP_5_cnt" title="Mali-4xx FP5" description="Mali GPU Fragment Processor 5" count="2"/>
     <counter_set name="ARM_Mali-4xx_FP_6_cnt" title="Mali-4xx FP6" description="Mali GPU Fragment Processor 6" count="2"/>
     <counter_set name="ARM_Mali-4xx_FP_7_cnt" title="Mali-4xx FP7" description="Mali GPU Fragment Processor 7" count="2"/>
-
     <event event="0x00" title="Mali-4xx FP" name="Active clock cycles" description="Active clock cycles, between polygon start and IRQ."/>
     <event event="0x02" title="Mali-4xx FP" name="Total bus reads" description="Total number of 64-bit words read from the bus."/>
     <event event="0x03" title="Mali-4xx FP" name="Total bus writes" description="Total number of 64-bit words written to the bus."/>
     <event event="0x3c" title="Mali-4xx FP" name="Program cache hit count" description="Number of hits in the program cache."/>
     <event event="0x3d" title="Mali-4xx FP" name="Program cache miss count" description="Number of misses in the program cache."/>
   </category>
-  <counter_set name="ARM_Mali-4xx_L2_0_cnt" title="Mali-4xx L2 0" description="Mali GPU L2 Cache Core 0" count="2"/>
-  <category name="Mali-4xx-L2_0" counter_set="ARM_Mali-4xx_L2_0_cnt" per_cpu="no">
+  <counter_set name="ARM_Mali-4xx_L2_0_cnt" title="Mali-4xx L2" description="Mali GPU L2 Cache Core 0" count="2"/>
+  <category name="Mali-4xx L2" counter_set="ARM_Mali-4xx_L2_0_cnt" per_cpu="no">
     <event event="0x01" title="Mali L2 Cache" name="Total clock cycles" description="Total clock cycles"/>
     <event event="0x02" title="Mali L2 Cache" name="Active clock cycles" description="Active clock cycles"/>
-
     <option_set name="All">
       <option event_delta="0x08" name="Master" description="Master"/>
       <option event_delta="0x10" name="All slaves" description="All slaves"/>
       <option event_delta="0x50" name="Slave 3" description="Slave 3"/>
       <option event_delta="0x60" name="Slave 4" description="Slave 4"/>
     </option_set>
-
     <option_set name="Slaves">
       <option event_delta="0x10" name="All slaves" description="All slaves"/>
       <option event_delta="0x20" name="Slave 0" description="Slave 0"/>
       <option event_delta="0x50" name="Slave 3" description="Slave 3"/>
       <option event_delta="0x60" name="Slave 4" description="Slave 4"/>
     </option_set>
-
     <event event="0x00" option_set="All" title="Mali L2 Cache" name="Read transactions" description="Read transactions"/>
     <event event="0x01" option_set="All" title="Mali L2 Cache" name="Write transactions" description="Write transactions"/>
     <event event="0x02" option_set="All" title="Mali L2 Cache" name="Words read" description="Words read"/>
     <event event="0x08" option_set="Slaves" title="Mali L2 Cache" name="Cacheable read transactions" description="Cacheable read transactions"/>
   </category>
   <counter_set name="ARM_Mali-4xx_L2_1_cnt" title="Mali-4xx L2 1" description="Mali GPU L2 Cache Core 1" count="2"/>
-  <category name="Mali-4xx-L2_1" counter_set="ARM_Mali-4xx_L2_1_cnt" per_cpu="no">
-    <event event="0x01" title="Mali L2 Cache" name="Total clock cycles" description="Total clock cycles"/>
-    <event event="0x02" title="Mali L2 Cache" name="Active clock cycles" description="Active clock cycles"/>
-
+  <category name="Mali-4xx L2_1" counter_set="ARM_Mali-4xx_L2_1_cnt" per_cpu="no">
+    <event event="0x01" title="Mali L2 Cache 1" name="Total clock cycles" description="Total clock cycles"/>
+    <event event="0x02" title="Mali L2 Cache 1" name="Active clock cycles" description="Active clock cycles"/>
     <option_set name="All">
       <option event_delta="0x08" name="Master" description="Master"/>
       <option event_delta="0x10" name="All slaves" description="All slaves"/>
       <option event_delta="0x50" name="Slave 3" description="Slave 3"/>
       <option event_delta="0x60" name="Slave 4" description="Slave 4"/>
     </option_set>
-
     <option_set name="Slaves">
       <option event_delta="0x10" name="All slaves" description="All slaves"/>
       <option event_delta="0x20" name="Slave 0" description="Slave 0"/>
       <option event_delta="0x50" name="Slave 3" description="Slave 3"/>
       <option event_delta="0x60" name="Slave 4" description="Slave 4"/>
     </option_set>
-
-    <event event="0x00" option_set="All" title="Mali L2 Cache" name="Read transactions" description="Read transactions"/>
-    <event event="0x01" option_set="All" title="Mali L2 Cache" name="Write transactions" description="Write transactions"/>
-    <event event="0x02" option_set="All" title="Mali L2 Cache" name="Words read" description="Words read"/>
-    <event event="0x03" option_set="All" title="Mali L2 Cache" name="Words written" description="Words written"/>
-    <event event="0x04" option_set="Slaves" title="Mali L2 Cache" name="Read hits" description="Read hits"/>
-    <event event="0x05" option_set="Slaves" title="Mali L2 Cache" name="Read misses" description="Read misses"/>
-    <event event="0x06" option_set="Slaves" title="Mali L2 Cache" name="Write invalidates" description="Write invalidates"/>
-    <event event="0x07" option_set="Slaves" title="Mali L2 Cache" name="Read invalidates" description="Read invalidates"/>
-    <event event="0x08" option_set="Slaves" title="Mali L2 Cache" name="Cacheable read transactions" description="Cacheable read transactions"/>
+    <event event="0x00" option_set="All" title="Mali L2 Cache 1" name="Read transactions" description="Read transactions"/>
+    <event event="0x01" option_set="All" title="Mali L2 Cache 1" name="Write transactions" description="Write transactions"/>
+    <event event="0x02" option_set="All" title="Mali L2 Cache 1" name="Words read" description="Words read"/>
+    <event event="0x03" option_set="All" title="Mali L2 Cache 1" name="Words written" description="Words written"/>
+    <event event="0x04" option_set="Slaves" title="Mali L2 Cache 1" name="Read hits" description="Read hits"/>
+    <event event="0x05" option_set="Slaves" title="Mali L2 Cache 1" name="Read misses" description="Read misses"/>
+    <event event="0x06" option_set="Slaves" title="Mali L2 Cache 1" name="Write invalidates" description="Write invalidates"/>
+    <event event="0x07" option_set="Slaves" title="Mali L2 Cache 1" name="Read invalidates" description="Read invalidates"/>
+    <event event="0x08" option_set="Slaves" title="Mali L2 Cache 1" name="Cacheable read transactions" description="Cacheable read transactions"/>
   </category>
   <counter_set name="ARM_Mali-4xx_L2_2_cnt" title="Mali-4xx L2 2" description="Mali GPU L2 Cache Core 2" count="2"/>
-  <category name="Mali-4xx-L2_2" counter_set="ARM_Mali-4xx_L2_2_cnt" per_cpu="no">
-    <event event="0x01" title="Mali L2 Cache" name="Total clock cycles" description="Total clock cycles"/>
-    <event event="0x02" title="Mali L2 Cache" name="Active clock cycles" description="Active clock cycles"/>
-
+  <category name="Mali-4xx L2_2" counter_set="ARM_Mali-4xx_L2_2_cnt" per_cpu="no">
+    <event event="0x01" title="Mali L2 Cache 2" name="Total clock cycles" description="Total clock cycles"/>
+    <event event="0x02" title="Mali L2 Cache 2" name="Active clock cycles" description="Active clock cycles"/>
     <option_set name="All">
       <option event_delta="0x08" name="Master" description="Master"/>
       <option event_delta="0x10" name="All slaves" description="All slaves"/>
       <option event_delta="0x50" name="Slave 3" description="Slave 3"/>
       <option event_delta="0x60" name="Slave 4" description="Slave 4"/>
     </option_set>
-
     <option_set name="Slaves">
       <option event_delta="0x10" name="All slaves" description="All slaves"/>
       <option event_delta="0x20" name="Slave 0" description="Slave 0"/>
       <option event_delta="0x50" name="Slave 3" description="Slave 3"/>
       <option event_delta="0x60" name="Slave 4" description="Slave 4"/>
     </option_set>
-
-    <event event="0x00" option_set="All" title="Mali L2 Cache" name="Read transactions" description="Read transactions"/>
-    <event event="0x01" option_set="All" title="Mali L2 Cache" name="Write transactions" description="Write transactions"/>
-    <event event="0x02" option_set="All" title="Mali L2 Cache" name="Words read" description="Words read"/>
-    <event event="0x03" option_set="All" title="Mali L2 Cache" name="Words written" description="Words written"/>
-    <event event="0x04" option_set="Slaves" title="Mali L2 Cache" name="Read hits" description="Read hits"/>
-    <event event="0x05" option_set="Slaves" title="Mali L2 Cache" name="Read misses" description="Read misses"/>
-    <event event="0x06" option_set="Slaves" title="Mali L2 Cache" name="Write invalidates" description="Write invalidates"/>
-    <event event="0x07" option_set="Slaves" title="Mali L2 Cache" name="Read invalidates" description="Read invalidates"/>
-    <event event="0x08" option_set="Slaves" title="Mali L2 Cache" name="Cacheable read transactions" description="Cacheable read transactions"/>
+    <event event="0x00" option_set="All" title="Mali L2 Cache 2" name="Read transactions" description="Read transactions"/>
+    <event event="0x01" option_set="All" title="Mali L2 Cache 2" name="Write transactions" description="Write transactions"/>
+    <event event="0x02" option_set="All" title="Mali L2 Cache 2" name="Words read" description="Words read"/>
+    <event event="0x03" option_set="All" title="Mali L2 Cache 2" name="Words written" description="Words written"/>
+    <event event="0x04" option_set="Slaves" title="Mali L2 Cache 2" name="Read hits" description="Read hits"/>
+    <event event="0x05" option_set="Slaves" title="Mali L2 Cache 2" name="Read misses" description="Read misses"/>
+    <event event="0x06" option_set="Slaves" title="Mali L2 Cache 2" name="Write invalidates" description="Write invalidates"/>
+    <event event="0x07" option_set="Slaves" title="Mali L2 Cache 2" name="Read invalidates" description="Read invalidates"/>
+    <event event="0x08" option_set="Slaves" title="Mali L2 Cache 2" name="Cacheable read transactions" description="Cacheable read transactions"/>
   </category>
-  <category name="ARM Mali-4xx Filmstrip" counter_set="ARM_Mali-4xx_Filmstrip_cnt" per_cpu="no">
+  <counter_set name="ARM_Mali-4xx_Filmstrip_cnt" count="1"/>
+  <category name="Mali-4xx Filmstrip" counter_set="ARM_Mali-4xx_Filmstrip_cnt" per_cpu="no">
     <option_set name="fs">
       <option event_delta="0x3c" name="1:60" description="captures every 60th frame"/>
       <option event_delta="0x1e" name="1:30" description="captures every 30th frame"/>
   <category name="ARM_Mali-4xx_Frequency" per_cpu="no">
     <event counter="ARM_Mali-4xx_Frequency" title="Mali GPU Frequency" name="Frequency" display="average" average_selection="yes" units="MHz" description="GPU core frequency."/>
   </category>
-  <category name="Mali-4xx-SW" counter_set="ARM_Mali-4xx_SW_cnt" per_cpu="no">
+  <category name="Mali-4xx Activity" counter_set="ARM_Mali-4xx_Activity_cnt">
+    <event counter="ARM_Mali-4xx_fragment" title="GPU Fragment" name="Activity" class="activity" activity1="Activity" activity_color1="0x00006fcc" rendering_type="bar" average_selection="yes" average_cores="yes" percentage="yes" description="GPU Fragment Activity"/>
+    <event counter="ARM_Mali-4xx_vertex" title="GPU Vertex" name="Activity" class="activity" activity1="Activity" activity_color1="0x00eda000" rendering_type="bar" average_selection="yes" percentage="yes" description="GPU Vertex Activity"/>
+  </category>
+  <category name="Mali-4xx Software Counters" counter_set="ARM_Mali-4xx_SW_cnt" per_cpu="no">
     <!-- EGL Counters -->
     <event counter="ARM_Mali-4xx_SW_0" title="Mali EGL Software Counters" name="Blit Time" description="Time spent blitting the framebuffer from video memory to framebuffer."/>
     <!-- glDrawElements Counters -->
diff --git a/tools/gator/daemon/events-Mali-Midgard.xml b/tools/gator/daemon/events-Mali-Midgard.xml
new file mode 100644 (file)
index 0000000..b6ab4b8
--- /dev/null
@@ -0,0 +1,46 @@
+  <category name="Mali-Midgard Software Counters" per_cpu="no">
+    <event counter="ARM_Mali-Midgard_TOTAL_ALLOC_PAGES" title="Mali Total Alloc Pages" name="Total number of allocated pages" description="Mali total number of allocated pages."/>
+  </category>
+  <category name="Mali-Midgard PM Shader" per_cpu="no">
+    <event counter="ARM_Mali-Midgard_PM_SHADER_0" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 0" description="Mali PM Shader: PM Shader Core 0."/>
+    <event counter="ARM_Mali-Midgard_PM_SHADER_1" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 1" description="Mali PM Shader: PM Shader Core 1."/>
+    <event counter="ARM_Mali-Midgard_PM_SHADER_2" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 2" description="Mali PM Shader: PM Shader Core 2."/>
+    <event counter="ARM_Mali-Midgard_PM_SHADER_3" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 3" description="Mali PM Shader: PM Shader Core 3."/>
+    <event counter="ARM_Mali-Midgard_PM_SHADER_4" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 4" description="Mali PM Shader: PM Shader Core 4."/>
+    <event counter="ARM_Mali-Midgard_PM_SHADER_5" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 5" description="Mali PM Shader: PM Shader Core 5."/>
+    <event counter="ARM_Mali-Midgard_PM_SHADER_6" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 6" description="Mali PM Shader: PM Shader Core 6."/>
+    <event counter="ARM_Mali-Midgard_PM_SHADER_7" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 7" description="Mali PM Shader: PM Shader Core 7."/>
+  </category>
+  <category name="Mali-Midgard PM Tiler" per_cpu="no">
+    <event counter="ARM_Mali-Midgard_PM_TILER_0" display="average" average_selection="yes" percentage="yes" title="Mali PM Tiler" name="PM Tiler Core 0" description="Mali PM Tiler: PM Tiler Core 0."/>
+  </category>
+  <category name="Mali-Midgard PM L2" per_cpu="no">
+    <event counter="ARM_Mali-Midgard_PM_L2_0" display="average" average_selection="yes" percentage="yes" title="Mali PM L2" name="PM L2 Core 0" description="Mali PM L2: PM L2 Core 0."/>
+    <event counter="ARM_Mali-Midgard_PM_L2_1" display="average" average_selection="yes" percentage="yes" title="Mali PM L2" name="PM L2 Core 1" description="Mali PM L2: PM L2 Core 1."/>
+  </category>
+  <category name="Mali-Midgard MMU Address Space" per_cpu="no">
+    <event counter="ARM_Mali-Midgard_MMU_AS_0" display="average" average_selection="yes" percentage="yes" title="Mali MMU Address Space" name="MMU Address Space 0" description="Mali MMU Address Space 0 usage."/>
+    <event counter="ARM_Mali-Midgard_MMU_AS_1" display="average" average_selection="yes" percentage="yes" title="Mali MMU Address Space" name="MMU Address Space 1" description="Mali MMU Address Space 1 usage."/>
+    <event counter="ARM_Mali-Midgard_MMU_AS_2" display="average" average_selection="yes" percentage="yes" title="Mali MMU Address Space" name="MMU Address Space 2" description="Mali MMU Address Space 2 usage."/>
+    <event counter="ARM_Mali-Midgard_MMU_AS_3" display="average" average_selection="yes" percentage="yes" title="Mali MMU Address Space" name="MMU Address Space 3" description="Mali MMU Address Space 3 usage."/>
+  </category>
+  <category name="Mali-Midgard MMU Page Fault" per_cpu="no">
+    <event counter="ARM_Mali-Midgard_MMU_PAGE_FAULT_0" title="Mali MMU Page Fault Add. Space" name="Mali MMU Page Fault Add. Space 0" description="Reports the number of newly allocated pages after a MMU page fault in address space 0."/>
+    <event counter="ARM_Mali-Midgard_MMU_PAGE_FAULT_1" title="Mali MMU Page Fault Add. Space" name="Mali MMU Page Fault Add. Space 1" description="Reports the number of newly allocated pages after a MMU page fault in address space 1."/>
+    <event counter="ARM_Mali-Midgard_MMU_PAGE_FAULT_2" title="Mali MMU Page Fault Add. Space" name="Mali MMU Page Fault Add. Space 2" description="Reports the number of newly allocated pages after a MMU page fault in address space 2."/>
+    <event counter="ARM_Mali-Midgard_MMU_PAGE_FAULT_3" title="Mali MMU Page Fault Add. Space" name="Mali MMU Page Fault Add. Space 3" description="Reports the number of newly allocated pages after a MMU page fault in address space 3."/>
+  </category>
+  <counter_set name="ARM_Mali-Midgard_Filmstrip_cnt" count="1"/>
+  <category name="Mali-Midgard Filmstrip" counter_set="ARM_Mali-Midgard_Filmstrip_cnt" per_cpu="no">
+    <option_set name="fs">
+      <option event_delta="0x3c" name="1:60" description="captures every 60th frame"/>
+      <option event_delta="0x1e" name="1:30" description="captures every 30th frame"/>
+      <option event_delta="0xa" name="1:10" description="captures every 10th frame"/>
+    </option_set>
+    <event event="0x0400" option_set="fs" title="ARM Mali-Midgard" name="Filmstrip" description="Scaled framebuffer"/>
+  </category>
+  <category name="Mali-Midgard Activity" per_cpu="no">
+    <event counter="ARM_Mali-Midgard_fragment" title="GPU Fragment" name="Activity" class="activity" activity1="Activity" activity_color1="0x00006fcc" rendering_type="bar" average_selection="yes" percentage="yes" cores="1" description="GPU Job Slot 0 Activity"/>
+    <event counter="ARM_Mali-Midgard_vertex" title="GPU Vertex-Tiling-Compute" name="Activity" class="activity" activity1="Activity" activity_color1="0x00eda000" rendering_type="bar" average_selection="yes" percentage="yes" cores="1" description="GPU Job Slot 1 Activity"/>
+    <event counter="ARM_Mali-Midgard_opencl" title="GPU Vertex-Compute" name="Activity" class="activity" activity1="Activity" activity_color1="0x00ef022f" rendering_type="bar" average_selection="yes" percentage="yes" cores="1" description="GPU Job Slot 2 Activity"/>
+  </category>
diff --git a/tools/gator/daemon/events-Mali-Midgard_hw.xml b/tools/gator/daemon/events-Mali-Midgard_hw.xml
new file mode 100644 (file)
index 0000000..4f3323f
--- /dev/null
@@ -0,0 +1,91 @@
+  <category name="Mali-Midgard Job Manager" per_cpu="no">
+    <event counter="ARM_Mali-Midgard_GPU_ACTIVE" title="Mali Job Manager Cycles" name="GPU cycles" description="Number of cycles the GPU was active"/>
+    <event counter="ARM_Mali-Midgard_IRQ_ACTIVE" title="Mali Job Manager Cycles" name="IRQ cycles" description="Number of cycles the GPU had a pending interrupt"/>
+    <event counter="ARM_Mali-Midgard_JS0_ACTIVE" title="Mali Job Manager Cycles" name="JS0 cycles" description="Number of cycles JS0 (fragment) was active"/>
+    <event counter="ARM_Mali-Midgard_JS1_ACTIVE" title="Mali Job Manager Cycles" name="JS1 cycles" description="Number of cycles JS1 (vertex/tiler/compute) was active"/>
+    <event counter="ARM_Mali-Midgard_JS2_ACTIVE" title="Mali Job Manager Cycles" name="JS2 cycles" description="Number of cycles JS2 (vertex/compute) was active"/>
+    <event counter="ARM_Mali-Midgard_JS0_JOBS" title="Mali Job Manager Work" name="JS0 jobs" description="Number of Jobs (fragment) completed in JS0"/>
+    <event counter="ARM_Mali-Midgard_JS0_TASKS" title="Mali Job Manager Work" name="JS0 tasks" description="Number of Tasks completed in JS0"/>
+    <event counter="ARM_Mali-Midgard_JS1_JOBS" title="Mali Job Manager Work" name="JS1 jobs" description="Number of Jobs (vertex/tiler/compute) completed in JS1"/>
+    <event counter="ARM_Mali-Midgard_JS1_TASKS" title="Mali Job Manager Work" name="JS1 tasks" description="Number of Tasks completed in JS1"/>
+    <event counter="ARM_Mali-Midgard_JS2_TASKS" title="Mali Job Manager Work" name="JS2 tasks" description="Number of Tasks completed in JS2"/>
+    <event counter="ARM_Mali-Midgard_JS2_JOBS" title="Mali Job Manager Work" name="JS2 jobs" description="Number of Jobs (vertex/compute) completed in JS2"/>
+  </category>
+  <category name="Mali-Midgard Tiler" per_cpu="no">
+    <event counter="ARM_Mali-Midgard_POLYGONS" title="Mali Tiler Primitives" name="Polygons" description="Number of polygons processed"/>
+    <event counter="ARM_Mali-Midgard_QUADS" title="Mali Tiler Primitives" name="Quads" description="Number of quads processed"/>
+    <event counter="ARM_Mali-Midgard_TRIANGLES" title="Mali Tiler Primitives" name="Triangles" description="Number of triangles processed"/>
+    <event counter="ARM_Mali-Midgard_LINES" title="Mali Tiler Primitives" name="Lines" description="Number of lines processed"/>
+    <event counter="ARM_Mali-Midgard_POINTS" title="Mali Tiler Primitives" name="Points" description="Number of points processed"/>
+    <event counter="ARM_Mali-Midgard_FRONT_FACING" title="Mali Tiler Culling" name="Front facing prims" description="Number of front facing primitives"/>
+    <event counter="ARM_Mali-Midgard_BACK_FACING" title="Mali Tiler Culling" name="Back facing prims" description="Number of back facing primitives"/>
+    <event counter="ARM_Mali-Midgard_PRIM_VISIBLE" title="Mali Tiler Culling" name="Visible prims" description="Number of visible primitives"/>
+    <event counter="ARM_Mali-Midgard_PRIM_CULLED" title="Mali Tiler Culling" name="Culled prims" description="Number of culled primitives"/>
+    <event counter="ARM_Mali-Midgard_PRIM_CLIPPED" title="Mali Tiler Culling" name="Clipped prims" description="Number of clipped primitives"/>
+    <event counter="ARM_Mali-Midgard_LEVEL0" title="Mali Tiler Hierarchy" name="L0 prims" description="Number of primitives in hierarchy level 0"/>
+    <event counter="ARM_Mali-Midgard_LEVEL1" title="Mali Tiler Hierarchy" name="L1 prims" description="Number of primitives in hierarchy level 1"/>
+    <event counter="ARM_Mali-Midgard_LEVEL2" title="Mali Tiler Hierarchy" name="L2 prims" description="Number of primitives in hierarchy level 2"/>
+    <event counter="ARM_Mali-Midgard_LEVEL3" title="Mali Tiler Hierarchy" name="L3 prims" description="Number of primitives in hierarchy level 3"/>
+    <event counter="ARM_Mali-Midgard_LEVEL4" title="Mali Tiler Hierarchy" name="L4 prims" description="Number of primitives in hierarchy level 4"/>
+    <event counter="ARM_Mali-Midgard_LEVEL5" title="Mali Tiler Hierarchy" name="L5 prims" description="Number of primitives in hierarchy level 5"/>
+    <event counter="ARM_Mali-Midgard_LEVEL6" title="Mali Tiler Hierarchy" name="L6 prims" description="Number of primitives in hierarchy level 6"/>
+    <event counter="ARM_Mali-Midgard_LEVEL7" title="Mali Tiler Hierarchy" name="L7 prims" description="Number of primitives in hierarchy level 7"/>
+    <event counter="ARM_Mali-Midgard_COMMAND_1" title="Mali Tiler Commands" name="Prims in 1 command" description="Number of primitives producing 1 command"/>
+    <event counter="ARM_Mali-Midgard_COMMAND_2" title="Mali Tiler Commands" name="Prims in 2 command" description="Number of primitives producing 2 commands"/>
+    <event counter="ARM_Mali-Midgard_COMMAND_3" title="Mali Tiler Commands" name="Prims in 3 command" description="Number of primitives producing 3 commands"/>
+    <event counter="ARM_Mali-Midgard_COMMAND_4" title="Mali Tiler Commands" name="Prims in 4 command" description="Number of primitives producing 4 commands"/>
+    <event counter="ARM_Mali-Midgard_COMMAND_4_7" title="Mali Tiler Commands" name="Prims in 4-7 commands" description="Number of primitives producing 4-7 commands"/>
+    <event counter="ARM_Mali-Midgard_COMMAND_5_7" title="Mali Tiler Commands" name="Prims in 5-7 commands" description="Number of primitives producing 5-7 commands"/>
+    <event counter="ARM_Mali-Midgard_COMMAND_8_15" title="Mali Tiler Commands" name="Prims in 8-15 commands" description="Number of primitives producing 8-15 commands"/>
+    <event counter="ARM_Mali-Midgard_COMMAND_16_63" title="Mali Tiler Commands" name="Prims in 16-63 commands" description="Number of primitives producing 16-63 commands"/>
+    <event counter="ARM_Mali-Midgard_COMMAND_64" title="Mali Tiler Commands" name="Prims in &gt;= 64 commands" description="Number of primitives producing &gt;= 64 commands"/>
+  </category>
+  <category name="Mali-Midgard Shader Core" per_cpu="no">
+    <event counter="ARM_Mali-Midgard_TRIPIPE_ACTIVE" title="Mali Core Cycles" name="Tripipe cycles" description="Number of cycles the Tripipe was active"/>
+    <event counter="ARM_Mali-Midgard_FRAG_ACTIVE" title="Mali Core Cycles" name="Fragment cycles" description="Number of cycles fragment processing was active"/>
+    <event counter="ARM_Mali-Midgard_COMPUTE_ACTIVE" title="Mali Core Cycles" name="Compute cycles" description="Number of cycles vertex/compute processing was active"/>
+    <event counter="ARM_Mali-Midgard_FRAG_CYCLE_NO_TILE" title="Mali Core Cycles" name="Fragment cycles waiting for tile" description="Number of cycles spent waiting for a physical tile buffer"/>
+    <event counter="ARM_Mali-Midgard_FRAG_THREADS" title="Mali Core Threads" name="Fragment threads" description="Number of fragment threads started"/>
+    <event counter="ARM_Mali-Midgard_FRAG_DUMMY_THREADS" title="Mali Core Threads" name="Dummy fragment threads" description="Number of dummy fragment threads started"/>
+    <event counter="ARM_Mali-Midgard_FRAG_QUADS_LZS_TEST" title="Mali Core Threads" name="Frag threads doing late ZS" description="Number of threads doing late ZS test"/>
+    <event counter="ARM_Mali-Midgard_FRAG_QUADS_LZS_KILLED" title="Mali Core Threads" name="Frag threads killed late ZS" description="Number of threads killed by late ZS test"/>
+    <event counter="ARM_Mali-Midgard_FRAG_THREADS_LZS_TEST" title="Mali Core Threads" name="Frag threads doing late ZS" description="Number of threads doing late ZS test"/>
+    <event counter="ARM_Mali-Midgard_FRAG_THREADS_LZS_KILLED" title="Mali Core Threads" name="Frag threads killed late ZS" description="Number of threads killed by late ZS test"/>
+    <event counter="ARM_Mali-Midgard_COMPUTE_TASKS" title="Mali Compute Threads" name="Compute tasks" description="Number of compute tasks"/>
+    <event counter="ARM_Mali-Midgard_COMPUTE_THREADS" title="Mali Compute Threads" name="Compute threads started" description="Number of compute threads started"/>
+    <event counter="ARM_Mali-Midgard_COMPUTE_CYCLES_DESC" title="Mali Compute Threads" name="Compute cycles awaiting descriptors" description="Number of compute cycles spent waiting for descriptors"/>
+    <event counter="ARM_Mali-Midgard_FRAG_PRIMATIVES" title="Mali Fragment Primitives" name="Primitives loaded" description="Number of primitives loaded from tiler"/>
+    <event counter="ARM_Mali-Midgard_FRAG_PRIMATIVES_DROPPED" title="Mali Fragment Primitives" name="Primitives dropped" description="Number of primitives dropped because out of tile"/>
+    <event counter="ARM_Mali-Midgard_FRAG_PRIMITIVES" title="Mali Fragment Primitives" name="Primitives loaded" description="Number of primitives loaded from tiler"/>
+    <event counter="ARM_Mali-Midgard_FRAG_PRIMITIVES_DROPPED" title="Mali Fragment Primitives" name="Primitives dropped" description="Number of primitives dropped because out of tile"/>
+    <event counter="ARM_Mali-Midgard_FRAG_QUADS_RAST" title="Mali Fragment Quads" name="Quads rasterized" description="Number of quads rasterized"/>
+    <event counter="ARM_Mali-Midgard_FRAG_QUADS_EZS_TEST" title="Mali Fragment Quads" name="Quads doing early ZS" description="Number of quads doing early ZS test"/>
+    <event counter="ARM_Mali-Midgard_FRAG_QUADS_EZS_KILLED" title="Mali Fragment Quads" name="Quads killed early Z" description="Number of quads killed by early ZS test"/>
+    <event counter="ARM_Mali-Midgard_FRAG_NUM_TILES" title="Mali Fragment Tasks" name="Tiles rendered" description="Number of tiles rendered"/>
+    <event counter="ARM_Mali-Midgard_FRAG_TRANS_ELIM" title="Mali Fragment Tasks" name="Tile writes killed by TE" description="Number of tile writes skipped by transaction elimination"/>
+    <event counter="ARM_Mali-Midgard_ARITH_WORDS" title="Mali Arithmetic Pipe" name="A instructions" description="Number of instructions completed by the A-pipe (normalized per pipeline)"/>
+    <event counter="ARM_Mali-Midgard_LS_WORDS" title="Mali Load/Store Pipe" name="LS instructions" description="Number of instructions completed by the LS-pipe"/>
+    <event counter="ARM_Mali-Midgard_LS_ISSUES" title="Mali Load/Store Pipe" name="LS instruction issues" description="Number of instructions issued to the LS-pipe, including restarts"/>
+    <event counter="ARM_Mali-Midgard_TEX_WORDS" title="Mali Texture Pipe" name="T instructions" description="Number of instructions completed by the T-pipe"/>
+    <event counter="ARM_Mali-Midgard_TEX_THREADS" title="Mali Texture Pipe" name="T instruction issues" description="Number of instructions issued to the T-pipe, including restarts"/>
+    <event counter="ARM_Mali-Midgard_TEX_RECIRC_FMISS" title="Mali Texture Pipe" name="Cache misses" description="Number of instructions in the T-pipe, recirculated due to cache miss"/>
+    <event counter="ARM_Mali-Midgard_LSC_READ_HITS" title="Mali Load/Store Cache" name="Read hits" description="Number of read hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-Midgard_LSC_READ_MISSES" title="Mali Load/Store Cache" name="Read misses" description="Number of read misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-Midgard_LSC_WRITE_HITS" title="Mali Load/Store Cache" name="Write hits" description="Number of write hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-Midgard_LSC_WRITE_MISSES" title="Mali Load/Store Cache" name="Write misses" description="Number of write misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-Midgard_LSC_ATOMIC_HITS" title="Mali Load/Store Cache" name="Atomic hits" description="Number of atomic hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-Midgard_LSC_ATOMIC_MISSES" title="Mali Load/Store Cache" name="Atomic misses" description="Number of atomic misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-Midgard_LSC_LINE_FETCHES" title="Mali Load/Store Cache" name="Line fetches" description="Number of line fetches in the Load/Store cache"/>
+    <event counter="ARM_Mali-Midgard_LSC_DIRTY_LINE" title="Mali Load/Store Cache" name="Dirty line evictions" description="Number of dirty line evictions in the Load/Store cache"/>
+    <event counter="ARM_Mali-Midgard_LSC_SNOOPS" title="Mali Load/Store Cache" name="Snoops in to LSC" description="Number of coherent memory snoops in to the Load/Store cache"/>
+  </category>
+  <category name="Mali-Midgard L2 and MMU" per_cpu="no">
+    <event counter="ARM_Mali-Midgard_L2_WRITE_BEATS" title="Mali L2 Cache" name="External write beats" description="Number of external bus write beats"/>
+    <event counter="ARM_Mali-Midgard_L2_READ_BEATS" title="Mali L2 Cache" name="External read beats" description="Number of external bus read beats"/>
+    <event counter="ARM_Mali-Midgard_L2_READ_SNOOP" title="Mali L2 Cache" name="Read snoops" description="Number of read transaction snoops"/>
+    <event counter="ARM_Mali-Midgard_L2_READ_HIT" title="Mali L2 Cache" name="L2 read hits" description="Number of reads hitting in the L2 cache"/>
+    <event counter="ARM_Mali-Midgard_L2_WRITE_SNOOP" title="Mali L2 Cache" name="Write snoops" description="Number of write transaction snoops"/>
+    <event counter="ARM_Mali-Midgard_L2_WRITE_HIT" title="Mali L2 Cache" name="L2 write hits" description="Number of writes hitting in the L2 cache"/>
+    <event counter="ARM_Mali-Midgard_L2_EXT_AR_STALL" title="Mali L2 Cache" name="External bus stalls (AR)" description="Number of cycles a valid read address (AR) is stalled by the external interconnect"/>
+    <event counter="ARM_Mali-Midgard_L2_EXT_W_STALL" title="Mali L2 Cache" name="External bus stalls (W)" description="Number of cycles a valid write data (W channel) is stalled by the external interconnect"/>
+  </category>
diff --git a/tools/gator/daemon/events-Mali-T60x_hw.xml b/tools/gator/daemon/events-Mali-T60x_hw.xml
new file mode 100644 (file)
index 0000000..50797e6
--- /dev/null
@@ -0,0 +1,108 @@
+
+  <category name="Mali Job Manager" per_cpu="no">
+
+    <event counter="ARM_Mali-T60x_GPU_ACTIVE" title="Mali Job Manager Cycles" name="GPU cycles" description="Number of cycles GPU active"/>
+    <event counter="ARM_Mali-T60x_IRQ_ACTIVE" title="Mali Job Manager Cycles" name="IRQ cycles" description="Number of cycles GPU interrupt pending"/>
+    <event counter="ARM_Mali-T60x_JS0_ACTIVE" title="Mali Job Manager Cycles" name="JS0 cycles" description="Number of cycles JS0 (fragment) active"/>
+    <event counter="ARM_Mali-T60x_JS1_ACTIVE" title="Mali Job Manager Cycles" name="JS1 cycles" description="Number of cycles JS1 (vertex/tiler/compute) active"/>
+    <event counter="ARM_Mali-T60x_JS2_ACTIVE" title="Mali Job Manager Cycles" name="JS2 cycles" description="Number of cycles JS2 (vertex/compute) active"/>
+
+    <event counter="ARM_Mali-T60x_JS0_JOBS" title="Mali Job Manager Work" name="JS0 jobs" description="Number of Jobs (fragment) completed in JS0"/>
+    <event counter="ARM_Mali-T60x_JS0_TASKS" title="Mali Job Manager Work" name="JS0 tasks" description="Number of Tasks completed in JS0"/>
+    <event counter="ARM_Mali-T60x_JS1_JOBS" title="Mali Job Manager Work" name="JS1 jobs" description="Number of Jobs (vertex/tiler/compute) completed in JS1"/>
+    <event counter="ARM_Mali-T60x_JS1_TASKS" title="Mali Job Manager Work" name="JS1 tasks" description="Number of Tasks completed in JS1"/>
+    <event counter="ARM_Mali-T60x_JS2_TASKS" title="Mali Job Manager Work" name="JS2 tasks" description="Number of Tasks completed in JS2"/>
+    <event counter="ARM_Mali-T60x_JS2_JOBS" title="Mali Job Manager Work" name="JS2 jobs" description="Number of Jobs (vertex/compute) completed in JS2"/>
+
+  </category>
+
+  <category name="Mali Tiler" per_cpu="no">
+
+    <event counter="ARM_Mali-T60x_TI_ACTIVE" title="Mali Tiler Cycles" name="Tiler cycles" description="Number of cycles Tiler active"/>
+
+    <event counter="ARM_Mali-T60x_TI_POLYGONS" title="Mali Tiler Primitives" name="Polygons" description="Number of polygons processed"/>
+    <event counter="ARM_Mali-T60x_TI_QUADS" title="Mali Tiler Primitives" name="Quads" description="Number of quads processed"/>
+    <event counter="ARM_Mali-T60x_TI_TRIANGLES" title="Mali Tiler Primitives" name="Triangles" description="Number of triangles processed"/>
+    <event counter="ARM_Mali-T60x_TI_LINES" title="Mali Tiler Primitives" name="Lines" description="Number of lines processed"/>
+    <event counter="ARM_Mali-T60x_TI_POINTS" title="Mali Tiler Primitives" name="Points" description="Number of points processed"/>
+
+    <event counter="ARM_Mali-T60x_TI_FRONT_FACING" title="Mali Tiler Culling" name="Front facing prims" description="Number of front facing primitives"/>
+    <event counter="ARM_Mali-T60x_TI_BACK_FACING" title="Mali Tiler Culling" name="Back facing prims" description="Number of back facing primitives"/>
+    <event counter="ARM_Mali-T60x_TI_PRIM_VISIBLE" title="Mali Tiler Culling" name="Visible prims" description="Number of visible primitives"/>
+    <event counter="ARM_Mali-T60x_TI_PRIM_CULLED" title="Mali Tiler Culling" name="Culled prims" description="Number of culled primitives"/>
+    <event counter="ARM_Mali-T60x_TI_PRIM_CLIPPED" title="Mali Tiler Culling" name="Clipped prims" description="Number of clipped primitives"/>
+
+    <event counter="ARM_Mali-T60x_TI_LEVEL0" title="Mali Tiler Hierarchy" name="L0 prims" description="Number of primitives in hierarchy level 0"/>
+    <event counter="ARM_Mali-T60x_TI_LEVEL1" title="Mali Tiler Hierarchy" name="L1 prims" description="Number of primitives in hierarchy level 1"/>
+    <event counter="ARM_Mali-T60x_TI_LEVEL2" title="Mali Tiler Hierarchy" name="L2 prims" description="Number of primitives in hierarchy level 2"/>
+    <event counter="ARM_Mali-T60x_TI_LEVEL3" title="Mali Tiler Hierarchy" name="L3 prims" description="Number of primitives in hierarchy level 3"/>
+    <event counter="ARM_Mali-T60x_TI_LEVEL4" title="Mali Tiler Hierarchy" name="L4 prims" description="Number of primitives in hierarchy level 4"/>
+    <event counter="ARM_Mali-T60x_TI_LEVEL5" title="Mali Tiler Hierarchy" name="L5 prims" description="Number of primitives in hierarchy level 5"/>
+    <event counter="ARM_Mali-T60x_TI_LEVEL6" title="Mali Tiler Hierarchy" name="L6 prims" description="Number of primitives in hierarchy level 6"/>
+    <event counter="ARM_Mali-T60x_TI_LEVEL7" title="Mali Tiler Hierarchy" name="L7 prims" description="Number of primitives in hierarchy level 7"/>
+
+  </category>
+
+  <category name="Mali Shader Core" per_cpu="no">
+
+    <event counter="ARM_Mali-T60x_TRIPIPE_ACTIVE" title="Mali Core Cycles" name="Tripipe cycles" description="Number of cycles tripipe was active"/>
+    <event counter="ARM_Mali-T60x_FRAG_ACTIVE" title="Mali Core Cycles" name="Fragment cycles" description="Number of cycles fragment processing was active"/>
+    <event counter="ARM_Mali-T60x_COMPUTE_ACTIVE" title="Mali Core Cycles" name="Compute cycles" description="Number of cycles vertex\compute processing was active"/>
+    <event counter="ARM_Mali-T60x_FRAG_CYCLES_NO_TILE" title="Mali Core Cycles" name="Fragment cycles waiting for tile" description="Number of cycles spent waiting for a physical tile buffer"/>
+
+    <event counter="ARM_Mali-T60x_FRAG_THREADS" title="Mali Fragment Threads" name="Fragment threads" description="Number of fragment threads started"/>
+    <event counter="ARM_Mali-T60x_FRAG_DUMMY_THREADS" title="Mali Fragment Threads" name="Dummy fragment threads" description="Number of dummy fragment threads started"/>
+    <event counter="ARM_Mali-T60x_FRAG_THREADS_LZS_TEST" title="Mali Fragment Threads" name="Fragment threads doing late ZS" description="Number of threads doing late ZS test"/>
+    <event counter="ARM_Mali-T60x_FRAG_THREADS_LZS_KILLED" title="Mali Fragment Threads" name="Fragment threads killed late ZS" description="Number of threads killed by late ZS test"/>
+
+    <event counter="ARM_Mali-T60x_COMPUTE_TASKS" title="Mali Compute Tasks" name="Compute tasks" description="Number of compute tasks"/>
+    <event counter="ARM_Mali-T60x_COMPUTE_THREADS" title="Mali Compute Threads" name="Compute threads" description="Number of compute threads started"/>
+
+    <event counter="ARM_Mali-T60x_FRAG_PRIMITIVES" title="Mali Fragment Primitives" name="Primitives loaded" description="Number of primitives loaded from tiler"/>
+    <event counter="ARM_Mali-T60x_FRAG_PRIMITIVES_DROPPED" title="Mali Fragment Primitives" name="Primitives dropped" description="Number of primitives dropped because out of tile"/>
+
+    <event counter="ARM_Mali-T60x_FRAG_QUADS_RAST" title="Mali Fragment Quads" name="Quads rasterized" description="Number of quads rasterized"/>
+    <event counter="ARM_Mali-T60x_FRAG_QUADS_EZS_TEST" title="Mali Fragment Quads" name="Quads doing early ZS" description="Number of quads doing early ZS test"/>
+    <event counter="ARM_Mali-T60x_FRAG_QUADS_EZS_KILLED" title="Mali Fragment Quads" name="Quads killed early Z" description="Number of quads killed by early ZS test"/>
+
+    <event counter="ARM_Mali-T60x_FRAG_NUM_TILES" title="Mali Fragment Tasks" name="Tiles rendered" description="Number of tiles rendered"/>
+    <event counter="ARM_Mali-T60x_FRAG_TRANS_ELIM" title="Mali Fragment Tasks" name="Tile writes killed by TE" description="Number of tile writes skipped by transaction elimination"/>
+
+    <event counter="ARM_Mali-T60x_ARITH_WORDS" title="Mali Arithmetic Pipe" name="A instructions" description="Number of instructions completed by the A-pipe (normalized per pipeline)"/>
+
+    <event counter="ARM_Mali-T60x_LS_WORDS" title="Mali Load/Store Pipe" name="LS instructions" description="Number of instructions completed by the LS-pipe"/>
+    <event counter="ARM_Mali-T60x_LS_ISSUES" title="Mali Load/Store Pipe" name="LS instruction issues" description="Number of instructions issued to the LS-pipe, including restarts"/>
+
+    <event counter="ARM_Mali-T60x_TEX_WORDS" title="Mali Texture Pipe" name="T instructions" description="Number of instructions completed by the T-pipe"/>
+    <event counter="ARM_Mali-T60x_TEX_ISSUES" title="Mali Texture Pipe" name="T instruction issues" description="Number of threads through loop 2 address calculation"/>
+    <event counter="ARM_Mali-T60x_TEX_RECIRC_FMISS" title="Mali Texture Pipe" name="Cache misses" description="Number of instructions in the T-pipe, recirculated due to cache miss"/>
+
+    <event counter="ARM_Mali-T60x_LSC_READ_HITS" title="Mali Load/Store Cache" name="Read hits" description="Number of read hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-T60x_LSC_READ_MISSES" title="Mali Load/Store Cache" name="Read misses" description="Number of read misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-T60x_LSC_WRITE_HITS" title="Mali Load/Store Cache" name="Write hits" description="Number of write hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-T60x_LSC_WRITE_MISSES" title="Mali Load/Store Cache" name="Write misses" description="Number of write misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-T60x_LSC_ATOMIC_HITS" title="Mali Load/Store Cache" name="Atomic hits" description="Number of atomic hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-T60x_LSC_ATOMIC_MISSES" title="Mali Load/Store Cache" name="Atomic misses" description="Number of atomic misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-T60x_LSC_LINE_FETCHES" title="Mali Load/Store Cache" name="Line fetches" description="Number of line fetches in the Load/Store cache"/>
+    <event counter="ARM_Mali-T60x_LSC_DIRTY_LINE" title="Mali Load/Store Cache" name="Dirty line evictions" description="Number of dirty line evictions in the Load/Store cache"/>
+    <event counter="ARM_Mali-T60x_LSC_SNOOPS" title="Mali Load/Store Cache" name="Snoops into LSC" description="Number of coherent memory snoops into the Load/Store cache"/>
+
+  </category>
+
+  <category name="Mali L2 Cache" per_cpu="no">
+
+    <event counter="ARM_Mali-T60x_L2_EXT_WRITE_BEATS" title="Mali L2 Cache" name="External write beats" description="Number of external bus write beats"/>
+    <event counter="ARM_Mali-T60x_L2_EXT_READ_BEATS" title="Mali L2 Cache" name="External read beats" description="Number of external bus read beats"/>
+    <event counter="ARM_Mali-T60x_L2_READ_SNOOP" title="Mali L2 Cache" name="Read snoops" description="Number of read transaction snoops"/>
+    <event counter="ARM_Mali-T60x_L2_READ_HIT" title="Mali L2 Cache" name="L2 read hits" description="Number of reads hitting in the L2 cache"/>
+    <event counter="ARM_Mali-T60x_L2_WRITE_SNOOP" title="Mali L2 Cache" name="Write snoops" description="Number of write transaction snoops"/>
+    <event counter="ARM_Mali-T60x_L2_WRITE_HIT" title="Mali L2 Cache" name="L2 write hits" description="Number of writes hitting in the L2 cache"/>
+    <event counter="ARM_Mali-T60x_L2_EXT_AR_STALL" title="Mali L2 Cache" name="External bus stalls (AR)" description="Number of cycles a valid read address (AR) is stalled by the external interconnect"/>
+    <event counter="ARM_Mali-T60x_L2_EXT_W_STALL" title="Mali L2 Cache" name="External bus stalls (W)" description="Number of cycles a valid write data (W channel) is stalled by the external interconnect"/>
+    <event counter="ARM_Mali-T60x_L2_EXT_R_BUF_FULL" title="Mali L2 Cache" name="External bus response buffer full" description="Number of cycles a valid request is blocked by a full response buffer"/>
+    <event counter="ARM_Mali-T60x_L2_EXT_RD_BUF_FULL" title="Mali L2 Cache" name="External bus read data buffer full" description="Number of cycles a valid request is blocked by a full read data buffer"/>
+    <event counter="ARM_Mali-T60x_L2_EXT_W_BUF_FULL" title="Mali L2 Cache" name="External bus write buffer full" description="Number of cycles a valid request is blocked by a full write buffer"/>
+    <event counter="ARM_Mali-T60x_L2_READ_LOOKUP" title="Mali L2 Cache" name="L2 read lookups" description="Number of reads into the L2 cache"/>
+    <event counter="ARM_Mali-T60x_L2_WRITE_LOOKUP" title="Mali L2 Cache" name="L2 write lookups" description="Number of writes into the L2 cache"/>
+
+  </category>
diff --git a/tools/gator/daemon/events-Mali-T62x_hw.xml b/tools/gator/daemon/events-Mali-T62x_hw.xml
new file mode 100644 (file)
index 0000000..6ecc53c
--- /dev/null
@@ -0,0 +1,109 @@
+
+  <category name="Mali Job Manager" per_cpu="no">
+
+    <event counter="ARM_Mali-T62x_GPU_ACTIVE" title="Mali Job Manager Cycles" name="GPU cycles" description="Number of cycles GPU active"/>
+    <event counter="ARM_Mali-T62x_IRQ_ACTIVE" title="Mali Job Manager Cycles" name="IRQ cycles" description="Number of cycles GPU interrupt pending"/>
+    <event counter="ARM_Mali-T62x_JS0_ACTIVE" title="Mali Job Manager Cycles" name="JS0 cycles" description="Number of cycles JS0 (fragment) active"/>
+    <event counter="ARM_Mali-T62x_JS1_ACTIVE" title="Mali Job Manager Cycles" name="JS1 cycles" description="Number of cycles JS1 (vertex/tiler/compute) active"/>
+    <event counter="ARM_Mali-T62x_JS2_ACTIVE" title="Mali Job Manager Cycles" name="JS2 cycles" description="Number of cycles JS2 (vertex/compute) active"/>
+
+    <event counter="ARM_Mali-T62x_JS0_JOBS" title="Mali Job Manager Work" name="JS0 jobs" description="Number of Jobs (fragment) completed in JS0"/>
+    <event counter="ARM_Mali-T62x_JS0_TASKS" title="Mali Job Manager Work" name="JS0 tasks" description="Number of Tasks completed in JS0"/>
+    <event counter="ARM_Mali-T62x_JS1_JOBS" title="Mali Job Manager Work" name="JS1 jobs" description="Number of Jobs (vertex/tiler/compute) completed in JS1"/>
+    <event counter="ARM_Mali-T62x_JS1_TASKS" title="Mali Job Manager Work" name="JS1 tasks" description="Number of Tasks completed in JS1"/>
+    <event counter="ARM_Mali-T62x_JS2_TASKS" title="Mali Job Manager Work" name="JS2 tasks" description="Number of Tasks completed in JS2"/>
+    <event counter="ARM_Mali-T62x_JS2_JOBS" title="Mali Job Manager Work" name="JS2 jobs" description="Number of Jobs (vertex/compute) completed in JS2"/>
+
+  </category>
+
+  <category name="Mali Tiler" per_cpu="no">
+
+    <event counter="ARM_Mali-T62x_TI_ACTIVE" title="Mali Tiler Cycles" name="Tiler cycles" description="Number of cycles Tiler active"/>
+
+    <event counter="ARM_Mali-T62x_TI_POLYGONS" title="Mali Tiler Primitives" name="Polygons" description="Number of polygons processed"/>
+    <event counter="ARM_Mali-T62x_TI_QUADS" title="Mali Tiler Primitives" name="Quads" description="Number of quads processed"/>
+    <event counter="ARM_Mali-T62x_TI_TRIANGLES" title="Mali Tiler Primitives" name="Triangles" description="Number of triangles processed"/>
+    <event counter="ARM_Mali-T62x_TI_LINES" title="Mali Tiler Primitives" name="Lines" description="Number of lines processed"/>
+    <event counter="ARM_Mali-T62x_TI_POINTS" title="Mali Tiler Primitives" name="Points" description="Number of points processed"/>
+
+    <event counter="ARM_Mali-T62x_TI_FRONT_FACING" title="Mali Tiler Culling" name="Front facing prims" description="Number of front facing primitives"/>
+    <event counter="ARM_Mali-T62x_TI_BACK_FACING" title="Mali Tiler Culling" name="Back facing prims" description="Number of back facing primitives"/>
+    <event counter="ARM_Mali-T62x_TI_PRIM_VISIBLE" title="Mali Tiler Culling" name="Visible prims" description="Number of visible primitives"/>
+    <event counter="ARM_Mali-T62x_TI_PRIM_CULLED" title="Mali Tiler Culling" name="Culled prims" description="Number of culled primitives"/>
+    <event counter="ARM_Mali-T62x_TI_PRIM_CLIPPED" title="Mali Tiler Culling" name="Clipped prims" description="Number of clipped primitives"/>
+
+    <event counter="ARM_Mali-T62x_TI_LEVEL0" title="Mali Tiler Hierarchy" name="L0 prims" description="Number of primitives in hierarchy level 0"/>
+    <event counter="ARM_Mali-T62x_TI_LEVEL1" title="Mali Tiler Hierarchy" name="L1 prims" description="Number of primitives in hierarchy level 1"/>
+    <event counter="ARM_Mali-T62x_TI_LEVEL2" title="Mali Tiler Hierarchy" name="L2 prims" description="Number of primitives in hierarchy level 2"/>
+    <event counter="ARM_Mali-T62x_TI_LEVEL3" title="Mali Tiler Hierarchy" name="L3 prims" description="Number of primitives in hierarchy level 3"/>
+    <event counter="ARM_Mali-T62x_TI_LEVEL4" title="Mali Tiler Hierarchy" name="L4 prims" description="Number of primitives in hierarchy level 4"/>
+    <event counter="ARM_Mali-T62x_TI_LEVEL5" title="Mali Tiler Hierarchy" name="L5 prims" description="Number of primitives in hierarchy level 5"/>
+    <event counter="ARM_Mali-T62x_TI_LEVEL6" title="Mali Tiler Hierarchy" name="L6 prims" description="Number of primitives in hierarchy level 6"/>
+    <event counter="ARM_Mali-T62x_TI_LEVEL7" title="Mali Tiler Hierarchy" name="L7 prims" description="Number of primitives in hierarchy level 7"/>
+
+  </category>
+
+  <category name="Mali Shader Core" per_cpu="no">
+
+    <event counter="ARM_Mali-T62x_TRIPIPE_ACTIVE" title="Mali Core Cycles" name="Tripipe cycles" description="Number of cycles tripipe was active"/>
+    <event counter="ARM_Mali-T62x_FRAG_ACTIVE" title="Mali Core Cycles" name="Fragment cycles" description="Number of cycles fragment processing was active"/>
+    <event counter="ARM_Mali-T62x_COMPUTE_ACTIVE" title="Mali Core Cycles" name="Compute cycles" description="Number of cycles vertex\compute processing was active"/>
+    <event counter="ARM_Mali-T62x_FRAG_CYCLES_NO_TILE" title="Mali Core Cycles" name="Fragment cycles waiting for tile" description="Number of cycles spent waiting for a physical tile buffer"/>
+    <event counter="ARM_Mali-T62x_FRAG_CYCLES_FPKQ_ACTIVE" title="Mali Core Cycles" name="Fragment cycles pre-pipe buffer not empty" description="Number of cycles the pre-pipe queue contains quads"/>
+
+    <event counter="ARM_Mali-T62x_FRAG_THREADS" title="Mali Fragment Threads" name="Fragment threads" description="Number of fragment threads started"/>
+    <event counter="ARM_Mali-T62x_FRAG_DUMMY_THREADS" title="Mali Fragment Threads" name="Dummy fragment threads" description="Number of dummy fragment threads started"/>
+    <event counter="ARM_Mali-T62x_FRAG_THREADS_LZS_TEST" title="Mali Fragment Threads" name="Fragment threads doing late ZS" description="Number of threads doing late ZS test"/>
+    <event counter="ARM_Mali-T62x_FRAG_THREADS_LZS_KILLED" title="Mali Fragment Threads" name="Fragment threads killed late ZS" description="Number of threads killed by late ZS test"/>
+
+    <event counter="ARM_Mali-T62x_COMPUTE_TASKS" title="Mali Compute Tasks" name="Compute tasks" description="Number of compute tasks"/>
+    <event counter="ARM_Mali-T62x_COMPUTE_THREADS" title="Mali Compute Threads" name="Compute threads" description="Number of compute threads started"/>
+
+    <event counter="ARM_Mali-T62x_FRAG_PRIMITIVES" title="Mali Fragment Primitives" name="Primitives loaded" description="Number of primitives loaded from tiler"/>
+    <event counter="ARM_Mali-T62x_FRAG_PRIMITIVES_DROPPED" title="Mali Fragment Primitives" name="Primitives dropped" description="Number of primitives dropped because out of tile"/>
+
+    <event counter="ARM_Mali-T62x_FRAG_QUADS_RAST" title="Mali Fragment Quads" name="Quads rasterized" description="Number of quads rasterized"/>
+    <event counter="ARM_Mali-T62x_FRAG_QUADS_EZS_TEST" title="Mali Fragment Quads" name="Quads doing early ZS" description="Number of quads doing early ZS test"/>
+    <event counter="ARM_Mali-T62x_FRAG_QUADS_EZS_KILLED" title="Mali Fragment Quads" name="Quads killed early Z" description="Number of quads killed by early ZS test"/>
+
+    <event counter="ARM_Mali-T62x_FRAG_NUM_TILES" title="Mali Fragment Tasks" name="Tiles rendered" description="Number of tiles rendered"/>
+    <event counter="ARM_Mali-T62x_FRAG_TRANS_ELIM" title="Mali Fragment Tasks" name="Tile writes killed by TE" description="Number of tile writes skipped by transaction elimination"/>
+
+    <event counter="ARM_Mali-T62x_ARITH_WORDS" title="Mali Arithmetic Pipe" name="A instructions" description="Number of instructions completed by the A-pipe (normalized per pipeline)"/>
+
+    <event counter="ARM_Mali-T62x_LS_WORDS" title="Mali Load/Store Pipe" name="LS instructions" description="Number of instructions completed by the LS-pipe"/>
+    <event counter="ARM_Mali-T62x_LS_ISSUES" title="Mali Load/Store Pipe" name="LS instruction issues" description="Number of instructions issued to the LS-pipe, including restarts"/>
+
+    <event counter="ARM_Mali-T62x_TEX_WORDS" title="Mali Texture Pipe" name="T instructions" description="Number of instructions completed by the T-pipe"/>
+    <event counter="ARM_Mali-T62x_TEX_ISSUES" title="Mali Texture Pipe" name="T instruction issues" description="Number of threads through loop 2 address calculation"/>
+    <event counter="ARM_Mali-T62x_TEX_RECIRC_FMISS" title="Mali Texture Pipe" name="Cache misses" description="Number of instructions in the T-pipe, recirculated due to cache miss"/>
+
+    <event counter="ARM_Mali-T62x_LSC_READ_HITS" title="Mali Load/Store Cache" name="Read hits" description="Number of read hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-T62x_LSC_READ_MISSES" title="Mali Load/Store Cache" name="Read misses" description="Number of read misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-T62x_LSC_WRITE_HITS" title="Mali Load/Store Cache" name="Write hits" description="Number of write hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-T62x_LSC_WRITE_MISSES" title="Mali Load/Store Cache" name="Write misses" description="Number of write misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-T62x_LSC_ATOMIC_HITS" title="Mali Load/Store Cache" name="Atomic hits" description="Number of atomic hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-T62x_LSC_ATOMIC_MISSES" title="Mali Load/Store Cache" name="Atomic misses" description="Number of atomic misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-T62x_LSC_LINE_FETCHES" title="Mali Load/Store Cache" name="Line fetches" description="Number of line fetches in the Load/Store cache"/>
+    <event counter="ARM_Mali-T62x_LSC_DIRTY_LINE" title="Mali Load/Store Cache" name="Dirty line evictions" description="Number of dirty line evictions in the Load/Store cache"/>
+    <event counter="ARM_Mali-T62x_LSC_SNOOPS" title="Mali Load/Store Cache" name="Snoops into LSC" description="Number of coherent memory snoops into the Load/Store cache"/>
+
+  </category>
+
+  <category name="Mali L2 Cache" per_cpu="no">
+
+    <event counter="ARM_Mali-T62x_L2_EXT_WRITE_BEATS" title="Mali L2 Cache" name="External write beats" description="Number of external bus write beats"/>
+    <event counter="ARM_Mali-T62x_L2_EXT_READ_BEATS" title="Mali L2 Cache" name="External read beats" description="Number of external bus read beats"/>
+    <event counter="ARM_Mali-T62x_L2_READ_SNOOP" title="Mali L2 Cache" name="Read snoops" description="Number of read transaction snoops"/>
+    <event counter="ARM_Mali-T62x_L2_READ_HIT" title="Mali L2 Cache" name="L2 read hits" description="Number of reads hitting in the L2 cache"/>
+    <event counter="ARM_Mali-T62x_L2_WRITE_SNOOP" title="Mali L2 Cache" name="Write snoops" description="Number of write transaction snoops"/>
+    <event counter="ARM_Mali-T62x_L2_WRITE_HIT" title="Mali L2 Cache" name="L2 write hits" description="Number of writes hitting in the L2 cache"/>
+    <event counter="ARM_Mali-T62x_L2_EXT_AR_STALL" title="Mali L2 Cache" name="External bus stalls (AR)" description="Number of cycles a valid read address (AR) is stalled by the external interconnect"/>
+    <event counter="ARM_Mali-T62x_L2_EXT_W_STALL" title="Mali L2 Cache" name="External bus stalls (W)" description="Number of cycles a valid write data (W channel) is stalled by the external interconnect"/>
+    <event counter="ARM_Mali-T62x_L2_EXT_R_BUF_FULL" title="Mali L2 Cache" name="External bus response buffer full" description="Number of cycles a valid request is blocked by a full response buffer"/>
+    <event counter="ARM_Mali-T62x_L2_EXT_RD_BUF_FULL" title="Mali L2 Cache" name="External bus read data buffer full" description="Number of cycles a valid request is blocked by a full read data buffer"/>
+    <event counter="ARM_Mali-T62x_L2_EXT_W_BUF_FULL" title="Mali L2 Cache" name="External bus write buffer full" description="Number of cycles a valid request is blocked by a full write buffer"/>
+    <event counter="ARM_Mali-T62x_L2_READ_LOOKUP" title="Mali L2 Cache" name="L2 read lookups" description="Number of reads into the L2 cache"/>
+    <event counter="ARM_Mali-T62x_L2_WRITE_LOOKUP" title="Mali L2 Cache" name="L2 write lookups" description="Number of writes into the L2 cache"/>
+
+  </category>
diff --git a/tools/gator/daemon/events-Mali-T6xx.xml b/tools/gator/daemon/events-Mali-T6xx.xml
deleted file mode 100644 (file)
index ec9ca00..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-
-  <category name="Mali-T6xx-SW-counters" per_cpu="no">
-    <event counter="ARM_Mali-T6xx_TOTAL_ALLOC_PAGES" title="Mali Total Alloc Pages" name="Total number of allocated pages" description="Mali total number of allocated pages."/>
-  </category>
-
-  <category name="Mali-T6xx-PMShader" per_cpu="no">
-    <event counter="ARM_Mali-T6xx_PM_SHADER_0" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 0" description="Mali PM Shader: PM Shader Core 0."/>
-    <event counter="ARM_Mali-T6xx_PM_SHADER_1" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 1" description="Mali PM Shader: PM Shader Core 1."/>
-    <event counter="ARM_Mali-T6xx_PM_SHADER_2" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 2" description="Mali PM Shader: PM Shader Core 2."/>
-    <event counter="ARM_Mali-T6xx_PM_SHADER_3" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 3" description="Mali PM Shader: PM Shader Core 3."/>
-    <event counter="ARM_Mali-T6xx_PM_SHADER_4" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 4" description="Mali PM Shader: PM Shader Core 4."/>
-    <event counter="ARM_Mali-T6xx_PM_SHADER_5" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 5" description="Mali PM Shader: PM Shader Core 5."/>
-    <event counter="ARM_Mali-T6xx_PM_SHADER_6" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 6" description="Mali PM Shader: PM Shader Core 6."/>
-    <event counter="ARM_Mali-T6xx_PM_SHADER_7" class="absolute" display="average" average_selection="yes" percentage="yes" title="Mali PM Shader" name="PM Shader Core 7" description="Mali PM Shader: PM Shader Core 7."/>
-  </category>
-
-  <category name="Mali-T6xx-PMTiler" per_cpu="no">
-    <event counter="ARM_Mali-T6xx_PM_TILER_0" display="average" average_selection="yes" percentage="yes" title="Mali PM Tiler" name="PM Tiler Core 0" description="Mali PM Tiler: PM Tiler Core 0."/>
-  </category>
-
-  <category name="Mali-T6xx-PML2" per_cpu="no">
-    <event counter="ARM_Mali-T6xx_PM_L2_0" display="average" average_selection="yes" percentage="yes" title="Mali PM L2" name="PM L2 Core 0" description="Mali PM L2: PM L2 Core 0."/>
-    <event counter="ARM_Mali-T6xx_PM_L2_1" display="average" average_selection="yes" percentage="yes" title="Mali PM L2" name="PM L2 Core 1" description="Mali PM L2: PM L2 Core 1."/>
-  </category>
-
-  <category name="Mali-T6xx-MMU_AS" per_cpu="no">
-    <event counter="ARM_Mali-T6xx_MMU_AS_0" display="average" average_selection="yes" percentage="yes" title="Mali MMU Address Space" name="MMU Address Space 0" description="Mali MMU Address Space 0 usage."/>
-    <event counter="ARM_Mali-T6xx_MMU_AS_1" display="average" average_selection="yes" percentage="yes" title="Mali MMU Address Space" name="MMU Address Space 1" description="Mali MMU Address Space 1 usage."/>
-    <event counter="ARM_Mali-T6xx_MMU_AS_2" display="average" average_selection="yes" percentage="yes" title="Mali MMU Address Space" name="MMU Address Space 2" description="Mali MMU Address Space 2 usage."/>
-    <event counter="ARM_Mali-T6xx_MMU_AS_3" display="average" average_selection="yes" percentage="yes" title="Mali MMU Address Space" name="MMU Address Space 3" description="Mali MMU Address Space 3 usage."/>
-  </category>
-
-  <category name="Mali-T6xx-MMU_page_fault" per_cpu="no">
-    <event counter="ARM_Mali-T6xx_MMU_PAGE_FAULT_0" title="Mali MMU Page Fault Add. Space" name="Mali MMU Page Fault Add. Space 0" description="Reports the number of newly allocated pages after a MMU page fault in address space 0."/>
-    <event counter="ARM_Mali-T6xx_MMU_PAGE_FAULT_1" title="Mali MMU Page Fault Add. Space" name="Mali MMU Page Fault Add. Space 1" description="Reports the number of newly allocated pages after a MMU page fault in address space 1."/>
-    <event counter="ARM_Mali-T6xx_MMU_PAGE_FAULT_2" title="Mali MMU Page Fault Add. Space" name="Mali MMU Page Fault Add. Space 2" description="Reports the number of newly allocated pages after a MMU page fault in address space 2."/>
-    <event counter="ARM_Mali-T6xx_MMU_PAGE_FAULT_3" title="Mali MMU Page Fault Add. Space" name="Mali MMU Page Fault Add. Space 3" description="Reports the number of newly allocated pages after a MMU page fault in address space 3."/>
-  </category>
-
-  <counter_set name="ARM_Mali-T6xx_Filmstrip_cnt" count="1"/>
-  <category name="ARM Mali-T6xx Filmstrip" counter_set="ARM_Mali-T6xx_Filmstrip_cnt" per_cpu="no">
-    <option_set name="fs">
-      <option event_delta="0x3c" name="1:60" description="captures every 60th frame"/>
-      <option event_delta="0x1e" name="1:30" description="captures every 30th frame"/>
-      <option event_delta="0xa" name="1:10" description="captures every 10th frame"/>
-    </option_set>
-    <event event="0x0400" option_set="fs" title="ARM Mali-T6xx" name="Filmstrip" description="Scaled framebuffer"/>
-  </category>
diff --git a/tools/gator/daemon/events-Mali-T6xx_hw.xml b/tools/gator/daemon/events-Mali-T6xx_hw.xml
deleted file mode 100644 (file)
index 03566cb..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-
-  <category name="Mali-T6xx-JobManager" per_cpu="no">
-
-    <event counter="ARM_Mali-T6xx_GPU_ACTIVE" title="Mali Job Manager Cycles" name="GPU cycles" description="Number of cycles the GPU was active"/>
-    <event counter="ARM_Mali-T6xx_IRQ_ACTIVE" title="Mali Job Manager Cycles" name="IRQ cycles" description="Number of cycles the GPU had a pending interrupt"/>
-    <event counter="ARM_Mali-T6xx_JS0_ACTIVE" title="Mali Job Manager Cycles" name="JS0 cycles" description="Number of cycles JS0 (fragment) was active"/>
-    <event counter="ARM_Mali-T6xx_JS1_ACTIVE" title="Mali Job Manager Cycles" name="JS1 cycles" description="Number of cycles JS1 (vertex/tiler/compute) was active"/>
-    <event counter="ARM_Mali-T6xx_JS2_ACTIVE" title="Mali Job Manager Cycles" name="JS2 cycles" description="Number of cycles JS2 (vertex/compute) was active"/>
-
-    <event counter="ARM_Mali-T6xx_JS0_JOBS" title="Mali Job Manager Work" name="JS0 jobs" description="Number of Jobs (fragment) completed in JS0"/>
-    <event counter="ARM_Mali-T6xx_JS0_TASKS" title="Mali Job Manager Work" name="JS0 tasks" description="Number of Tasks completed in JS0"/>
-    <event counter="ARM_Mali-T6xx_JS1_JOBS" title="Mali Job Manager Work" name="JS1 jobs" description="Number of Jobs (vertex/tiler/compute) completed in JS1"/>
-    <event counter="ARM_Mali-T6xx_JS1_TASKS" title="Mali Job Manager Work" name="JS1 tasks" description="Number of Tasks completed in JS1"/>
-    <event counter="ARM_Mali-T6xx_JS2_TASKS" title="Mali Job Manager Work" name="JS2 tasks" description="Number of Tasks completed in JS2"/>
-    <event counter="ARM_Mali-T6xx_JS2_JOBS" title="Mali Job Manager Work" name="JS2 jobs" description="Number of Jobs (vertex/compute) completed in JS2"/>
-
-  </category>
-
-  <category name="Mali-T6xx-Tiler" per_cpu="no">
-
-    <event counter="ARM_Mali-T6xx_POLYGONS" title="Mali Tiler Primitives" name="Polygons" description="Number of polygons processed"/>
-    <event counter="ARM_Mali-T6xx_QUADS" title="Mali Tiler Primitives" name="Quads" description="Number of quads processed"/>
-    <event counter="ARM_Mali-T6xx_TRIANGLES" title="Mali Tiler Primitives" name="Triangles" description="Number of triangles processed"/>
-    <event counter="ARM_Mali-T6xx_LINES" title="Mali Tiler Primitives" name="Lines" description="Number of lines processed"/>
-    <event counter="ARM_Mali-T6xx_POINTS" title="Mali Tiler Primitives" name="Points" description="Number of points processed"/>
-
-    <event counter="ARM_Mali-T6xx_FRONT_FACING" title="Mali Tiler Culling" name="Front facing prims" description="Number of front facing primitives"/>
-    <event counter="ARM_Mali-T6xx_BACK_FACING" title="Mali Tiler Culling" name="Back facing prims" description="Number of back facing primitives"/>
-    <event counter="ARM_Mali-T6xx_PRIM_VISIBLE" title="Mali Tiler Culling" name="Visible prims" description="Number of visible primitives"/>
-    <event counter="ARM_Mali-T6xx_PRIM_CULLED" title="Mali Tiler Culling" name="Culled prims" description="Number of culled primitives"/>
-    <event counter="ARM_Mali-T6xx_PRIM_CLIPPED" title="Mali Tiler Culling" name="Clipped prims" description="Number of clipped primitives"/>
-
-    <event counter="ARM_Mali-T6xx_LEVEL0" title="Mali Tiler Hierarchy" name="L0 prims" description="Number of primitives in hierarchy level 0"/>
-    <event counter="ARM_Mali-T6xx_LEVEL1" title="Mali Tiler Hierarchy" name="L1 prims" description="Number of primitives in hierarchy level 1"/>
-    <event counter="ARM_Mali-T6xx_LEVEL2" title="Mali Tiler Hierarchy" name="L2 prims" description="Number of primitives in hierarchy level 2"/>
-    <event counter="ARM_Mali-T6xx_LEVEL3" title="Mali Tiler Hierarchy" name="L3 prims" description="Number of primitives in hierarchy level 3"/>
-    <event counter="ARM_Mali-T6xx_LEVEL4" title="Mali Tiler Hierarchy" name="L4 prims" description="Number of primitives in hierarchy level 4"/>
-    <event counter="ARM_Mali-T6xx_LEVEL5" title="Mali Tiler Hierarchy" name="L5 prims" description="Number of primitives in hierarchy level 5"/>
-    <event counter="ARM_Mali-T6xx_LEVEL6" title="Mali Tiler Hierarchy" name="L6 prims" description="Number of primitives in hierarchy level 6"/>
-    <event counter="ARM_Mali-T6xx_LEVEL7" title="Mali Tiler Hierarchy" name="L7 prims" description="Number of primitives in hierarchy level 7"/>
-
-    <event counter="ARM_Mali-T6xx_COMMAND_1" title="Mali Tiler Commands" name="Prims in 1 command" description="Number of primitives producing 1 command"/>
-    <event counter="ARM_Mali-T6xx_COMMAND_2" title="Mali Tiler Commands" name="Prims in 2 command" description="Number of primitives producing 2 commands"/>
-    <event counter="ARM_Mali-T6xx_COMMAND_3" title="Mali Tiler Commands" name="Prims in 3 command" description="Number of primitives producing 3 commands"/>
-    <event counter="ARM_Mali-T6xx_COMMAND_4" title="Mali Tiler Commands" name="Prims in 4 command" description="Number of primitives producing 4 commands"/>
-    <event counter="ARM_Mali-T6xx_COMMAND_4_7" title="Mali Tiler Commands" name="Prims in 4-7 commands" description="Number of primitives producing 4-7 commands"/>
-    <event counter="ARM_Mali-T6xx_COMMAND_5_7" title="Mali Tiler Commands" name="Prims in 5-7 commands" description="Number of primitives producing 5-7 commands"/>
-    <event counter="ARM_Mali-T6xx_COMMAND_8_15" title="Mali Tiler Commands" name="Prims in 8-15 commands" description="Number of primitives producing 8-15 commands"/>
-    <event counter="ARM_Mali-T6xx_COMMAND_16_63" title="Mali Tiler Commands" name="Prims in 16-63 commands" description="Number of primitives producing 16-63 commands"/>
-    <event counter="ARM_Mali-T6xx_COMMAND_64" title="Mali Tiler Commands" name="Prims in &gt;= 64 commands" description="Number of primitives producing &gt;= 64 commands"/>
-
-  </category>
-
-  <category name="Mali-T6xx-ShaderCore" per_cpu="no">
-
-    <event counter="ARM_Mali-T6xx_TRIPIPE_ACTIVE" title="Mali Core Cycles" name="Tripipe cycles" description="Number of cycles the Tripipe was active"/>
-    <event counter="ARM_Mali-T6xx_FRAG_ACTIVE" title="Mali Core Cycles" name="Fragment cycles" description="Number of cycles fragment processing was active"/>
-    <event counter="ARM_Mali-T6xx_COMPUTE_ACTIVE" title="Mali Core Cycles" name="Compute cycles" description="Number of cycles vertex\compute processing was active"/>
-    <event counter="ARM_Mali-T6xx_FRAG_CYCLE_NO_TILE" title="Mali Core Cycles" name="Fragment cycles waiting for tile" description="Number of cycles spent waiting for a physical tile buffer"/>
-
-    <event counter="ARM_Mali-T6xx_FRAG_THREADS" title="Mali Core Threads" name="Fragment threads" description="Number of fragment threads started"/>
-    <event counter="ARM_Mali-T6xx_FRAG_DUMMY_THREADS" title="Mali Core Threads" name="Dummy fragment threads" description="Number of dummy fragment threads started"/>
-    <event counter="ARM_Mali-T6xx_FRAG_QUADS_LZS_TEST" title="Mali Core Threads" name="Frag threads doing late ZS" description="Number of threads doing late ZS test"/>
-    <event counter="ARM_Mali-T6xx_FRAG_QUADS_LZS_KILLED" title="Mali Core Threads" name="Frag threads killed late ZS" description="Number of threads killed by late ZS test"/>
-    <event counter="ARM_Mali-T6xx_FRAG_THREADS_LZS_TEST" title="Mali Core Threads" name="Frag threads doing late ZS" description="Number of threads doing late ZS test"/>
-    <event counter="ARM_Mali-T6xx_FRAG_THREADS_LZS_KILLED" title="Mali Core Threads" name="Frag threads killed late ZS" description="Number of threads killed by late ZS test"/>
-
-    <event counter="ARM_Mali-T6xx_COMPUTE_TASKS" title="Mali Compute Threads" name="Compute tasks" description="Number of compute tasks"/>
-    <event counter="ARM_Mali-T6xx_COMPUTE_THREADS" title="Mali Compute Threads" name="Compute threads started" description="Number of compute threads started"/>
-    <event counter="ARM_Mali-T6xx_COMPUTE_CYCLES_DESC" title="Mali Compute Threads" name="Compute cycles awaiting descriptors" description="Number of compute cycles spent waiting for descriptors"/>
-
-    <event counter="ARM_Mali-T6xx_FRAG_PRIMATIVES" title="Mali Fragment Primitives" name="Primitives loaded" description="Number of primitives loaded from tiler"/>
-    <event counter="ARM_Mali-T6xx_FRAG_PRIMATIVES_DROPPED" title="Mali Fragment Primitives" name="Primitives dropped" description="Number of primitives dropped because out of tile"/>
-    <event counter="ARM_Mali-T6xx_FRAG_PRIMITIVES" title="Mali Fragment Primitives" name="Primitives loaded" description="Number of primitives loaded from tiler"/>
-    <event counter="ARM_Mali-T6xx_FRAG_PRIMITIVES_DROPPED" title="Mali Fragment Primitives" name="Primitives dropped" description="Number of primitives dropped because out of tile"/>
-
-    <event counter="ARM_Mali-T6xx_FRAG_QUADS_RAST" title="Mali Fragment Quads" name="Quads rasterized" description="Number of quads rasterized"/>
-    <event counter="ARM_Mali-T6xx_FRAG_QUADS_EZS_TEST" title="Mali Fragment Quads" name="Quads doing early ZS" description="Number of quads doing early ZS test"/>
-    <event counter="ARM_Mali-T6xx_FRAG_QUADS_EZS_KILLED" title="Mali Fragment Quads" name="Quads killed early Z" description="Number of quads killed by early ZS test"/>
-
-    <event counter="ARM_Mali-T6xx_FRAG_NUM_TILES" title="Mali Fragment Tasks" name="Tiles rendered" description="Number of tiles rendered"/>
-    <event counter="ARM_Mali-T6xx_FRAG_TRANS_ELIM" title="Mali Fragment Tasks" name="Tile writes killed by TE" description="Number of tile writes skipped by transaction elimination"/>
-
-    <event counter="ARM_Mali-T6xx_ARITH_WORDS" title="Mali Arithmetic Pipe" name="A instructions" description="Number of instructions completed by the the A-pipe (normalized per pipeline)"/>
-
-    <event counter="ARM_Mali-T6xx_LS_WORDS" title="Mali Load/Store Pipe" name="LS instructions" description="Number of instructions completed by the LS-pipe"/>
-    <event counter="ARM_Mali-T6xx_LS_ISSUES" title="Mali Load/Store Pipe" name="LS instruction issues" description="Number of instructions issued to the LS-pipe, including restarts"/>
-
-    <event counter="ARM_Mali-T6xx_TEX_WORDS" title="Mali Texture Pipe" name="T instructions" description="Number of instructions completed by the T-pipe"/>
-    <event counter="ARM_Mali-T6xx_TEX_THREADS" title="Mali Texture Pipe" name="T instruction issues" description="Number of instructions issused to the T-pipe, including restarts"/>
-    <event counter="ARM_Mali-T6xx_TEX_RECIRC_FMISS" title="Mali Texture Pipe" name="Cache misses" description="Number of instructions in the T-pipe, recirculated due to cache miss"/>
-
-    <event counter="ARM_Mali-T6xx_LSC_READ_HITS" title="Mali Load/Store Cache" name="Read hits" description="Number of read hits in the Load/Store cache"/>
-    <event counter="ARM_Mali-T6xx_LSC_READ_MISSES" title="Mali Load/Store Cache" name="Read misses" description="Number of read misses in the Load/Store cache"/>
-    <event counter="ARM_Mali-T6xx_LSC_WRITE_HITS" title="Mali Load/Store Cache" name="Write hits" description="Number of write hits in the Load/Store cache"/>
-    <event counter="ARM_Mali-T6xx_LSC_WRITE_MISSES" title="Mali Load/Store Cache" name="Write misses" description="Number of write misses in the Load/Store cache"/>
-    <event counter="ARM_Mali-T6xx_LSC_ATOMIC_HITS" title="Mali Load/Store Cache" name="Atomic hits" description="Number of atomic hits in the Load/Store cache"/>
-    <event counter="ARM_Mali-T6xx_LSC_ATOMIC_MISSES" title="Mali Load/Store Cache" name="Atomic misses" description="Number of atomic misses in the Load/Store cache"/>
-    <event counter="ARM_Mali-T6xx_LSC_LINE_FETCHES" title="Mali Load/Store Cache" name="Line fetches" description="Number of line fetches in the Load/Store cache"/>
-    <event counter="ARM_Mali-T6xx_LSC_DIRTY_LINE" title="Mali Load/Store Cache" name="Dirty line evictions" description="Number of dirty line evictions in the Load/Store cache"/>
-    <event counter="ARM_Mali-T6xx_LSC_SNOOPS" title="Mali Load/Store Cache" name="Snoops in to LSC" description="Number of coherent memory snoops in to the Load/Store cache"/>
-
-  </category>
-
-  <category name="Mali-T6xx-L2AndMMU" per_cpu="no">
-
-    <event counter="ARM_Mali-T6xx_L2_WRITE_BEATS" title="Mali L2 Cache" name="External write beats" description="Number of external bus write beats"/>
-    <event counter="ARM_Mali-T6xx_L2_READ_BEATS" title="Mali L2 Cache" name="External read beats" description="Number of external bus read beats"/>
-    <event counter="ARM_Mali-T6xx_L2_READ_SNOOP" title="Mali L2 Cache" name="Read snoops" description="Number of read transaction snoops"/>
-    <event counter="ARM_Mali-T6xx_L2_READ_HIT" title="Mali L2 Cache" name="L2 read hits" description="Number of reads hitting in the L2 cache"/>
-    <event counter="ARM_Mali-T6xx_L2_WRITE_SNOOP" title="Mali L2 Cache" name="Write snoops" description="Number of write transaction snoops"/>
-    <event counter="ARM_Mali-T6xx_L2_WRITE_HIT" title="Mali L2 Cache" name="L2 write hits" description="Number of writes hitting in the L2 cache"/>
-    <event counter="ARM_Mali-T6xx_L2_EXT_AR_STALL" title="Mali L2 Cache" name="External bus stalls (AR)" description="Number of cycles a valid read address (AR) is stalled by the external interconnect"/>
-    <event counter="ARM_Mali-T6xx_L2_EXT_W_STALL" title="Mali L2 Cache" name="External bus stalls (W)" description="Number of cycles a valid write data (W channel) is stalled by the external interconnect"/>
-
-  </category>
diff --git a/tools/gator/daemon/events-Mali-T72x_hw.xml b/tools/gator/daemon/events-Mali-T72x_hw.xml
new file mode 100644 (file)
index 0000000..5587534
--- /dev/null
@@ -0,0 +1,95 @@
+
+  <category name="Mali Job Manager" per_cpu="no">
+
+    <event counter="ARM_Mali-T72x_GPU_ACTIVE" title="Mali Job Manager Cycles" name="GPU cycles" description="Number of cycles GPU active"/>
+    <event counter="ARM_Mali-T72x_IRQ_ACTIVE" title="Mali Job Manager Cycles" name="IRQ cycles" description="Number of cycles GPU interrupt pending"/>
+    <event counter="ARM_Mali-T72x_JS0_ACTIVE" title="Mali Job Manager Cycles" name="JS0 cycles" description="Number of cycles JS0 (fragment) active"/>
+    <event counter="ARM_Mali-T72x_JS1_ACTIVE" title="Mali Job Manager Cycles" name="JS1 cycles" description="Number of cycles JS1 (vertex/tiler/compute) active"/>
+    <event counter="ARM_Mali-T72x_JS2_ACTIVE" title="Mali Job Manager Cycles" name="JS2 cycles" description="Number of cycles JS2 (vertex/compute) active"/>
+
+    <event counter="ARM_Mali-T72x_JS0_JOBS" title="Mali Job Manager Work" name="JS0 jobs" description="Number of Jobs (fragment) completed in JS0"/>
+    <event counter="ARM_Mali-T72x_JS0_TASKS" title="Mali Job Manager Work" name="JS0 tasks" description="Number of Tasks completed in JS0"/>
+    <event counter="ARM_Mali-T72x_JS1_JOBS" title="Mali Job Manager Work" name="JS1 jobs" description="Number of Jobs (vertex/tiler/compute) completed in JS1"/>
+    <event counter="ARM_Mali-T72x_JS1_TASKS" title="Mali Job Manager Work" name="JS1 tasks" description="Number of Tasks completed in JS1"/>
+    <event counter="ARM_Mali-T72x_JS2_TASKS" title="Mali Job Manager Work" name="JS2 tasks" description="Number of Tasks completed in JS2"/>
+    <event counter="ARM_Mali-T72x_JS2_JOBS" title="Mali Job Manager Work" name="JS2 jobs" description="Number of Jobs (vertex/compute) completed in JS2"/>
+
+  </category>
+
+  <category name="Mali Tiler" per_cpu="no">
+
+    <event counter="ARM_Mali-T72x_TI_ACTIVE" title="Mali Tiler Cycles" name="Tiler cycles" description="Number of cycles Tiler active"/>
+
+    <event counter="ARM_Mali-T72x_TI_POLYGONS" title="Mali Tiler Primitives" name="Polygons" description="Number of polygons processed"/>
+    <event counter="ARM_Mali-T72x_TI_QUADS" title="Mali Tiler Primitives" name="Quads" description="Number of quads processed"/>
+    <event counter="ARM_Mali-T72x_TI_TRIANGLES" title="Mali Tiler Primitives" name="Triangles" description="Number of triangles processed"/>
+    <event counter="ARM_Mali-T72x_TI_LINES" title="Mali Tiler Primitives" name="Lines" description="Number of lines processed"/>
+    <event counter="ARM_Mali-T72x_TI_POINTS" title="Mali Tiler Primitives" name="Points" description="Number of points processed"/>
+
+    <event counter="ARM_Mali-T72x_TI_FRONT_FACING" title="Mali Tiler Culling" name="Front facing prims" description="Number of front facing primitives"/>
+    <event counter="ARM_Mali-T72x_TI_BACK_FACING" title="Mali Tiler Culling" name="Back facing prims" description="Number of back facing primitives"/>
+    <event counter="ARM_Mali-T72x_TI_PRIM_VISIBLE" title="Mali Tiler Culling" name="Visible prims" description="Number of visible primitives"/>
+    <event counter="ARM_Mali-T72x_TI_PRIM_CULLED" title="Mali Tiler Culling" name="Culled prims" description="Number of culled primitives"/>
+    <event counter="ARM_Mali-T72x_TI_PRIM_CLIPPED" title="Mali Tiler Culling" name="Clipped prims" description="Number of clipped primitives"/>
+
+  </category>
+
+  <category name="Mali Shader Core" per_cpu="no">
+
+    <event counter="ARM_Mali-T72x_TRIPIPE_ACTIVE" title="Mali Core Cycles" name="Tripipe cycles" description="Number of cycles tripipe was active"/>
+    <event counter="ARM_Mali-T72x_FRAG_ACTIVE" title="Mali Core Cycles" name="Fragment cycles" description="Number of cycles fragment processing was active"/>
+    <event counter="ARM_Mali-T72x_COMPUTE_ACTIVE" title="Mali Core Cycles" name="Compute cycles" description="Number of cycles vertex/compute processing was active"/>
+    <event counter="ARM_Mali-T72x_FRAG_CYCLES_NO_TILE" title="Mali Core Cycles" name="Fragment cycles waiting for tile" description="Number of cycles spent waiting for a physical tile buffer"/>
+
+    <event counter="ARM_Mali-T72x_FRAG_THREADS" title="Mali Fragment Threads" name="Fragment threads" description="Number of fragment threads started"/>
+    <event counter="ARM_Mali-T72x_FRAG_DUMMY_THREADS" title="Mali Fragment Threads" name="Dummy fragment threads" description="Number of dummy fragment threads started"/>
+    <event counter="ARM_Mali-T72x_FRAG_THREADS_LZS_TEST" title="Mali Fragment Threads" name="Fragment threads doing late ZS" description="Number of threads doing late ZS test"/>
+    <event counter="ARM_Mali-T72x_FRAG_THREADS_LZS_KILLED" title="Mali Fragment Threads" name="Fragment threads killed late ZS" description="Number of threads killed by late ZS test"/>
+
+    <event counter="ARM_Mali-T72x_COMPUTE_TASKS" title="Mali Compute Tasks" name="Compute tasks" description="Number of compute tasks"/>
+    <event counter="ARM_Mali-T72x_COMPUTE_THREADS" title="Mali Compute Threads" name="Compute threads" description="Number of compute threads started"/>
+
+    <event counter="ARM_Mali-T72x_FRAG_PRIMITIVES" title="Mali Fragment Primitives" name="Primitives loaded" description="Number of primitives loaded from tiler"/>
+    <event counter="ARM_Mali-T72x_FRAG_PRIMITIVES_DROPPED" title="Mali Fragment Primitives" name="Primitives dropped" description="Number of primitives dropped because out of tile"/>
+
+    <event counter="ARM_Mali-T72x_FRAG_QUADS_RAST" title="Mali Fragment Quads" name="Quads rasterized" description="Number of quads rasterized"/>
+    <event counter="ARM_Mali-T72x_FRAG_QUADS_EZS_TEST" title="Mali Fragment Quads" name="Quads doing early ZS" description="Number of quads doing early ZS test"/>
+    <event counter="ARM_Mali-T72x_FRAG_QUADS_EZS_KILLED" title="Mali Fragment Quads" name="Quads killed early Z" description="Number of quads killed by early ZS test"/>
+
+    <event counter="ARM_Mali-T72x_FRAG_NUM_TILES" title="Mali Fragment Tasks" name="Tiles rendered" description="Number of tiles rendered"/>
+    <event counter="ARM_Mali-T72x_FRAG_TRANS_ELIM" title="Mali Fragment Tasks" name="Tile writes killed by TE" description="Number of tile writes skipped by transaction elimination"/>
+
+    <event counter="ARM_Mali-T72x_ARITH_WORDS" title="Mali Arithmetic Pipe" name="A instructions" description="Number of batched instructions executed by the A-pipe"/>
+
+    <event counter="ARM_Mali-T72x_LS_WORDS" title="Mali Load/Store Pipe" name="LS instructions" description="Number of instructions completed by the LS-pipe"/>
+    <event counter="ARM_Mali-T72x_LS_ISSUES" title="Mali Load/Store Pipe" name="LS instruction issues" description="Number of instructions issued to the LS-pipe, including restarts"/>
+
+    <event counter="ARM_Mali-T72x_TEX_WORDS" title="Mali Texture Pipe" name="T instructions" description="Number of instructions completed by the T-pipe"/>
+    <event counter="ARM_Mali-T72x_TEX_ISSUES" title="Mali Texture Pipe" name="T instruction issues" description="Number of threads through loop 2 address calculation"/>
+
+    <event counter="ARM_Mali-T72x_LSC_READ_HITS" title="Mali Load/Store Cache" name="Read hits" description="Number of read hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-T72x_LSC_READ_MISSES" title="Mali Load/Store Cache" name="Read misses" description="Number of read misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-T72x_LSC_WRITE_HITS" title="Mali Load/Store Cache" name="Write hits" description="Number of write hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-T72x_LSC_WRITE_MISSES" title="Mali Load/Store Cache" name="Write misses" description="Number of write misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-T72x_LSC_ATOMIC_HITS" title="Mali Load/Store Cache" name="Atomic hits" description="Number of atomic hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-T72x_LSC_ATOMIC_MISSES" title="Mali Load/Store Cache" name="Atomic misses" description="Number of atomic misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-T72x_LSC_LINE_FETCHES" title="Mali Load/Store Cache" name="Line fetches" description="Number of line fetches in the Load/Store cache"/>
+    <event counter="ARM_Mali-T72x_LSC_DIRTY_LINE" title="Mali Load/Store Cache" name="Dirty line evictions" description="Number of dirty line evictions in the Load/Store cache"/>
+    <event counter="ARM_Mali-T72x_LSC_SNOOPS" title="Mali Load/Store Cache" name="Snoops in to LSC" description="Number of coherent memory snoops in to the Load/Store cache"/>
+
+  </category>
+
+  <category name="Mali L2 Cache" per_cpu="no">
+
+    <event counter="ARM_Mali-T72x_L2_EXT_WRITE_BEATS" title="Mali L2 Cache" name="External write beats" description="Number of external bus write beats"/>
+    <event counter="ARM_Mali-T72x_L2_EXT_READ_BEATS" title="Mali L2 Cache" name="External read beats" description="Number of external bus read beats"/>
+    <event counter="ARM_Mali-T72x_L2_READ_SNOOP" title="Mali L2 Cache" name="Read snoops" description="Number of read transaction snoops"/>
+    <event counter="ARM_Mali-T72x_L2_READ_HIT" title="Mali L2 Cache" name="L2 read hits" description="Number of reads hitting in the L2 cache"/>
+    <event counter="ARM_Mali-T72x_L2_WRITE_SNOOP" title="Mali L2 Cache" name="Write snoops" description="Number of write transaction snoops"/>
+    <event counter="ARM_Mali-T72x_L2_WRITE_HIT" title="Mali L2 Cache" name="L2 write hits" description="Number of writes hitting in the L2 cache"/>
+    <event counter="ARM_Mali-T72x_L2_EXT_AR_STALL" title="Mali L2 Cache" name="External bus stalls (AR)" description="Number of cycles a valid read address (AR) is stalled by the external interconnect"/>
+    <event counter="ARM_Mali-T72x_L2_EXT_W_STALL" title="Mali L2 Cache" name="External bus stalls (W)" description="Number of cycles a valid write data (W channel) is stalled by the external interconnect"/>
+    <event counter="ARM_Mali-T72x_L2_READ_LOOKUP" title="Mali L2 Cache" name="L2 read lookups" description="Number of reads into the L2 cache"/>
+    <event counter="ARM_Mali-T72x_L2_WRITE_LOOKUP" title="Mali L2 Cache" name="L2 write lookups" description="Number of writes into the L2 cache"/>
+
+  </category>
diff --git a/tools/gator/daemon/events-Mali-T76x_hw.xml b/tools/gator/daemon/events-Mali-T76x_hw.xml
new file mode 100644 (file)
index 0000000..be74c5a
--- /dev/null
@@ -0,0 +1,108 @@
+
+  <category name="Mali Job Manager" per_cpu="no">
+
+    <event counter="ARM_Mali-T76x_GPU_ACTIVE" title="Mali Job Manager Cycles" name="GPU cycles" description="Number of cycles GPU active"/>
+    <event counter="ARM_Mali-T76x_IRQ_ACTIVE" title="Mali Job Manager Cycles" name="IRQ cycles" description="Number of cycles GPU interrupt pending"/>
+    <event counter="ARM_Mali-T76x_JS0_ACTIVE" title="Mali Job Manager Cycles" name="JS0 cycles" description="Number of cycles JS0 (fragment) active"/>
+    <event counter="ARM_Mali-T76x_JS1_ACTIVE" title="Mali Job Manager Cycles" name="JS1 cycles" description="Number of cycles JS1 (vertex/tiler/compute) active"/>
+    <event counter="ARM_Mali-T76x_JS2_ACTIVE" title="Mali Job Manager Cycles" name="JS2 cycles" description="Number of cycles JS2 (vertex/compute) active"/>
+
+    <event counter="ARM_Mali-T76x_JS0_JOBS" title="Mali Job Manager Work" name="JS0 jobs" description="Number of Jobs (fragment) completed in JS0"/>
+    <event counter="ARM_Mali-T76x_JS0_TASKS" title="Mali Job Manager Work" name="JS0 tasks" description="Number of Tasks completed in JS0"/>
+    <event counter="ARM_Mali-T76x_JS1_JOBS" title="Mali Job Manager Work" name="JS1 jobs" description="Number of Jobs (vertex/tiler/compute) completed in JS1"/>
+    <event counter="ARM_Mali-T76x_JS1_TASKS" title="Mali Job Manager Work" name="JS1 tasks" description="Number of Tasks completed in JS1"/>
+    <event counter="ARM_Mali-T76x_JS2_TASKS" title="Mali Job Manager Work" name="JS2 tasks" description="Number of Tasks completed in JS2"/>
+    <event counter="ARM_Mali-T76x_JS2_JOBS" title="Mali Job Manager Work" name="JS2 jobs" description="Number of Jobs (vertex/compute) completed in JS2"/>
+
+  </category>
+
+  <category name="Mali Tiler" per_cpu="no">
+
+    <event counter="ARM_Mali-T76x_TI_ACTIVE" title="Mali Tiler Cycles" name="Tiler cycles" description="Number of cycles Tiler active"/>
+
+    <event counter="ARM_Mali-T76x_TI_POLYGONS" title="Mali Tiler Primitives" name="Polygons" description="Number of polygons processed"/>
+    <event counter="ARM_Mali-T76x_TI_QUADS" title="Mali Tiler Primitives" name="Quads" description="Number of quads processed"/>
+    <event counter="ARM_Mali-T76x_TI_TRIANGLES" title="Mali Tiler Primitives" name="Triangles" description="Number of triangles processed"/>
+    <event counter="ARM_Mali-T76x_TI_LINES" title="Mali Tiler Primitives" name="Lines" description="Number of lines processed"/>
+    <event counter="ARM_Mali-T76x_TI_POINTS" title="Mali Tiler Primitives" name="Points" description="Number of points processed"/>
+
+    <event counter="ARM_Mali-T76x_TI_FRONT_FACING" title="Mali Tiler Culling" name="Front facing prims" description="Number of front facing primitives"/>
+    <event counter="ARM_Mali-T76x_TI_BACK_FACING" title="Mali Tiler Culling" name="Back facing prims" description="Number of back facing primitives"/>
+    <event counter="ARM_Mali-T76x_TI_PRIM_VISIBLE" title="Mali Tiler Culling" name="Visible prims" description="Number of visible primitives"/>
+    <event counter="ARM_Mali-T76x_TI_PRIM_CULLED" title="Mali Tiler Culling" name="Culled prims" description="Number of culled primitives"/>
+    <event counter="ARM_Mali-T76x_TI_PRIM_CLIPPED" title="Mali Tiler Culling" name="Clipped prims" description="Number of clipped primitives"/>
+
+    <event counter="ARM_Mali-T76x_TI_LEVEL0" title="Mali Tiler Hierarchy" name="L0 prims" description="Number of primitives in hierarchy level 0"/>
+    <event counter="ARM_Mali-T76x_TI_LEVEL1" title="Mali Tiler Hierarchy" name="L1 prims" description="Number of primitives in hierarchy level 1"/>
+    <event counter="ARM_Mali-T76x_TI_LEVEL2" title="Mali Tiler Hierarchy" name="L2 prims" description="Number of primitives in hierarchy level 2"/>
+    <event counter="ARM_Mali-T76x_TI_LEVEL3" title="Mali Tiler Hierarchy" name="L3 prims" description="Number of primitives in hierarchy level 3"/>
+    <event counter="ARM_Mali-T76x_TI_LEVEL4" title="Mali Tiler Hierarchy" name="L4 prims" description="Number of primitives in hierarchy level 4"/>
+    <event counter="ARM_Mali-T76x_TI_LEVEL5" title="Mali Tiler Hierarchy" name="L5 prims" description="Number of primitives in hierarchy level 5"/>
+    <event counter="ARM_Mali-T76x_TI_LEVEL6" title="Mali Tiler Hierarchy" name="L6 prims" description="Number of primitives in hierarchy level 6"/>
+    <event counter="ARM_Mali-T76x_TI_LEVEL7" title="Mali Tiler Hierarchy" name="L7 prims" description="Number of primitives in hierarchy level 7"/>
+
+  </category>
+
+  <category name="Mali Shader Core" per_cpu="no">
+
+    <event counter="ARM_Mali-T76x_TRIPIPE_ACTIVE" title="Mali Core Cycles" name="Tripipe cycles" description="Number of cycles tripipe was active"/>
+    <event counter="ARM_Mali-T76x_FRAG_ACTIVE" title="Mali Core Cycles" name="Fragment cycles" description="Number of cycles fragment processing was active"/>
+    <event counter="ARM_Mali-T76x_COMPUTE_ACTIVE" title="Mali Core Cycles" name="Compute cycles" description="Number of cycles vertex/compute processing was active"/>
+    <event counter="ARM_Mali-T76x_FRAG_CYCLES_NO_TILE" title="Mali Core Cycles" name="Fragment cycles waiting for tile" description="Number of cycles spent waiting for a physical tile buffer"/>
+    <event counter="ARM_Mali-T76x_FRAG_CYCLES_FPKQ_ACTIVE" title="Mali Core Cycles" name="Fragment cycles pre-pipe buffer not empty" description="Number of cycles the pre-pipe queue contains quads"/>
+
+    <event counter="ARM_Mali-T76x_FRAG_THREADS" title="Mali Fragment Threads" name="Fragment threads" description="Number of fragment threads started"/>
+    <event counter="ARM_Mali-T76x_FRAG_DUMMY_THREADS" title="Mali Fragment Threads" name="Dummy fragment threads" description="Number of dummy fragment threads started"/>
+    <event counter="ARM_Mali-T76x_FRAG_THREADS_LZS_TEST" title="Mali Fragment Threads" name="Fragment threads doing late ZS" description="Number of threads doing late ZS test"/>
+    <event counter="ARM_Mali-T76x_FRAG_THREADS_LZS_KILLED" title="Mali Fragment Threads" name="Fragment threads killed late ZS" description="Number of threads killed by late ZS test"/>
+
+    <event counter="ARM_Mali-T76x_COMPUTE_TASKS" title="Mali Compute Tasks" name="Compute tasks" description="Number of compute tasks"/>
+    <event counter="ARM_Mali-T76x_COMPUTE_THREADS" title="Mali Compute Threads" name="Compute threads" description="Number of compute threads started"/>
+
+    <event counter="ARM_Mali-T76x_FRAG_PRIMITIVES" title="Mali Fragment Primitives" name="Primitives loaded" description="Number of primitives loaded from tiler"/>
+    <event counter="ARM_Mali-T76x_FRAG_PRIMITIVES_DROPPED" title="Mali Fragment Primitives" name="Primitives dropped" description="Number of primitives dropped because out of tile"/>
+
+    <event counter="ARM_Mali-T76x_FRAG_QUADS_RAST" title="Mali Fragment Quads" name="Quads rasterized" description="Number of quads rasterized"/>
+    <event counter="ARM_Mali-T76x_FRAG_QUADS_EZS_TEST" title="Mali Fragment Quads" name="Quads doing early ZS" description="Number of quads doing early ZS test"/>
+    <event counter="ARM_Mali-T76x_FRAG_QUADS_EZS_KILLED" title="Mali Fragment Quads" name="Quads killed early Z" description="Number of quads killed by early ZS test"/>
+
+    <event counter="ARM_Mali-T76x_FRAG_NUM_TILES" title="Mali Fragment Tasks" name="Tiles rendered" description="Number of tiles rendered"/>
+    <event counter="ARM_Mali-T76x_FRAG_TRANS_ELIM" title="Mali Fragment Tasks" name="Tile writes killed by TE" description="Number of tile writes skipped by transaction elimination"/>
+
+    <event counter="ARM_Mali-T76x_ARITH_WORDS" title="Mali Arithmetic Pipe" name="A instructions" description="Number of instructions completed by the A-pipe (normalized per pipeline)"/>
+
+    <event counter="ARM_Mali-T76x_LS_WORDS" title="Mali Load/Store Pipe" name="LS instructions" description="Number of instructions completed by the LS-pipe"/>
+    <event counter="ARM_Mali-T76x_LS_ISSUES" title="Mali Load/Store Pipe" name="LS instruction issues" description="Number of instructions issued to the LS-pipe, including restarts"/>
+
+    <event counter="ARM_Mali-T76x_TEX_WORDS" title="Mali Texture Pipe" name="T instructions" description="Number of instructions completed by the T-pipe"/>
+    <event counter="ARM_Mali-T76x_TEX_ISSUES" title="Mali Texture Pipe" name="T instruction issues" description="Number of threads through loop 2 address calculation"/>
+    <event counter="ARM_Mali-T76x_TEX_RECIRC_FMISS" title="Mali Texture Pipe" name="Cache misses" description="Number of instructions in the T-pipe, recirculated due to cache miss"/>
+
+    <event counter="ARM_Mali-T76x_LSC_READ_HITS" title="Mali Load/Store Cache" name="Read hits" description="Number of read hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-T76x_LSC_READ_MISSES" title="Mali Load/Store Cache" name="Read misses" description="Number of read misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-T76x_LSC_WRITE_HITS" title="Mali Load/Store Cache" name="Write hits" description="Number of write hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-T76x_LSC_WRITE_MISSES" title="Mali Load/Store Cache" name="Write misses" description="Number of write misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-T76x_LSC_ATOMIC_HITS" title="Mali Load/Store Cache" name="Atomic hits" description="Number of atomic hits in the Load/Store cache"/>
+    <event counter="ARM_Mali-T76x_LSC_ATOMIC_MISSES" title="Mali Load/Store Cache" name="Atomic misses" description="Number of atomic misses in the Load/Store cache"/>
+    <event counter="ARM_Mali-T76x_LSC_LINE_FETCHES" title="Mali Load/Store Cache" name="Line fetches" description="Number of line fetches in the Load/Store cache"/>
+    <event counter="ARM_Mali-T76x_LSC_DIRTY_LINE" title="Mali Load/Store Cache" name="Dirty line evictions" description="Number of dirty line evictions in the Load/Store cache"/>
+    <event counter="ARM_Mali-T76x_LSC_SNOOPS" title="Mali Load/Store Cache" name="Snoops in to LSC" description="Number of coherent memory snoops in to the Load/Store cache"/>
+
+  </category>
+
+  <category name="Mali L2 Cache" per_cpu="no">
+
+    <event counter="ARM_Mali-T76x_L2_EXT_WRITE_BEATS" title="Mali L2 Cache" name="External write beats" description="Number of external bus write beats"/>
+    <event counter="ARM_Mali-T76x_L2_EXT_READ_BEATS" title="Mali L2 Cache" name="External read beats" description="Number of external bus read beats"/>
+    <event counter="ARM_Mali-T76x_L2_READ_SNOOP" title="Mali L2 Cache" name="Read snoops" description="Number of read transaction snoops"/>
+    <event counter="ARM_Mali-T76x_L2_READ_HIT" title="Mali L2 Cache" name="L2 read hits" description="Number of reads hitting in the L2 cache"/>
+    <event counter="ARM_Mali-T76x_L2_WRITE_SNOOP" title="Mali L2 Cache" name="Write snoops" description="Number of write transaction snoops"/>
+    <event counter="ARM_Mali-T76x_L2_WRITE_HIT" title="Mali L2 Cache" name="L2 write hits" description="Number of writes hitting in the L2 cache"/>
+    <event counter="ARM_Mali-T76x_L2_EXT_AR_STALL" title="Mali L2 Cache" name="External bus stalls (AR)" description="Number of cycles a valid read address (AR) is stalled by the external interconnect"/>
+    <event counter="ARM_Mali-T76x_L2_EXT_W_STALL" title="Mali L2 Cache" name="External bus stalls (W)" description="Number of cycles a valid write data (W channel) is stalled by the external interconnect"/>
+    <event counter="ARM_Mali-T76x_L2_EXT_R_BUF_FULL" title="Mali L2 Cache" name="External bus response buffer full" description="Number of cycles a valid request is blocked by a full response buffer"/>
+    <event counter="ARM_Mali-T76x_L2_EXT_RD_BUF_FULL" title="Mali L2 Cache" name="External bus read data buffer full" description="Number of cycles a valid request is blocked by a full read data buffer"/>
+    <event counter="ARM_Mali-T76x_L2_EXT_W_BUF_FULL" title="Mali L2 Cache" name="External bus write buffer full" description="Number of cycles a valid request is blocked by a full write buffer"/>
+    <event counter="ARM_Mali-T76x_L2_READ_LOOKUP" title="Mali L2 Cache" name="L2 read lookups" description="Number of reads into the L2 cache"/>
+    <event counter="ARM_Mali-T76x_L2_WRITE_LOOKUP" title="Mali L2 Cache" name="L2 write lookups" description="Number of writes into the L2 cache"/>
+  </category>
diff --git a/tools/gator/daemon/events-Mali-V500.xml b/tools/gator/daemon/events-Mali-V500.xml
new file mode 100644 (file)
index 0000000..89bc7f4
--- /dev/null
@@ -0,0 +1,30 @@
+  <category name="Mali-V500">
+    <event counter="ARM_Mali-V500_cnt0" title="MVE-V500 Stats" name="Samples" class="absolute" description="The number of times we have taken a sample"/>
+    <event counter="ARM_Mali-V500_cnt1" title="MVE-V500 Input Totals" name="Queued input-buffers" class="absolute" description="The number of input-buffers that has been queued for consumption by the MVE"/>
+    <event counter="ARM_Mali-V500_cnt2" title="MVE-V500 Input Totals" name="Consumed input-buffers" class="absolute" description="The number of input-buffers that has been consumed by the MVE and returned to the application"/>
+    <event counter="ARM_Mali-V500_cnt3" title="MVE-V500 Output Totals" name="Queued output-buffers" class="absolute" description="The number of output-buffers that has been queued for usage by the MVE"/>
+    <event counter="ARM_Mali-V500_cnt4" title="MVE-V500 Output Totals" name="Consumed output-buffers" class="absolute" description="The number of output-buffers that has been consumed by the MVE and returned to the application"/>
+    <event counter="ARM_Mali-V500_cnt5" title="MVE-V500 Stats" name="Created Sessions" class="absolute" description="The number of created sessions throughout the lifetime of the process"/>
+    <event counter="ARM_Mali-V500_cnt6" title="MVE-V500 Sessions" name="Active Sessions" description="The number of currently existing sessions"/>
+    <event counter="ARM_Mali-V500_cnt7" title="MVE-V500 Stats" name="Processed Frames" class="absolute" description="The number of processed frames. A processed frame is one where the encode or decode is complete for that particular frame. Frames can be processed out of order so this is not the same as the number of output-buffers returned"/>
+    <event counter="ARM_Mali-V500_cnt8" title="MVE-V500 Input Totals" name="Input Flushes Requested" class="absolute" description="The number of requested flushes of the input queue"/>
+    <event counter="ARM_Mali-V500_cnt9" title="MVE-V500 Input Totals" name="Input Flushes Complete" class="absolute" description="The number of completed flushes of the input queue"/>
+    <event counter="ARM_Mali-V500_cnt10" title="MVE-V500 Output Totals" name="Output Flushes Requested" class="absolute" description="The number of requested flushes of the output queue"/>
+    <event counter="ARM_Mali-V500_cnt11" title="MVE-V500 Output Totals" name="Output Flushes Complete" class="absolute" description="The number of completed flushes of the output queue"/>
+    <event counter="ARM_Mali-V500_cnt12" title="MVE-V500 Output" name="Queued Output Buffers (current)" description="The number of output-buffers that are currently queued for usage by the MVE"/>
+    <event counter="ARM_Mali-V500_cnt13" title="MVE-V500 Input" name="Queued Input Buffers (current)" description="The number of input-buffers that are currently queued for consumption by the MVE"/>
+    <event counter="ARM_Mali-V500_cnt14" title="MVE-V500 Output" name="Output Queue Flushes" description="The number of pending flushes for the MVE output-queue"/>
+    <event counter="ARM_Mali-V500_cnt15" title="MVE-V500 Input" name="Input Queue Flushes" description="The number of pending flushes for the MVE input-queue"/>
+    <event counter="ARM_Mali-V500_cnt16" title="MVE-V500 Stats" name="Errors encountered" class="absolute" description="The number of errors encountered"/>
+    <event counter="ARM_Mali-V500_cnt17" title="MVE-V500 Bandwidth" name="Bits consumed" class="absolute" description="The number of bits consumed during decode"/>
+    <event counter="ARM_Mali-V500_cnt18" title="MVE-V500 Bandwidth" name="AFBC bandwidth" class="absolute" description="The amount of AFBC-encoded bytes read or written"/>
+    <event counter="ARM_Mali-V500_cnt19" title="MVE-V500 Bandwidth" name="Bandwidth (read)" class="absolute" description="The amount of bytes read over the AXI bus"/>
+    <event counter="ARM_Mali-V500_cnt20" title="MVE-V500 Bandwidth" name="Bandwidth (write)" class="absolute" description="The amount of bytes written over the AXI bus"/>
+    <event counter="ARM_Mali-V500_evn0" title="MVE-V500 Sessions" name="Session created" description="Generated when a session has been created"/>
+    <event counter="ARM_Mali-V500_evn1" title="MVE-V500 Sessions" name="Session destroyed" description="Generated when a session has been destroyed"/>
+    <event counter="ARM_Mali-V500_evn2" title="MVE-V500 Frames" name="Frame Processed" description="Generated when the MVE has finished processing a frame"/>
+    <event counter="ARM_Mali-V500_evn3" title="MVE-V500 Output" name="Output buffer received" description="Generated when an output buffer is returned to us from the MVE"/>
+    <event counter="ARM_Mali-V500_evn4" title="MVE-V500 Input" name="Input buffer received" description="Generated when an input buffer is returned to us from the MVE"/>
+    <event counter="ARM_Mali-V500_act0" title="MVE-V500 Parsed" name="Activity" class="activity" activity1="activity" activity_color1="0x000000ff" rendering_type="bar" average_selection="yes" average_cores="yes" percentage="yes" cores="8" description="Mali-V500 Activity"/>
+    <event counter="ARM_Mali-V500_act1" title="MVE-V500 Piped" name="Activity" class="activity" activity1="activity" activity_color1="0x0000ff00" rendering_type="bar" average_selection="yes" average_cores="yes" percentage="yes" cores="8" description="Mali-V500 Activity"/>
+  </category>
diff --git a/tools/gator/daemon/events-ftrace.xml b/tools/gator/daemon/events-ftrace.xml
new file mode 100644 (file)
index 0000000..33ab7aa
--- /dev/null
@@ -0,0 +1,7 @@
+  <category name="Ftrace">
+    <!-- counter attribute must start with ftrace_ and be unique -->
+    <!-- regex item in () is the value shown -->
+    <!--
+    <event counter="ftrace_trace_marker_numbers" title="ftrace" name="trace_marker" class="absolute" regex="([0-9]+)" description="Numbers written to /sys/kernel/debug/tracing/trace_marker, ex: echo 42 > /sys/kernel/debug/tracing/trace_marker"/>
+    -->
+  </category>
index 1275aef1cb7938ffe804f7168095c7cd47b6e187..fbce1e15d0d015da52111fb80245e88f84c4ab23 100644 (file)
 #include <sys/wait.h>
 #include <unistd.h>
 
+#include "CCNDriver.h"
 #include "Child.h"
-#include "KMod.h"
+#include "EventsXML.h"
 #include "Logging.h"
+#include "Monitor.h"
 #include "OlySocket.h"
 #include "OlyUtility.h"
 #include "SessionData.h"
-
-#define DEBUG false
+#include "Setup.h"
 
 extern Child* child;
 static int shutdownFilesystem();
 static pthread_mutex_t numSessions_mutex;
-static int numSessions = 0;
 static OlyServerSocket* sock = NULL;
+static Monitor monitor;
+static int numSessions = 0;
 static bool driverRunningAtStart = false;
 static bool driverMountedAtStart = false;
 
 struct cmdline_t {
+       char *module;
        int port;
-       char* module;
+       bool update;
 };
 
 #define DEFAULT_PORT 8080
@@ -102,42 +105,7 @@ static void child_exit(int) {
        }
 }
 
-static int udpPort(int port) {
-       int s;
-       struct sockaddr_in6 sockaddr;
-       int on;
-       int family = AF_INET6;
-
-       s = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP);
-       if (s == -1) {
-               family = AF_INET;
-               s = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
-               if (s == -1) {
-                       logg->logError(__FILE__, __LINE__, "socket failed");
-                       handleException();
-               }
-       }
-
-       on = 1;
-       if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (const char*)&on, sizeof(on)) != 0) {
-               logg->logError(__FILE__, __LINE__, "setsockopt failed");
-               handleException();
-       }
-
-       memset((void*)&sockaddr, 0, sizeof(sockaddr));
-       sockaddr.sin6_family = family;
-       sockaddr.sin6_port = htons(port);
-       sockaddr.sin6_addr = in6addr_any;
-       if (bind(s, (struct sockaddr *)&sockaddr, sizeof(sockaddr)) < 0) {
-               logg->logError(__FILE__, __LINE__, "socket failed");
-               handleException();
-       }
-
-       return s;
-}
-
-#define UDP_ANS_PORT 30000
-#define UDP_REQ_PORT 30001
+static const int UDP_REQ_PORT = 30001;
 
 typedef struct {
        char rviHeader[8];
@@ -149,50 +117,97 @@ typedef struct {
        uint32_t ipAddress;
        uint32_t defaultGateway;
        uint32_t subnetMask;
-       uint32_t activeConnections; 
+       uint32_t activeConnections;
 } RVIConfigureInfo;
 
 static const char DST_REQ[] = { 'D', 'S', 'T', '_', 'R', 'E', 'Q', ' ', 0, 0, 0, 0x64 };
 
-static void* answerThread(void* pVoid) {
-       prctl(PR_SET_NAME, (unsigned long)&"gatord-discover", 0, 0, 0);
-       const struct cmdline_t * const cmdline = (struct cmdline_t *)pVoid;
-       RVIConfigureInfo dstAns;
-       int req = udpPort(UDP_REQ_PORT);
-       int ans = udpPort(UDP_ANS_PORT);
-
-       // Format the answer buffer
-       memset(&dstAns, 0, sizeof(dstAns));
-       memcpy(dstAns.rviHeader, "STR_ANS ", sizeof(dstAns.rviHeader));
-       if (gethostname(dstAns.dhcpName, sizeof(dstAns.dhcpName) - 1) != 0) {
-               logg->logError(__FILE__, __LINE__, "gethostname failed");
-               handleException();
+class UdpListener {
+public:
+       UdpListener() : mDstAns(), mReq(-1) {}
+
+       void setup(int port) {
+               mReq = udpPort(UDP_REQ_PORT);
+
+               // Format the answer buffer
+               memset(&mDstAns, 0, sizeof(mDstAns));
+               memcpy(mDstAns.rviHeader, "STR_ANS ", sizeof(mDstAns.rviHeader));
+               if (gethostname(mDstAns.dhcpName, sizeof(mDstAns.dhcpName) - 1) != 0) {
+                       logg->logError(__FILE__, __LINE__, "gethostname failed");
+                       handleException();
+               }
+               // Subvert the defaultGateway field for the port number
+               if (port != DEFAULT_PORT) {
+                       mDstAns.defaultGateway = port;
+               }
+               // Subvert the subnetMask field for the protocol version
+               mDstAns.subnetMask = PROTOCOL_VERSION;
        }
-       // Subvert the defaultGateway field for the port number
-       if (cmdline->port != DEFAULT_PORT) {
-               dstAns.defaultGateway = cmdline->port;
+
+       int getReq() const {
+               return mReq;
        }
-       // Subvert the subnetMask field for the protocol version
-       dstAns.subnetMask = PROTOCOL_VERSION;
 
-       for (;;) {
+       void handle() {
                char buf[128];
                struct sockaddr_in6 sockaddr;
                socklen_t addrlen;
                int read;
                addrlen = sizeof(sockaddr);
-               read = recvfrom(req, &buf, sizeof(buf), 0, (struct sockaddr *)&sockaddr, &addrlen);
+               read = recvfrom(mReq, &buf, sizeof(buf), 0, (struct sockaddr *)&sockaddr, &addrlen);
                if (read < 0) {
                        logg->logError(__FILE__, __LINE__, "recvfrom failed");
                        handleException();
                } else if ((read == 12) && (memcmp(buf, DST_REQ, sizeof(DST_REQ)) == 0)) {
-                       if (sendto(ans, &dstAns, sizeof(dstAns), 0, (struct sockaddr *)&sockaddr, addrlen) != sizeof(dstAns)) {
-                               logg->logError(__FILE__, __LINE__, "sendto failed");
+                       // Don't care if sendto fails - gatord shouldn't exit because of it and Streamline will retry
+                       sendto(mReq, &mDstAns, sizeof(mDstAns), 0, (struct sockaddr *)&sockaddr, addrlen);
+               }
+       }
+
+       void close() {
+               ::close(mReq);
+       }
+
+private:
+       int udpPort(int port) {
+               int s;
+               struct sockaddr_in6 sockaddr;
+               int on;
+               int family = AF_INET6;
+
+               s = socket_cloexec(AF_INET6, SOCK_DGRAM, IPPROTO_UDP);
+               if (s == -1) {
+                       family = AF_INET;
+                       s = socket_cloexec(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+                       if (s == -1) {
+                               logg->logError(__FILE__, __LINE__, "socket failed");
                                handleException();
                        }
                }
+
+               on = 1;
+               if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (const char*)&on, sizeof(on)) != 0) {
+                       logg->logError(__FILE__, __LINE__, "setsockopt failed");
+                       handleException();
+               }
+
+               memset((void*)&sockaddr, 0, sizeof(sockaddr));
+               sockaddr.sin6_family = family;
+               sockaddr.sin6_port = htons(port);
+               sockaddr.sin6_addr = in6addr_any;
+               if (bind(s, (struct sockaddr *)&sockaddr, sizeof(sockaddr)) < 0) {
+                       logg->logError(__FILE__, __LINE__, "socket failed");
+                       handleException();
+               }
+
+               return s;
        }
-}
+
+       RVIConfigureInfo mDstAns;
+       int mReq;
+};
+
+static UdpListener udpListener;
 
 // retval: -1 = failure; 0 = was already mounted; 1 = successfully mounted
 static int mountGatorFS() {
@@ -212,13 +227,13 @@ static int mountGatorFS() {
 
 static bool init_module (const char * const location) {
        bool ret(false);
-       const int fd = open(location, O_RDONLY);
+       const int fd = open(location, O_RDONLY | O_CLOEXEC);
        if (fd >= 0) {
                struct stat st;
                if (fstat(fd, &st) == 0) {
                        void * const p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
                        if (p != MAP_FAILED) {
-                               if (syscall(__NR_init_module, p, st.st_size, "") == 0) {
+                               if (syscall(__NR_init_module, p, (unsigned long)st.st_size, "") == 0) {
                                        ret = true;
                                }
                                munmap(p, st.st_size);
@@ -264,8 +279,14 @@ static bool setupFilesystem(char* module) {
                }
 
                if (access(location, F_OK) == -1) {
-                       // The gator kernel is not already loaded and unable to locate gator.ko
-                       return false;
+                       if (module == NULL) {
+                               // The gator kernel is not already loaded and unable to locate gator.ko in the default location
+                               return false;
+                       } else {
+                               // gator location specified on the command line but it was not found
+                               logg->logError(__FILE__, __LINE__, "gator module not found at %s", location);
+                               handleException();
+                       }
                }
 
                // Load driver
@@ -305,10 +326,26 @@ static int shutdownFilesystem() {
        return 0; // success
 }
 
+static const char OPTSTRING[] = "hvudap:s:c:e:m:o:";
+
+static bool hasDebugFlag(int argc, char** argv) {
+       int c;
+
+       optind = 1;
+       while ((c = getopt(argc, argv, OPTSTRING)) != -1) {
+               if (c == 'd') {
+                       return true;
+               }
+       }
+
+       return false;
+}
+
 static struct cmdline_t parseCommandLine(int argc, char** argv) {
        struct cmdline_t cmdline;
        cmdline.port = DEFAULT_PORT;
        cmdline.module = NULL;
+       cmdline.update = false;
        char version_string[256]; // arbitrary length to hold the version information
        int c;
 
@@ -319,11 +356,15 @@ static struct cmdline_t parseCommandLine(int argc, char** argv) {
                snprintf(version_string, sizeof(version_string), "Streamline gatord development version %d", PROTOCOL_VERSION);
        }
 
-       while ((c = getopt(argc, argv, "hvp:s:c:e:m:o:")) != -1) {
-               switch(c) {
+       optind = 1;
+       while ((c = getopt(argc, argv, OPTSTRING)) != -1) {
+               switch (c) {
                        case 'c':
                                gSessionData->mConfigurationXMLPath = optarg;
                                break;
+                       case 'd':
+                               // Already handled
+                               break;
                        case 'e':
                                gSessionData->mEventsXMLPath = optarg;
                                break;
@@ -339,6 +380,12 @@ static struct cmdline_t parseCommandLine(int argc, char** argv) {
                        case 'o':
                                gSessionData->mTargetPath = optarg;
                                break;
+                       case 'u':
+                               cmdline.update = true;
+                               break;
+                       case 'a':
+                               gSessionData->mAllowCommands = true;
+                               break;
                        case 'h':
                        case '?':
                                logg->logError(__FILE__, __LINE__,
@@ -348,9 +395,11 @@ static struct cmdline_t parseCommandLine(int argc, char** argv) {
                                        "-h              this help page\n"
                                        "-m module       path and filename of gator.ko\n"
                                        "-p port_number  port upon which the server listens; default is 8080\n"
-                                       "-s session_xml  path and filename of a session xml used for local capture\n"
+                                       "-s session_xml  path and filename of a session.xml used for local capture\n"
                                        "-o apc_dir      path and name of the output for a local capture\n"
                                        "-v              version information\n"
+                                       "-d              enable debug messages\n"
                                        "-a              allow the user to provide a command to run at the start of a capture"
                                        , version_string);
                                handleException();
                                break;
@@ -380,18 +429,60 @@ static struct cmdline_t parseCommandLine(int argc, char** argv) {
        return cmdline;
 }
 
+static void handleClient() {
+       OlySocket client(sock->acceptConnection());
+
+       int pid = fork();
+       if (pid < 0) {
+               // Error
+               logg->logError(__FILE__, __LINE__, "Fork process failed. Please power cycle the target device if this error persists.");
+       } else if (pid == 0) {
+               // Child
+               sock->closeServerSocket();
+               udpListener.close();
+               monitor.close();
+               child = new Child(&client, numSessions + 1);
+               child->run();
+               delete child;
+               exit(0);
+       } else {
+               // Parent
+               client.closeSocket();
+
+               pthread_mutex_lock(&numSessions_mutex);
+               numSessions++;
+               pthread_mutex_unlock(&numSessions_mutex);
+
+               // Maximum number of connections is 2
+               int wait = 0;
+               while (numSessions > 1) {
+                       // Throttle until one of the children exits before continuing to accept another socket connection
+                       logg->logMessage("%d sessions active!", numSessions);
+                       if (wait++ >= 10) { // Wait no more than 10 seconds
+                               // Kill last created child
+                               kill(pid, SIGALRM);
+                               break;
+                       }
+                       sleep(1);
+               }
+       }
+}
+
 // Gator data flow: collector -> collector fifo -> sender
 int main(int argc, char** argv) {
        // Ensure proper signal handling by making gatord the process group leader
        //   e.g. it may not be the group leader when launched as 'sudo gatord'
        setsid();
 
-       logg = new Logging(DEBUG);  // Set up global thread-safe logging
-       gSessionData = new SessionData(); // Global data class
-       util = new OlyUtility();        // Set up global utility class
+  // Set up global thread-safe logging
+       logg = new Logging(hasDebugFlag(argc, argv));
+       // Global data class
+       gSessionData = new SessionData();
+       // Set up global utility class
+       util = new OlyUtility();
 
        // Initialize drivers
-       new KMod();
+       new CCNDriver();
 
        prctl(PR_SET_NAME, (unsigned long)&"gatord-main", 0, 0, 0);
        pthread_mutex_init(&numSessions_mutex, NULL);
@@ -408,6 +499,10 @@ int main(int argc, char** argv) {
        // Parse the command line parameters
        struct cmdline_t cmdline = parseCommandLine(argc, argv);
 
+       if (cmdline.update) {
+               return update(argv[0]);
+       }
+
        // Verify root permissions
        uid_t euid = geteuid();
        if (euid) {
@@ -420,16 +515,24 @@ int main(int argc, char** argv) {
                logg->logMessage("Unable to setup gatorfs, trying perf");
                if (!gSessionData->perf.setup()) {
                        logg->logError(__FILE__, __LINE__,
-                                                                                "Unable to locate gator.ko driver:\n"
-                                                                                "  >>> gator.ko should be co-located with gatord in the same directory\n"
-                                                                                "  >>> OR insmod gator.ko prior to launching gatord\n"
-                                                                                "  >>> OR specify the location of gator.ko on the command line\n"
-                                                                                "  >>> OR run Linux 3.12 or later with perf support to collect data via userspace only");
+                                      "Unable to locate gator.ko driver:\n"
+                                      "  >>> gator.ko should be co-located with gatord in the same directory\n"
+                                      "  >>> OR insmod gator.ko prior to launching gatord\n"
+                                      "  >>> OR specify the location of gator.ko on the command line\n"
+                                      "  >>> OR run Linux 3.4 or later with perf (CONFIG_PERF_EVENTS and CONFIG_HW_PERF_EVENTS) and tracing (CONFIG_TRACING and CONFIG_CONTEXT_SWITCH_TRACER) support to collect data via userspace only");
                        handleException();
                }
        }
 
-       gSessionData->hwmon.setup();
+       {
+               EventsXML eventsXML;
+               mxml_node_t *xml = eventsXML.getTree();
+               // Initialize all drivers
+               for (Driver *driver = Driver::getHead(); driver != NULL; driver = driver->getNext()) {
+                       driver->readEvents(xml);
+               }
+               mxmlDelete(xml);
+       }
 
        // Handle child exit codes
        signal(SIGCHLD, child_exit);
@@ -444,47 +547,33 @@ int main(int argc, char** argv) {
                child->run();
                delete child;
        } else {
-               pthread_t answerThreadID;
-               if (pthread_create(&answerThreadID, NULL, answerThread, &cmdline)) {
-                       logg->logError(__FILE__, __LINE__, "Failed to create answer thread");
+               gSessionData->annotateListener.setup();
+               sock = new OlyServerSocket(cmdline.port);
+               udpListener.setup(cmdline.port);
+               if (!monitor.init() ||
+                               !monitor.add(sock->getFd()) ||
+                               !monitor.add(udpListener.getReq()) ||
+                               !monitor.add(gSessionData->annotateListener.getFd()) ||
+                               false) {
+                       logg->logError(__FILE__, __LINE__, "Monitor setup failed");
                        handleException();
                }
-               sock = new OlyServerSocket(cmdline.port);
                // Forever loop, can be exited via a signal or exception
                while (1) {
+                       struct epoll_event events[2];
                        logg->logMessage("Waiting on connection...");
-                       OlySocket client(sock->acceptConnection());
-
-                       int pid = fork();
-                       if (pid < 0) {
-                               // Error
-                               logg->logError(__FILE__, __LINE__, "Fork process failed. Please power cycle the target device if this error persists.");
-                       } else if (pid == 0) {
-                               // Child
-                               sock->closeServerSocket();
-                               child = new Child(&client, numSessions + 1);
-                               child->run();
-                               delete child;
-                               exit(0);
-                       } else {
-                               // Parent
-                               client.closeSocket();
-
-                               pthread_mutex_lock(&numSessions_mutex);
-                               numSessions++;
-                               pthread_mutex_unlock(&numSessions_mutex);
-
-                               // Maximum number of connections is 2
-                               int wait = 0;
-                               while (numSessions > 1) {
-                                       // Throttle until one of the children exits before continuing to accept another socket connection
-                                       logg->logMessage("%d sessions active!", numSessions);
-                                       if (wait++ >= 10) { // Wait no more than 10 seconds
-                                               // Kill last created child
-                                               kill(pid, SIGALRM);
-                                               break;
-                                       }
-                                       sleep(1);
+                       int ready = monitor.wait(events, ARRAY_LENGTH(events), -1);
+                       if (ready < 0) {
+                               logg->logError(__FILE__, __LINE__, "Monitor::wait failed");
+                               handleException();
+                       }
+                       for (int i = 0; i < ready; ++i) {
+                               if (events[i].data.fd == sock->getFd()) {
+                                       handleClient();
+                               } else if (events[i].data.fd == udpListener.getReq()) {
+                                       udpListener.handle();
+                               } else if (events[i].data.fd == gSessionData->annotateListener.getFd()) {
+                                       gSessionData->annotateListener.handle();
                                }
                        }
                }
index 1f59ba34a474dcfb566c4b1cd5001f9cb9ab6e4f..ad6df1d7debec4eccef162f6c9dbcbfd8a655725 100644 (file)
@@ -1,10 +1,10 @@
 /* config.h.  Generated from config.h.in by configure.  */
 /*
- * "$Id: config.h.in 408 2010-09-19 05:26:46Z mike $"
+ * "$Id: config.h.in 451 2014-01-04 21:50:06Z msweet $"
  *
  * Configuration file for Mini-XML, a small XML-like file parsing library.
  *
- * Copyright 2003-2010 by Michael R Sweet.
+ * Copyright 2003-2014 by Michael R Sweet.
  *
  * These coded instructions, statements, and computer programs are the
  * property of Michael R Sweet and are protected by Federal copyright
@@ -12,7 +12,7 @@
  * which should have been included with this file.  If this file is
  * missing or damaged, see the license at:
  *
- *     http://www.minixml.org/
+ *     http://www.msweet.org/projects.php/Mini-XML
  */
 
 /*
@@ -30,7 +30,7 @@
  * Version number...
  */
 
-#define MXML_VERSION "Mini-XML v2.7"
+#define MXML_VERSION "Mini-XML v2.8"
 
 
 /*
@@ -92,5 +92,5 @@ extern int    _mxml_vsnprintf(char *, size_t, const char *, va_list);
 #  endif /* !HAVE_VSNPRINTF */
 
 /*
- * End of "$Id: config.h.in 408 2010-09-19 05:26:46Z mike $".
+ * End of "$Id: config.h.in 451 2014-01-04 21:50:06Z msweet $".
  */
index c9950f5fb73279c0f162b73fbff8937538ec9870..8e89cc1474f8819e27100bafa83d6ad57926236a 100644 (file)
@@ -1,9 +1,9 @@
 /*
- * "$Id: mxml-attr.c 408 2010-09-19 05:26:46Z mike $"
+ * "$Id: mxml-attr.c 451 2014-01-04 21:50:06Z msweet $"
  *
  * Attribute support code for Mini-XML, a small XML-like file parsing library.
  *
- * Copyright 2003-2010 by Michael R Sweet.
+ * Copyright 2003-2014 by Michael R Sweet.
  *
  * These coded instructions, statements, and computer programs are the
  * property of Michael R Sweet and are protected by Federal copyright
  * which should have been included with this file.  If this file is
  * missing or damaged, see the license at:
  *
- *     http://www.minixml.org/
- *
- * Contents:
- *
- *   mxmlElementDeleteAttr() - Delete an attribute.
- *   mxmlElementGetAttr()    - Get an attribute.
- *   mxmlElementSetAttr()    - Set an attribute.
- *   mxmlElementSetAttrf()   - Set an attribute with a formatted value.
- *   mxml_set_attr()         - Set or add an attribute name/value pair.
+ *     http://www.msweet.org/projects.php/Mini-XML
  */
 
 /*
@@ -90,6 +82,9 @@ mxmlElementDeleteAttr(mxml_node_t *node,/* I - Element */
         memmove(attr, attr + 1, i * sizeof(mxml_attr_t));
 
       node->value.element.num_attrs --;
+
+      if (node->value.element.num_attrs == 0)
+        free(node->value.element.attrs);
       return;
     }
   }
@@ -315,5 +310,5 @@ mxml_set_attr(mxml_node_t *node,    /* I - Element node */
 
 
 /*
- * End of "$Id: mxml-attr.c 408 2010-09-19 05:26:46Z mike $".
+ * End of "$Id: mxml-attr.c 451 2014-01-04 21:50:06Z msweet $".
  */
index c5c9f61f73c24e9fe7d82c5befcaefc4900e7b71..0d11df6a70bc12d74219516bd2fb26e83b93cf5f 100644 (file)
@@ -1,10 +1,10 @@
 /*
- * "$Id: mxml-entity.c 408 2010-09-19 05:26:46Z mike $"
+ * "$Id: mxml-entity.c 451 2014-01-04 21:50:06Z msweet $"
  *
  * Character entity support code for Mini-XML, a small XML-like
  * file parsing library.
  *
- * Copyright 2003-2010 by Michael R Sweet.
+ * Copyright 2003-2014 by Michael R Sweet.
  *
  * These coded instructions, statements, and computer programs are the
  * property of Michael R Sweet and are protected by Federal copyright
  * which should have been included with this file.  If this file is
  * missing or damaged, see the license at:
  *
- *     http://www.minixml.org/
- *
- * Contents:
- *
- *   mxmlEntityAddCallback()    - Add a callback to convert entities to
- *                                Unicode.
- *   mxmlEntityGetName()        - Get the name that corresponds to the
- *                                character value.
- *   mxmlEntityGetValue()       - Get the character corresponding to a named
- *                                entity.
- *   mxmlEntityRemoveCallback() - Remove a callback.
- *   _mxml_entity_cb()          - Lookup standard (X)HTML entities.
+ *     http://www.msweet.org/projects.php/Mini-XML
  */
 
 /*
@@ -456,5 +445,5 @@ _mxml_entity_cb(const char *name)   /* I - Entity name */
 
 
 /*
- * End of "$Id: mxml-entity.c 408 2010-09-19 05:26:46Z mike $".
+ * End of "$Id: mxml-entity.c 451 2014-01-04 21:50:06Z msweet $".
  */
index 7860ee5f8370664eddc7b9211bec69bb359aaee0..3812c253fc3e3788b72fbf75a0e2e29f4c0f78cd 100644 (file)
@@ -1,9 +1,9 @@
 /*
- * "$Id: mxml-file.c 438 2011-03-24 05:47:51Z mike $"
+ * "$Id: mxml-file.c 455 2014-01-05 03:28:03Z msweet $"
  *
  * File loading code for Mini-XML, a small XML-like file parsing library.
  *
- * Copyright 2003-2011 by Michael R Sweet.
+ * Copyright 2003-2014 by Michael R Sweet.
  *
  * These coded instructions, statements, and computer programs are the
  * property of Michael R Sweet and are protected by Federal copyright
  * which should have been included with this file.  If this file is
  * missing or damaged, see the license at:
  *
- *     http://www.minixml.org/
- *
- * Contents:
- *
- *   mxmlLoadFd()            - Load a file descriptor into an XML node tree.
- *   mxmlLoadFile()          - Load a file into an XML node tree.
- *   mxmlLoadString()        - Load a string into an XML node tree.
- *   mxmlSaveAllocString()   - Save an XML tree to an allocated string.
- *   mxmlSaveFd()            - Save an XML tree to a file descriptor.
- *   mxmlSaveFile()          - Save an XML tree to a file.
- *   mxmlSaveString()        - Save an XML node tree to a string.
- *   mxmlSAXLoadFd()         - Load a file descriptor into an XML node tree
- *                             using a SAX callback.
- *   mxmlSAXLoadFile()       - Load a file into an XML node tree
- *                             using a SAX callback.
- *   mxmlSAXLoadString()     - Load a string into an XML node tree
- *                             using a SAX callback.
- *   mxmlSetCustomHandlers() - Set the handling functions for custom data.
- *   mxmlSetErrorCallback()  - Set the error message callback.
- *   mxmlSetWrapMargin()     - Set the wrap margin when saving XML data.
- *   mxml_add_char()         - Add a character to a buffer, expanding as needed.
- *   mxml_fd_getc()          - Read a character from a file descriptor.
- *   mxml_fd_putc()          - Write a character to a file descriptor.
- *   mxml_fd_read()          - Read a buffer of data from a file descriptor.
- *   mxml_fd_write()         - Write a buffer of data to a file descriptor.
- *   mxml_file_getc()        - Get a character from a file.
- *   mxml_file_putc()        - Write a character to a file.
- *   mxml_get_entity()       - Get the character corresponding to an entity...
- *   mxml_load_data()        - Load data into an XML node tree.
- *   mxml_parse_element()    - Parse an element for any attributes...
- *   mxml_string_getc()      - Get a character from a string.
- *   mxml_string_putc()      - Write a character to a string.
- *   mxml_write_name()       - Write a name string.
- *   mxml_write_node()       - Save an XML node to a file.
- *   mxml_write_string()     - Write a string, escaping & and < as needed.
- *   mxml_write_ws()         - Do whitespace callback...
+ *     http://www.msweet.org/projects.php/Mini-XML
  */
 
+/*** This file modified by ARM on 25 Aug 2014 to avoid pointer overflow when checking if the write position is beyond the end of the buffer in mxmlSaveString and mxml_string_putc ***/
+
 /*
  * Include necessary headers...
  */
@@ -128,7 +95,7 @@ static int           mxml_write_node(mxml_node_t *node, void *p,
                                        _mxml_global_t *global);
 static int             mxml_write_string(const char *s, void *p,
                                          _mxml_putc_cb_t putc_cb);
-static int             mxml_write_ws(mxml_node_t *node, void *p, 
+static int             mxml_write_ws(mxml_node_t *node, void *p,
                                      mxml_save_cb_t cb, int ws,
                                      int col, _mxml_putc_cb_t putc_cb);
 
@@ -400,7 +367,7 @@ mxmlSaveString(mxml_node_t    *node,        /* I - Node to write */
                mxml_save_cb_t cb)      /* I - Whitespace callback or MXML_NO_CALLBACK */
 {
   int  col;                            /* Final column */
-  char *ptr[2];                        /* Pointers for putc_cb */
+  char *ptr[3];                        /* Pointers for putc_cb */
   _mxml_global_t *global = _mxml_global();
                                        /* Global data */
 
@@ -411,6 +378,7 @@ mxmlSaveString(mxml_node_t    *node,        /* I - Node to write */
 
   ptr[0] = buffer;
   ptr[1] = buffer + bufsize;
+  ptr[2] = 0;
 
   if ((col = mxml_write_node(node, ptr, cb, 0, mxml_string_putc, global)) < 0)
     return (-1);
@@ -422,7 +390,7 @@ mxmlSaveString(mxml_node_t    *node,        /* I - Node to write */
   * Nul-terminate the buffer...
   */
 
-  if (ptr[0] >= ptr[1])
+  if (ptr[2] != 0)
     buffer[bufsize - 1] = '\0';
   else
     ptr[0][0] = '\0';
@@ -567,7 +535,7 @@ mxmlSAXLoadString(
  *
  * The save function accepts a node pointer and must return a malloc'd
  * string on success and NULL on error.
- * 
+ *
  */
 
 void
@@ -756,7 +724,7 @@ mxml_fd_getc(void *p,                       /* I  - File descriptor buffer */
              return (EOF);
 
          ch = *(buf->current)++;
-          
+
          if (ch != 0xff)
            return (EOF);
 
@@ -775,7 +743,7 @@ mxml_fd_getc(void *p,                       /* I  - File descriptor buffer */
              return (EOF);
 
          ch = *(buf->current)++;
-          
+
          if (ch != 0xfe)
            return (EOF);
 
@@ -1287,8 +1255,8 @@ mxml_file_getc(void *p,                   /* I  - Pointer to file */
          * Multi-word UTF-16 char...
          */
 
-          int lch = (getc(fp) << 8);
-          lch |= getc(fp);
+          int lch = getc(fp);
+          lch = (lch << 8) | getc(fp);
 
           if (lch < 0xdc00 || lch >= 0xdfff)
            return (EOF);
@@ -1317,7 +1285,7 @@ mxml_file_getc(void *p,                   /* I  - Pointer to file */
          */
 
           int lch = getc(fp);
-                 lch |= (getc(fp) << 8);
+          lch |= (getc(fp) << 8);
 
           if (lch < 0xdc00 || lch >= 0xdfff)
            return (EOF);
@@ -1463,8 +1431,10 @@ mxml_load_data(
 
   if (cb && parent)
     type = (*cb)(parent);
-  else
+  else if (parent)
     type = MXML_TEXT;
+  else
+    type = MXML_IGNORE;
 
   while ((ch = (*getc_cb)(p, &encoding)) != EOF)
   {
@@ -1518,7 +1488,7 @@ mxml_load_data(
         default : /* Ignore... */
            node = NULL;
            break;
-      }          
+      }
 
       if (*bufptr)
       {
@@ -1661,9 +1631,9 @@ mxml_load_data(
          * There can only be one root element!
          */
 
-         mxml_error("<%s> cannot be a second root node after <%s>", 
+         mxml_error("<%s> cannot be a second root node after <%s>",
                     buffer, first->value.element.name);
-          goto error;               
+          goto error;
        }
 
        if ((node = mxmlNewElement(parent, buffer)) == NULL)
@@ -1729,9 +1699,9 @@ mxml_load_data(
          * There can only be one root element!
          */
 
-         mxml_error("<%s> cannot be a second root node after <%s>", 
+         mxml_error("<%s> cannot be a second root node after <%s>",
                     buffer, first->value.element.name);
-          goto error;               
+          goto error;
        }
 
        if ((node = mxmlNewElement(parent, buffer)) == NULL)
@@ -1796,9 +1766,9 @@ mxml_load_data(
          * There can only be one root element!
          */
 
-         mxml_error("<%s> cannot be a second root node after <%s>", 
+         mxml_error("<%s> cannot be a second root node after <%s>",
                     buffer, first->value.element.name);
-          goto error;               
+          goto error;
        }
 
        if ((node = mxmlNewElement(parent, buffer)) == NULL)
@@ -1882,9 +1852,9 @@ mxml_load_data(
          * There can only be one root element!
          */
 
-         mxml_error("<%s> cannot be a second root node after <%s>", 
+         mxml_error("<%s> cannot be a second root node after <%s>",
                     buffer, first->value.element.name);
-          goto error;               
+          goto error;
        }
 
        if ((node = mxmlNewElement(parent, buffer)) == NULL)
@@ -1974,9 +1944,9 @@ mxml_load_data(
          * There can only be one root element!
          */
 
-         mxml_error("<%s> cannot be a second root node after <%s>", 
+         mxml_error("<%s> cannot be a second root node after <%s>",
                     buffer, first->value.element.name);
-          goto error;               
+          goto error;
        }
 
         if ((node = mxmlNewElement(parent, buffer)) == NULL)
@@ -2076,7 +2046,7 @@ mxml_load_data(
   {
     node = parent;
 
-    while (parent->parent != top && parent->parent)
+    while (parent != top && parent->parent)
       parent = parent->parent;
 
     if (node != parent)
@@ -2286,7 +2256,7 @@ mxml_parse_element(
            if (ch == '&')
              if ((ch = mxml_get_entity(node, p, encoding, getc_cb)) == EOF)
                goto error;
-             
+
            if (mxml_add_char(ch, &ptr, &value, &valsize))
              goto error;
          }
@@ -2310,7 +2280,7 @@ mxml_parse_element(
            if (ch == '&')
              if ((ch = mxml_get_entity(node, p, encoding, getc_cb)) == EOF)
                goto error;
-             
+
            if (mxml_add_char(ch, &ptr, &value, &valsize))
              goto error;
          }
@@ -2643,8 +2613,12 @@ mxml_string_putc(int  ch,                /* I - Character to write */
 
   pp = (char **)p;
 
-  if (pp[0] < pp[1])
-    pp[0][0] = ch;
+  if (pp[2] == 0) {
+    if (pp[0] < pp[1])
+      pp[0][0] = ch;
+    else
+      pp[2] = (char *)1;
+  }
 
   pp[0] ++;
 
@@ -3078,5 +3052,5 @@ mxml_write_ws(mxml_node_t     *node,      /* I - Current node */
 
 
 /*
- * End of "$Id: mxml-file.c 438 2011-03-24 05:47:51Z mike $".
+ * End of "$Id: mxml-file.c 455 2014-01-05 03:28:03Z msweet $".
  */
index a5356d57e186bb127ac0c6914a77bb926948f552..40ed3d0839b4151c8b1a8f2a3b4d1c565304435e 100644 (file)
@@ -1,9 +1,9 @@
 /*
- * "$Id: mxml-get.c 427 2011-01-03 02:03:29Z mike $"
+ * "$Id: mxml-get.c 451 2014-01-04 21:50:06Z msweet $"
  *
  * Node get functions for Mini-XML, a small XML-like file parsing library.
  *
- * Copyright 2011 by Michael R Sweet.
+ * Copyright 2014 by Michael R Sweet.
  *
  * These coded instructions, statements, and computer programs are the
  * property of Michael R Sweet and are protected by Federal copyright
  * which should have been included with this file.  If this file is
  * missing or damaged, see the license at:
  *
- *     http://www.minixml.org/
- *
- * Contents:
- *
- *   mxmlGetCDATA()       - Get the value for a CDATA node.
- *   mxmlGetCustom()      - Get the value for a custom node.
- *   mxmlGetElement()     - Get the name for an element node.
- *   mxmlGetFirstChild()  - Get the first child of an element node.
- *   mxmlGetInteger()     - Get the integer value from the specified node or its
- *                          first child.
- *   mxmlGetLastChild()   - Get the last child of an element node.
- *   mxmlGetNextSibling() - Get the next node for the current parent.
- *   mxmlGetOpaque()      - Get an opaque string value for a node or its first
- *                          child.
- *   mxmlGetParent()      - Get the parent node.
- *   mxmlGetPrevSibling() - Get the previous node for the current parent.
- *   mxmlGetReal()        - Get the real value for a node or its first child.
- *   mxmlGetText()        - Get the text value for a node or its first child.
- *   mxmlGetType()        - Get the node type.
- *   mxmlGetUserData()    - Get the user data pointer for a node.
+ *     http://www.msweet.org/projects.php/Mini-XML
  */
 
 /*
@@ -467,5 +448,5 @@ mxmlGetUserData(mxml_node_t *node)  /* I - Node to get */
 
 
 /*
- * End of "$Id: mxml-get.c 427 2011-01-03 02:03:29Z mike $".
+ * End of "$Id: mxml-get.c 451 2014-01-04 21:50:06Z msweet $".
  */
index b6efc66f055cee93136d51fd2f39e49b0ba69496..10814390d3a067a0c8a46247b4bbd747ac39c86e 100644 (file)
@@ -1,9 +1,9 @@
 /*
- * "$Id: mxml-index.c 426 2011-01-01 23:42:17Z mike $"
+ * "$Id: mxml-index.c 451 2014-01-04 21:50:06Z msweet $"
  *
  * Index support code for Mini-XML, a small XML-like file parsing library.
  *
- * Copyright 2003-2011 by Michael R Sweet.
+ * Copyright 2003-2014 by Michael R Sweet.
  *
  * These coded instructions, statements, and computer programs are the
  * property of Michael R Sweet and are protected by Federal copyright
  * which should have been included with this file.  If this file is
  * missing or damaged, see the license at:
  *
- *     http://www.minixml.org/
- *
- * Contents:
- *
+ *     http://www.msweet.org/projects.php/Mini-XML
  */
 
 /*
@@ -658,5 +655,5 @@ index_sort(mxml_index_t *ind,               /* I - Index to sort */
 
 
 /*
- * End of "$Id: mxml-index.c 426 2011-01-01 23:42:17Z mike $".
+ * End of "$Id: mxml-index.c 451 2014-01-04 21:50:06Z msweet $".
  */
index 44af759f9de3298d97697ebbbf331e9dd5a03d48..128cda1a4cf2f87f22bb7eda0a0a01a956871249 100644 (file)
@@ -1,9 +1,9 @@
 /*
- * "$Id: mxml-node.c 436 2011-01-22 01:02:05Z mike $"
+ * "$Id: mxml-node.c 451 2014-01-04 21:50:06Z msweet $"
  *
  * Node support code for Mini-XML, a small XML-like file parsing library.
  *
- * Copyright 2003-2011 by Michael R Sweet.
+ * Copyright 2003-2014 by Michael R Sweet.
  *
  * These coded instructions, statements, and computer programs are the
  * property of Michael R Sweet and are protected by Federal copyright
  * which should have been included with this file.  If this file is
  * missing or damaged, see the license at:
  *
- *     http://www.minixml.org/
- *
- * Contents:
- *
- *   mxmlAdd()         - Add a node to a tree.
- *   mxmlDelete()      - Delete a node and all of its children.
- *   mxmlGetRefCount() - Get the current reference (use) count for a node.
- *   mxmlNewCDATA()    - Create a new CDATA node.
- *   mxmlNewCustom()   - Create a new custom data node.
- *   mxmlNewElement()  - Create a new element node.
- *   mxmlNewInteger()  - Create a new integer node.
- *   mxmlNewOpaque()   - Create a new opaque string.
- *   mxmlNewReal()     - Create a new real number node.
- *   mxmlNewText()     - Create a new text fragment node.
- *   mxmlNewTextf()    - Create a new formatted text fragment node.
- *   mxmlRemove()      - Remove a node from its parent.
- *   mxmlNewXML()      - Create a new XML document tree.
- *   mxmlRelease()     - Release a node.
- *   mxmlRetain()      - Retain a node.
- *   mxml_new()        - Create a new node.
+ *     http://www.msweet.org/projects.php/Mini-XML
  */
 
 /*
@@ -529,7 +510,7 @@ mxmlNewReal(mxml_node_t *parent,    /* I - Parent node or MXML_NO_PARENT */
  * list. The constant MXML_NO_PARENT can be used to specify that the new
  * text node has no parent. The whitespace parameter is used to specify
  * whether leading whitespace is present before the node. The text
- * string must be nul-terminated and is copied into the new node.  
+ * string must be nul-terminated and is copied into the new node.
  */
 
 mxml_node_t *                          /* O - New node */
@@ -573,7 +554,7 @@ mxmlNewText(mxml_node_t *parent,    /* I - Parent node or MXML_NO_PARENT */
  * list. The constant MXML_NO_PARENT can be used to specify that the new
  * text node has no parent. The whitespace parameter is used to specify
  * whether leading whitespace is present before the node. The format
- * string must be nul-terminated and is formatted into the new node.  
+ * string must be nul-terminated and is formatted into the new node.
  */
 
 mxml_node_t *                          /* O - New node */
@@ -803,5 +784,5 @@ mxml_new(mxml_node_t *parent,               /* I - Parent node */
 
 
 /*
- * End of "$Id: mxml-node.c 436 2011-01-22 01:02:05Z mike $".
+ * End of "$Id: mxml-node.c 451 2014-01-04 21:50:06Z msweet $".
  */
index 72f3e2320c7ca233dc4beae9ab430bbf72fc728b..bec4bbfbf37868f5bcc7c6fd307c30ff81b8c032 100644 (file)
@@ -1,9 +1,9 @@
 /*
- * "$Id: mxml-private.c 422 2010-11-07 22:55:11Z mike $"
+ * "$Id: mxml-private.c 451 2014-01-04 21:50:06Z msweet $"
  *
  * Private functions for Mini-XML, a small XML-like file parsing library.
  *
- * Copyright 2003-2010 by Michael R Sweet.
+ * Copyright 2003-2014 by Michael R Sweet.
  *
  * These coded instructions, statements, and computer programs are the
  * property of Michael R Sweet and are protected by Federal copyright
  * which should have been included with this file.  If this file is
  * missing or damaged, see the license at:
  *
- *     http://www.minixml.org/
- *
- * Contents:
- *
- *   mxml_error()      - Display an error message.
- *   mxml_integer_cb() - Default callback for integer values.
- *   mxml_opaque_cb()  - Default callback for opaque values.
- *   mxml_real_cb()    - Default callback for real number values.
- *   _mxml_global()    - Get global data.
+ *     http://www.msweet.org/projects.php/Mini-XML
  */
 
 /*
@@ -238,7 +230,7 @@ static DWORD _mxml_tls_index;               /* Index for global storage */
 /*
  * 'DllMain()' - Main entry for library.
  */
+
 BOOL WINAPI                            /* O - Success/failure */
 DllMain(HINSTANCE hinst,               /* I - DLL module handle */
         DWORD     reason,              /* I - Reason */
@@ -250,28 +242,28 @@ DllMain(HINSTANCE hinst,          /* I - DLL module handle */
   (void)hinst;
   (void)reserved;
 
-  switch (reason) 
-  { 
+  switch (reason)
+  {
     case DLL_PROCESS_ATTACH :          /* Called on library initialization */
-        if ((_mxml_tls_index = TlsAlloc()) == TLS_OUT_OF_INDEXES) 
-          return (FALSE); 
-        break; 
+        if ((_mxml_tls_index = TlsAlloc()) == TLS_OUT_OF_INDEXES)
+          return (FALSE);
+        break;
 
     case DLL_THREAD_DETACH :           /* Called when a thread terminates */
         if ((global = (_mxml_global_t *)TlsGetValue(_mxml_tls_index)) != NULL)
           free(global);
-        break; 
+        break;
 
     case DLL_PROCESS_DETACH :          /* Called when library is unloaded */
         if ((global = (_mxml_global_t *)TlsGetValue(_mxml_tls_index)) != NULL)
           free(global);
 
-        TlsFree(_mxml_tls_index); 
-        break; 
+        TlsFree(_mxml_tls_index);
+        break;
 
-    default: 
-        break; 
-  } 
+    default:
+        break;
+  }
 
   return (TRUE);
 }
@@ -295,7 +287,7 @@ _mxml_global(void)
     global->entity_cbs[0]  = _mxml_entity_cb;
     global->wrap           = 72;
 
-    TlsSetValue(_mxml_tls_index, (LPVOID)global); 
+    TlsSetValue(_mxml_tls_index, (LPVOID)global);
   }
 
   return (global);
@@ -327,5 +319,5 @@ _mxml_global(void)
 
 
 /*
- * End of "$Id: mxml-private.c 422 2010-11-07 22:55:11Z mike $".
+ * End of "$Id: mxml-private.c 451 2014-01-04 21:50:06Z msweet $".
  */
index 8789e6c52cbd74c4443f341a4cd9c395a9df0cf5..c5e4e6b6f27a24aa294d023ca68113a69b17bd15 100644 (file)
@@ -1,9 +1,9 @@
 /*
- * "$Id: mxml-private.h 408 2010-09-19 05:26:46Z mike $"
+ * "$Id: mxml-private.h 451 2014-01-04 21:50:06Z msweet $"
  *
  * Private definitions for Mini-XML, a small XML-like file parsing library.
  *
- * Copyright 2003-2010 by Michael R Sweet.
+ * Copyright 2003-2014 by Michael R Sweet.
  *
  * These coded instructions, statements, and computer programs are the
  * property of Michael R Sweet and are protected by Federal copyright
@@ -11,7 +11,7 @@
  * which should have been included with this file.  If this file is
  * missing or damaged, see the license at:
  *
- *     http://www.minixml.org/
+ *     http://www.msweet.org/projects.php/Mini-XML
  */
 
 /*
@@ -46,5 +46,5 @@ extern int            _mxml_entity_cb(const char *name);
 
 
 /*
- * End of "$Id: mxml-private.h 408 2010-09-19 05:26:46Z mike $".
+ * End of "$Id: mxml-private.h 451 2014-01-04 21:50:06Z msweet $".
  */
index f975af1543ca869b309eb9a6df7fc63b6b0f9266..313a52f0ce2f8cb4219e220b61c1d9f04050435c 100644 (file)
@@ -1,10 +1,10 @@
 /*
- * "$Id: mxml-search.c 427 2011-01-03 02:03:29Z mike $"
+ * "$Id: mxml-search.c 451 2014-01-04 21:50:06Z msweet $"
  *
  * Search/navigation functions for Mini-XML, a small XML-like file
  * parsing library.
  *
- * Copyright 2003-2010 by Michael R Sweet.
+ * Copyright 2003-2014 by Michael R Sweet.
  *
  * These coded instructions, statements, and computer programs are the
  * property of Michael R Sweet and are protected by Federal copyright
  * which should have been included with this file.  If this file is
  * missing or damaged, see the license at:
  *
- *     http://www.minixml.org/
- *
- * Contents:
- *
- *   mxmlFindElement() - Find the named element.
- *   mxmlFindValue()   - Find a value with the given path.
- *   mxmlWalkNext()    - Walk to the next logical node in the tree.
- *   mxmlWalkPrev()    - Walk to the previous logical node in the tree.
+ *     http://www.msweet.org/projects.php/Mini-XML
  */
 
 /*
@@ -126,7 +119,7 @@ mxmlFindElement(mxml_node_t *node,  /* I - Current node */
  *
  * The first child node of the found node is returned if the given node has
  * children and the first child is a value node.
- * 
+ *
  * @since Mini-XML 2.7@
  */
 
@@ -283,5 +276,5 @@ mxmlWalkPrev(mxml_node_t *node,             /* I - Current node */
 
 
 /*
- * End of "$Id: mxml-search.c 427 2011-01-03 02:03:29Z mike $".
+ * End of "$Id: mxml-search.c 451 2014-01-04 21:50:06Z msweet $".
  */
index b0bd52790b2eeecf83ddef48acd8a65322032e0c..16d4bf1050dd0b2bc2bbdbb7174f9965837949c8 100644 (file)
@@ -1,9 +1,9 @@
 /*
- * "$Id: mxml-set.c 441 2011-12-09 23:49:00Z mike $"
+ * "$Id: mxml-set.c 451 2014-01-04 21:50:06Z msweet $"
  *
  * Node set functions for Mini-XML, a small XML-like file parsing library.
  *
- * Copyright 2003-2011 by Michael R Sweet.
+ * Copyright 2003-2014 by Michael R Sweet.
  *
  * These coded instructions, statements, and computer programs are the
  * property of Michael R Sweet and are protected by Federal copyright
  * which should have been included with this file.  If this file is
  * missing or damaged, see the license at:
  *
- *     http://www.minixml.org/
- *
- * Contents:
- *
- *   mxmlSetCDATA()    - Set the element name of a CDATA node.
- *   mxmlSetCustom()   - Set the data and destructor of a custom data node.
- *   mxmlSetElement()  - Set the name of an element node.
- *   mxmlSetInteger()  - Set the value of an integer node.
- *   mxmlSetOpaque()   - Set the value of an opaque node.
- *   mxmlSetReal()     - Set the value of a real number node.
- *   mxmlSetText()     - Set the value of a text node.
- *   mxmlSetTextf()    - Set the value of a text node to a formatted string.
- *   mxmlSetUserData() - Set the user data pointer for a node.
+ *     http://www.msweet.org/projects.php/Mini-XML
  */
 
 /*
@@ -345,5 +333,5 @@ mxmlSetUserData(mxml_node_t *node,  /* I - Node to set */
 
 
 /*
- * End of "$Id: mxml-set.c 441 2011-12-09 23:49:00Z mike $".
+ * End of "$Id: mxml-set.c 451 2014-01-04 21:50:06Z msweet $".
  */
index 6be42523f95c306e62b457a018d2c5140f78b489..9d5b58e6adb70bd73644bef31c5153e3e4a46d5c 100644 (file)
@@ -1,9 +1,9 @@
 /*
- * "$Id: mxml-string.c 424 2010-12-25 16:21:50Z mike $"
+ * "$Id: mxml-string.c 454 2014-01-05 03:25:07Z msweet $"
  *
  * String functions for Mini-XML, a small XML-like file parsing library.
  *
- * Copyright 2003-2010 by Michael R Sweet.
+ * Copyright 2003-2014 by Michael R Sweet.
  *
  * These coded instructions, statements, and computer programs are the
  * property of Michael R Sweet and are protected by Federal copyright
  * which should have been included with this file.  If this file is
  * missing or damaged, see the license at:
  *
- *     http://www.minixml.org/
- *
- * Contents:
- *
- *   _mxml_snprintf()  - Format a string.
- *   _mxml_strdup()    - Duplicate a string.
- *   _mxml_strdupf()   - Format and duplicate a string.
- *   _mxml_vsnprintf() - Format a string into a fixed size buffer.
- *   _mxml_vstrdupf()  - Format and duplicate a string.
+ *     http://www.msweet.org/projects.php/Mini-XML
  */
 
 /*
@@ -38,7 +30,7 @@
 #  ifdef __va_copy
 #    define va_copy(dst,src) __va_copy(dst,src)
 #  else
-#    define va_copy(dst,src) memcpy(&dst, &src, sizeof(va_list))
+#    define va_copy(dst,src) memcpy(&dst, src, sizeof(va_list))
 #  endif /* __va_copy */
 #endif /* va_copy */
 
@@ -157,7 +149,8 @@ _mxml_vsnprintf(char       *buffer, /* O - Output buffer */
 
       if (*format == '%')
       {
-        if (bufptr && bufptr < bufend) *bufptr++ = *format;
+        if (bufptr && bufptr < bufend)
+          *bufptr++ = *format;
         bytes ++;
         format ++;
        continue;
@@ -472,5 +465,5 @@ _mxml_vstrdupf(const char *format,  /* I - Printf-style format string */
 
 
 /*
- * End of "$Id: mxml-string.c 424 2010-12-25 16:21:50Z mike $".
+ * End of "$Id: mxml-string.c 454 2014-01-05 03:25:07Z msweet $".
  */
index 79c711f4c80fd59674e8273ea6866d37ec534476..bba5fd23a67b663fd432c683104f18909646d8e1 100644 (file)
@@ -1,9 +1,9 @@
 /*
- * "$Id: mxml.h 427 2011-01-03 02:03:29Z mike $"
+ * "$Id: mxml.h 451 2014-01-04 21:50:06Z msweet $"
  *
  * Header file for Mini-XML, a small XML-like file parsing library.
  *
- * Copyright 2003-2011 by Michael R Sweet.
+ * Copyright 2003-2014 by Michael R Sweet.
  *
  * These coded instructions, statements, and computer programs are the
  * property of Michael R Sweet and are protected by Federal copyright
@@ -11,7 +11,7 @@
  * which should have been included with this file.  If this file is
  * missing or damaged, see the license at:
  *
- *     http://www.minixml.org/
+ *     http://www.msweet.org/projects.php/Mini-XML
  */
 
 /*
@@ -36,6 +36,9 @@
  * Constants...
  */
 
+#  define MXML_MAJOR_VERSION   2       /* Major version number */
+#  define MXML_MINOR_VERSION   8       /* Minor version number */
+
 #  define MXML_TAB             8       /* Tabs every N columns */
 
 #  define MXML_NO_CALLBACK     0       /* Don't use a type callback */
@@ -93,7 +96,7 @@ typedef enum mxml_type_e              /**** The XML node type. ****/
 typedef void (*mxml_custom_destroy_cb_t)(void *);
                                        /**** Custom data destructor ****/
 
-typedef void (*mxml_error_cb_t)(const char *);  
+typedef void (*mxml_error_cb_t)(const char *);
                                        /**** Error callback function ****/
 
 typedef struct mxml_attr_s             /**** An XML element attribute value. @private@ ****/
@@ -161,7 +164,7 @@ typedef struct mxml_index_s mxml_index_t;
 typedef int (*mxml_custom_load_cb_t)(mxml_node_t *, const char *);
                                        /**** Custom data load callback function ****/
 
-typedef char *(*mxml_custom_save_cb_t)(mxml_node_t *);  
+typedef char *(*mxml_custom_save_cb_t)(mxml_node_t *);
                                        /**** Custom data save callback function ****/
 
 typedef int (*mxml_entity_cb_t)(const char *);
@@ -173,7 +176,7 @@ typedef mxml_type_t (*mxml_load_cb_t)(mxml_node_t *);
 typedef const char *(*mxml_save_cb_t)(mxml_node_t *, int);
                                        /**** Save callback function ****/
 
-typedef void (*mxml_sax_cb_t)(mxml_node_t *, mxml_sax_event_t, void *);  
+typedef void (*mxml_sax_cb_t)(mxml_node_t *, mxml_sax_event_t, void *);
                                        /**** SAX callback function ****/
 
 
@@ -325,5 +328,5 @@ extern mxml_type_t  mxml_real_cb(mxml_node_t *node);
 
 
 /*
- * End of "$Id: mxml.h 427 2011-01-03 02:03:29Z mike $".
+ * End of "$Id: mxml.h 451 2014-01-04 21:50:06Z msweet $".
  */
index 46878daca5cc723b3200763e023744e87c7dc2fd..c9eac3edfe4d306fcf574fd1eab62b0738d5b774 100644 (file)
@@ -101,7 +101,7 @@ static int setup_cpunode_map(void)
 
        dir1 = opendir(PATH_SYS_NODE);
        if (!dir1)
-               return -1;
+               return 0;
 
        while ((dent1 = readdir(dir1)) != NULL) {
                if (dent1->d_type != DT_DIR ||
index 0a63658065f0e9e41285f521665b80b975cfe86c..2cee2b79b4defc8a5bf584d765d46906187b0462 100644 (file)
@@ -4,6 +4,7 @@ TARGETS += efivarfs
 TARGETS += kcmp
 TARGETS += memory-hotplug
 TARGETS += mqueue
+TARGETS += mount
 TARGETS += net
 TARGETS += ptrace
 TARGETS += vm
diff --git a/tools/testing/selftests/mount/Makefile b/tools/testing/selftests/mount/Makefile
new file mode 100644 (file)
index 0000000..337d853
--- /dev/null
@@ -0,0 +1,17 @@
+# Makefile for mount selftests.
+
+all: unprivileged-remount-test
+
+unprivileged-remount-test: unprivileged-remount-test.c
+       gcc -Wall -O2 unprivileged-remount-test.c -o unprivileged-remount-test
+
+# Allow specific tests to be selected.
+test_unprivileged_remount: unprivileged-remount-test
+       @if [ -f /proc/self/uid_map ] ; then ./unprivileged-remount-test ; fi
+
+run_tests: all test_unprivileged_remount
+
+clean:
+       rm -f unprivileged-remount-test
+
+.PHONY: all test_unprivileged_remount
diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c
new file mode 100644 (file)
index 0000000..1b3ff2f
--- /dev/null
@@ -0,0 +1,242 @@
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <grp.h>
+#include <stdbool.h>
+#include <stdarg.h>
+
+#ifndef CLONE_NEWNS
+# define CLONE_NEWNS 0x00020000
+#endif
+#ifndef CLONE_NEWUTS
+# define CLONE_NEWUTS 0x04000000
+#endif
+#ifndef CLONE_NEWIPC
+# define CLONE_NEWIPC 0x08000000
+#endif
+#ifndef CLONE_NEWNET
+# define CLONE_NEWNET 0x40000000
+#endif
+#ifndef CLONE_NEWUSER
+# define CLONE_NEWUSER 0x10000000
+#endif
+#ifndef CLONE_NEWPID
+# define CLONE_NEWPID 0x20000000
+#endif
+
+#ifndef MS_RELATIME
+#define MS_RELATIME (1 << 21)
+#endif
+#ifndef MS_STRICTATIME
+#define MS_STRICTATIME (1 << 24)
+#endif
+
+static void die(char *fmt, ...)
+{
+       va_list ap;
+       va_start(ap, fmt);
+       vfprintf(stderr, fmt, ap);
+       va_end(ap);
+       exit(EXIT_FAILURE);
+}
+
+static void write_file(char *filename, char *fmt, ...)
+{
+       char buf[4096];
+       int fd;
+       ssize_t written;
+       int buf_len;
+       va_list ap;
+
+       va_start(ap, fmt);
+       buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
+       va_end(ap);
+       if (buf_len < 0) {
+               die("vsnprintf failed: %s\n",
+                   strerror(errno));
+       }
+       if (buf_len >= sizeof(buf)) {
+               die("vsnprintf output truncated\n");
+       }
+
+       fd = open(filename, O_WRONLY);
+       if (fd < 0) {
+               die("open of %s failed: %s\n",
+                   filename, strerror(errno));
+       }
+       written = write(fd, buf, buf_len);
+       if (written != buf_len) {
+               if (written >= 0) {
+                       die("short write to %s\n", filename);
+               } else {
+                       die("write to %s failed: %s\n",
+                               filename, strerror(errno));
+               }
+       }
+       if (close(fd) != 0) {
+               die("close of %s failed: %s\n",
+                       filename, strerror(errno));
+       }
+}
+
+static void create_and_enter_userns(void)
+{
+       uid_t uid;
+       gid_t gid;
+
+       uid = getuid();
+       gid = getgid();
+
+       if (unshare(CLONE_NEWUSER) !=0) {
+               die("unshare(CLONE_NEWUSER) failed: %s\n",
+                       strerror(errno));
+       }
+
+       write_file("/proc/self/uid_map", "0 %d 1", uid);
+       write_file("/proc/self/gid_map", "0 %d 1", gid);
+
+       if (setgroups(0, NULL) != 0) {
+               die("setgroups failed: %s\n",
+                       strerror(errno));
+       }
+       if (setgid(0) != 0) {
+               die ("setgid(0) failed %s\n",
+                       strerror(errno));
+       }
+       if (setuid(0) != 0) {
+               die("setuid(0) failed %s\n",
+                       strerror(errno));
+       }
+}
+
+static
+bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
+{
+       pid_t child;
+
+       child = fork();
+       if (child == -1) {
+               die("fork failed: %s\n",
+                       strerror(errno));
+       }
+       if (child != 0) { /* parent */
+               pid_t pid;
+               int status;
+               pid = waitpid(child, &status, 0);
+               if (pid == -1) {
+                       die("waitpid failed: %s\n",
+                               strerror(errno));
+               }
+               if (pid != child) {
+                       die("waited for %d got %d\n",
+                               child, pid);
+               }
+               if (!WIFEXITED(status)) {
+                       die("child did not terminate cleanly\n");
+               }
+               return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false;
+       }
+
+       create_and_enter_userns();
+       if (unshare(CLONE_NEWNS) != 0) {
+               die("unshare(CLONE_NEWNS) failed: %s\n",
+                       strerror(errno));
+       }
+
+       if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
+               die("mount of /tmp failed: %s\n",
+                       strerror(errno));
+       }
+
+       create_and_enter_userns();
+
+       if (unshare(CLONE_NEWNS) != 0) {
+               die("unshare(CLONE_NEWNS) failed: %s\n",
+                       strerror(errno));
+       }
+
+       if (mount("/tmp", "/tmp", "none",
+                 MS_REMOUNT | MS_BIND | remount_flags, NULL) != 0) {
+               /* system("cat /proc/self/mounts"); */
+               die("remount of /tmp failed: %s\n",
+                   strerror(errno));
+       }
+
+       if (mount("/tmp", "/tmp", "none",
+                 MS_REMOUNT | MS_BIND | invalid_flags, NULL) == 0) {
+               /* system("cat /proc/self/mounts"); */
+               die("remount of /tmp with invalid flags "
+                   "succeeded unexpectedly\n");
+       }
+       exit(EXIT_SUCCESS);
+}
+
+static bool test_unpriv_remount_simple(int mount_flags)
+{
+       return test_unpriv_remount(mount_flags, mount_flags, 0);
+}
+
+static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
+{
+       return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
+}
+
+int main(int argc, char **argv)
+{
+       if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
+               die("MS_RDONLY malfunctions\n");
+       }
+       if (!test_unpriv_remount_simple(MS_NODEV)) {
+               die("MS_NODEV malfunctions\n");
+       }
+       if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
+               die("MS_NOSUID malfunctions\n");
+       }
+       if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
+               die("MS_NOEXEC malfunctions\n");
+       }
+       if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
+                                      MS_NOATIME|MS_NODEV))
+       {
+               die("MS_RELATIME malfunctions\n");
+       }
+       if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
+                                      MS_NOATIME|MS_NODEV))
+       {
+               die("MS_STRICTATIME malfunctions\n");
+       }
+       if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
+                                      MS_STRICTATIME|MS_NODEV))
+       {
+               die("MS_RELATIME malfunctions\n");
+       }
+       if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
+                                      MS_NOATIME|MS_NODEV))
+       {
+               die("MS_RELATIME malfunctions\n");
+       }
+       if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
+                                      MS_NOATIME|MS_NODEV))
+       {
+               die("MS_RELATIME malfunctions\n");
+       }
+       if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
+                                      MS_STRICTATIME|MS_NODEV))
+       {
+               die("MS_RELATIME malfunctions\n");
+       }
+       if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
+                                MS_NOATIME|MS_NODEV))
+       {
+               die("Default atime malfunctions\n");
+       }
+       return EXIT_SUCCESS;
+}
index 779262f59e252b458a67335635b59d0d1ca1932e..fc0c5e603eb42e7404d6060e81e4c20fa0003f39 100644 (file)
@@ -6,6 +6,9 @@ config HAVE_KVM
 config HAVE_KVM_IRQCHIP
        bool
 
+config HAVE_KVM_IRQFD
+       bool
+
 config HAVE_KVM_IRQ_ROUTING
        bool
 
@@ -22,8 +25,15 @@ config KVM_MMIO
 config KVM_ASYNC_PF
        bool
 
+# Toggle to switch between direct notification and batch job
+config KVM_ASYNC_PF_SYNC
+       bool
+
 config HAVE_KVM_MSI
        bool
 
 config HAVE_KVM_CPU_RELAX_INTERCEPT
        bool
+
+config KVM_VFIO
+       bool
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
new file mode 100644 (file)
index 0000000..5081e80
--- /dev/null
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/cpu.h>
+#include <linux/of_irq.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+
+#include <clocksource/arm_arch_timer.h>
+#include <asm/arch_timer.h>
+
+#include <kvm/arm_vgic.h>
+#include <kvm/arm_arch_timer.h>
+
+static struct timecounter *timecounter;
+static struct workqueue_struct *wqueue;
+static unsigned int host_vtimer_irq;
+
+static cycle_t kvm_phys_timer_read(void)
+{
+       return timecounter->cc->read(timecounter->cc);
+}
+
+static bool timer_is_armed(struct arch_timer_cpu *timer)
+{
+       return timer->armed;
+}
+
+/* timer_arm: as in "arm the timer", not as in ARM the company */
+static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
+{
+       timer->armed = true;
+       hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
+                     HRTIMER_MODE_ABS);
+}
+
+static void timer_disarm(struct arch_timer_cpu *timer)
+{
+       if (timer_is_armed(timer)) {
+               hrtimer_cancel(&timer->timer);
+               cancel_work_sync(&timer->expired);
+               timer->armed = false;
+       }
+}
+
+static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
+{
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+       timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
+       kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+                           timer->irq->irq,
+                           timer->irq->level);
+}
+
+static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
+{
+       struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
+
+       /*
+        * We disable the timer in the world switch and let it be
+        * handled by kvm_timer_sync_hwstate(). Getting a timer
+        * interrupt at this point is a sure sign of some major
+        * breakage.
+        */
+       pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
+       return IRQ_HANDLED;
+}
+
+static void kvm_timer_inject_irq_work(struct work_struct *work)
+{
+       struct kvm_vcpu *vcpu;
+
+       vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
+       vcpu->arch.timer_cpu.armed = false;
+       kvm_timer_inject_irq(vcpu);
+}
+
+static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
+{
+       struct arch_timer_cpu *timer;
+       timer = container_of(hrt, struct arch_timer_cpu, timer);
+       queue_work(wqueue, &timer->expired);
+       return HRTIMER_NORESTART;
+}
+
+/**
+ * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
+ * @vcpu: The vcpu pointer
+ *
+ * Disarm any pending soft timers, since the world-switch code will write the
+ * virtual timer state back to the physical CPU.
+ */
+void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+       /*
+        * We're about to run this vcpu again, so there is no need to
+        * keep the background timer running, as we're about to
+        * populate the CPU timer again.
+        */
+       timer_disarm(timer);
+}
+
+/**
+ * kvm_timer_sync_hwstate - sync timer state from cpu
+ * @vcpu: The vcpu pointer
+ *
+ * Check if the virtual timer was armed and either schedule a corresponding
+ * soft timer or inject directly if already expired.
+ */
+void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       cycle_t cval, now;
+       u64 ns;
+
+       if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
+               !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
+               return;
+
+       cval = timer->cntv_cval;
+       now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+       BUG_ON(timer_is_armed(timer));
+
+       if (cval <= now) {
+               /*
+                * Timer has already expired while we were not
+                * looking. Inject the interrupt and carry on.
+                */
+               kvm_timer_inject_irq(vcpu);
+               return;
+       }
+
+       ns = cyclecounter_cyc2ns(timecounter->cc, cval - now);
+       timer_arm(timer, ns);
+}
+
+void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+                         const struct kvm_irq_level *irq)
+{
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+       /*
+        * The vcpu timer irq number cannot be determined in
+        * kvm_timer_vcpu_init() because it is called much before
+        * kvm_vcpu_set_target(). To handle this, we determine
+        * vcpu timer irq number when the vcpu is reset.
+        */
+       timer->irq = irq;
+}
+
+void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+       INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
+       hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+       timer->timer.function = kvm_timer_expire;
+}
+
+static void kvm_timer_init_interrupt(void *info)
+{
+       enable_percpu_irq(host_vtimer_irq, 0);
+}
+
+int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
+{
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+       switch (regid) {
+       case KVM_REG_ARM_TIMER_CTL:
+               timer->cntv_ctl = value;
+               break;
+       case KVM_REG_ARM_TIMER_CNT:
+               vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
+               break;
+       case KVM_REG_ARM_TIMER_CVAL:
+               timer->cntv_cval = value;
+               break;
+       default:
+               return -1;
+       }
+       return 0;
+}
+
+u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
+{
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+       switch (regid) {
+       case KVM_REG_ARM_TIMER_CTL:
+               return timer->cntv_ctl;
+       case KVM_REG_ARM_TIMER_CNT:
+               return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+       case KVM_REG_ARM_TIMER_CVAL:
+               return timer->cntv_cval;
+       }
+       return (u64)-1;
+}
+
+static int kvm_timer_cpu_notify(struct notifier_block *self,
+                               unsigned long action, void *cpu)
+{
+       switch (action) {
+       case CPU_STARTING:
+       case CPU_STARTING_FROZEN:
+               kvm_timer_init_interrupt(NULL);
+               break;
+       case CPU_DYING:
+       case CPU_DYING_FROZEN:
+               disable_percpu_irq(host_vtimer_irq);
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block kvm_timer_cpu_nb = {
+       .notifier_call = kvm_timer_cpu_notify,
+};
+
+static const struct of_device_id arch_timer_of_match[] = {
+       { .compatible   = "arm,armv7-timer",    },
+       { .compatible   = "arm,armv8-timer",    },
+       {},
+};
+
+int kvm_timer_hyp_init(void)
+{
+       struct device_node *np;
+       unsigned int ppi;
+       int err;
+
+       timecounter = arch_timer_get_timecounter();
+       if (!timecounter)
+               return -ENODEV;
+
+       np = of_find_matching_node(NULL, arch_timer_of_match);
+       if (!np) {
+               kvm_err("kvm_arch_timer: can't find DT node\n");
+               return -ENODEV;
+       }
+
+       ppi = irq_of_parse_and_map(np, 2);
+       if (!ppi) {
+               kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
+               err = -EINVAL;
+               goto out;
+       }
+
+       err = request_percpu_irq(ppi, kvm_arch_timer_handler,
+                                "kvm guest timer", kvm_get_running_vcpus());
+       if (err) {
+               kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
+                       ppi, err);
+               goto out;
+       }
+
+       host_vtimer_irq = ppi;
+
+       err = register_cpu_notifier(&kvm_timer_cpu_nb);
+       if (err) {
+               kvm_err("Cannot register timer CPU notifier\n");
+               goto out_free;
+       }
+
+       wqueue = create_singlethread_workqueue("kvm_arch_timer");
+       if (!wqueue) {
+               err = -ENOMEM;
+               goto out_free;
+       }
+
+       kvm_info("%s IRQ%d\n", np->name, ppi);
+       on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
+
+       goto out;
+out_free:
+       free_percpu_irq(ppi, kvm_get_running_vcpus());
+out:
+       of_node_put(np);
+       return err;
+}
+
+void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
+{
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+       timer_disarm(timer);
+}
+
+int kvm_timer_init(struct kvm *kvm)
+{
+       if (timecounter && wqueue) {
+               kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+               kvm->arch.timer.enabled = 1;
+       }
+
+       return 0;
+}
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
new file mode 100644 (file)
index 0000000..01124ef
--- /dev/null
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2012,2013 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
+{
+       struct vgic_lr lr_desc;
+       u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];
+
+       lr_desc.irq     = val & GICH_LR_VIRTUALID;
+       if (lr_desc.irq <= 15)
+               lr_desc.source  = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
+       else
+               lr_desc.source = 0;
+       lr_desc.state   = 0;
+
+       if (val & GICH_LR_PENDING_BIT)
+               lr_desc.state |= LR_STATE_PENDING;
+       if (val & GICH_LR_ACTIVE_BIT)
+               lr_desc.state |= LR_STATE_ACTIVE;
+       if (val & GICH_LR_EOI)
+               lr_desc.state |= LR_EOI_INT;
+
+       return lr_desc;
+}
+
+static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
+                          struct vgic_lr lr_desc)
+{
+       u32 lr_val = (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | lr_desc.irq;
+
+       if (lr_desc.state & LR_STATE_PENDING)
+               lr_val |= GICH_LR_PENDING_BIT;
+       if (lr_desc.state & LR_STATE_ACTIVE)
+               lr_val |= GICH_LR_ACTIVE_BIT;
+       if (lr_desc.state & LR_EOI_INT)
+               lr_val |= GICH_LR_EOI;
+
+       vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
+}
+
+static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
+                                 struct vgic_lr lr_desc)
+{
+       if (!(lr_desc.state & LR_STATE_MASK))
+               set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
+}
+
+static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
+{
+       u64 val;
+
+#if BITS_PER_LONG == 64
+       val  = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[1];
+       val <<= 32;
+       val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[0];
+#else
+       val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
+#endif
+       return val;
+}
+
+static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
+{
+       u64 val;
+
+#if BITS_PER_LONG == 64
+       val  = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[1];
+       val <<= 32;
+       val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[0];
+#else
+       val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
+#endif
+       return val;
+}
+
+static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
+{
+       u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
+       u32 ret = 0;
+
+       if (misr & GICH_MISR_EOI)
+               ret |= INT_STATUS_EOI;
+       if (misr & GICH_MISR_U)
+               ret |= INT_STATUS_UNDERFLOW;
+
+       return ret;
+}
+
+static void vgic_v2_enable_underflow(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
+}
+
+static void vgic_v2_disable_underflow(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
+}
+
+static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+       u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
+
+       vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
+       vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
+       vmcrp->bpr  = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
+       vmcrp->pmr  = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
+}
+
+static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+       u32 vmcr;
+
+       vmcr  = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+       vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
+       vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
+       vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
+
+       vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
+}
+
+static void vgic_v2_enable(struct kvm_vcpu *vcpu)
+{
+       /*
+        * By forcing VMCR to zero, the GIC will restore the binary
+        * points to their reset values. Anything else resets to zero
+        * anyway.
+        */
+       vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
+
+       /* Get the show on the road... */
+       vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
+}
+
+static const struct vgic_ops vgic_v2_ops = {
+       .get_lr                 = vgic_v2_get_lr,
+       .set_lr                 = vgic_v2_set_lr,
+       .sync_lr_elrsr          = vgic_v2_sync_lr_elrsr,
+       .get_elrsr              = vgic_v2_get_elrsr,
+       .get_eisr               = vgic_v2_get_eisr,
+       .get_interrupt_status   = vgic_v2_get_interrupt_status,
+       .enable_underflow       = vgic_v2_enable_underflow,
+       .disable_underflow      = vgic_v2_disable_underflow,
+       .get_vmcr               = vgic_v2_get_vmcr,
+       .set_vmcr               = vgic_v2_set_vmcr,
+       .enable                 = vgic_v2_enable,
+};
+
+static struct vgic_params vgic_v2_params;
+
+/**
+ * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
+ * @node:      pointer to the DT node
+ * @ops:       address of a pointer to the GICv2 operations
+ * @params:    address of a pointer to HW-specific parameters
+ *
+ * Returns 0 if a GICv2 has been found, with the low level operations
+ * in *ops and the HW parameters in *params. Returns an error code
+ * otherwise.
+ */
+int vgic_v2_probe(struct device_node *vgic_node,
+                 const struct vgic_ops **ops,
+                 const struct vgic_params **params)
+{
+       int ret;
+       struct resource vctrl_res;
+       struct resource vcpu_res;
+       struct vgic_params *vgic = &vgic_v2_params;
+
+       vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
+       if (!vgic->maint_irq) {
+               kvm_err("error getting vgic maintenance irq from DT\n");
+               ret = -ENXIO;
+               goto out;
+       }
+
+       ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
+       if (ret) {
+               kvm_err("Cannot obtain GICH resource\n");
+               goto out;
+       }
+
+       vgic->vctrl_base = of_iomap(vgic_node, 2);
+       if (!vgic->vctrl_base) {
+               kvm_err("Cannot ioremap GICH\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
+       vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1;
+
+       ret = create_hyp_io_mappings(vgic->vctrl_base,
+                                    vgic->vctrl_base + resource_size(&vctrl_res),
+                                    vctrl_res.start);
+       if (ret) {
+               kvm_err("Cannot map VCTRL into hyp\n");
+               goto out_unmap;
+       }
+
+       if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
+               kvm_err("Cannot obtain GICV resource\n");
+               ret = -ENXIO;
+               goto out_unmap;
+       }
+
+       if (!PAGE_ALIGNED(vcpu_res.start)) {
+               kvm_err("GICV physical address 0x%llx not page aligned\n",
+                       (unsigned long long)vcpu_res.start);
+               ret = -ENXIO;
+               goto out_unmap;
+       }
+
+       if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+               kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+                       (unsigned long long)resource_size(&vcpu_res),
+                       PAGE_SIZE);
+               ret = -ENXIO;
+               goto out_unmap;
+       }
+
+       vgic->vcpu_base = vcpu_res.start;
+
+       kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+                vctrl_res.start, vgic->maint_irq);
+
+       vgic->type = VGIC_V2;
+       *ops = &vgic_v2_ops;
+       *params = vgic;
+       goto out;
+
+out_unmap:
+       iounmap(vgic->vctrl_base);
+out:
+       of_node_put(vgic_node);
+       return ret;
+}
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
new file mode 100644 (file)
index 0000000..1c2c8ee
--- /dev/null
@@ -0,0 +1,247 @@
+/*
+ * Copyright (C) 2013 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+/* These are for GICv2 emulation only */
+#define GICH_LR_VIRTUALID              (0x3ffUL << 0)
+#define GICH_LR_PHYSID_CPUID_SHIFT     (10)
+#define GICH_LR_PHYSID_CPUID           (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
+
+/*
+ * LRs are stored in reverse order in memory. make sure we index them
+ * correctly.
+ */
+#define LR_INDEX(lr)                   (VGIC_V3_MAX_LRS - 1 - lr)
+
+static u32 ich_vtr_el2;
+
+static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
+{
+       struct vgic_lr lr_desc;
+       u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
+
+       lr_desc.irq     = val & GICH_LR_VIRTUALID;
+       if (lr_desc.irq <= 15)
+               lr_desc.source  = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
+       else
+               lr_desc.source = 0;
+       lr_desc.state   = 0;
+
+       if (val & ICH_LR_PENDING_BIT)
+               lr_desc.state |= LR_STATE_PENDING;
+       if (val & ICH_LR_ACTIVE_BIT)
+               lr_desc.state |= LR_STATE_ACTIVE;
+       if (val & ICH_LR_EOI)
+               lr_desc.state |= LR_EOI_INT;
+
+       return lr_desc;
+}
+
+static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
+                          struct vgic_lr lr_desc)
+{
+       u64 lr_val = (((u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) |
+                     lr_desc.irq);
+
+       if (lr_desc.state & LR_STATE_PENDING)
+               lr_val |= ICH_LR_PENDING_BIT;
+       if (lr_desc.state & LR_STATE_ACTIVE)
+               lr_val |= ICH_LR_ACTIVE_BIT;
+       if (lr_desc.state & LR_EOI_INT)
+               lr_val |= ICH_LR_EOI;
+
+       vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
+}
+
+static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
+                                 struct vgic_lr lr_desc)
+{
+       if (!(lr_desc.state & LR_STATE_MASK))
+               vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
+}
+
+static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr;
+}
+
+static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
+}
+
+static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
+{
+       u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
+       u32 ret = 0;
+
+       if (misr & ICH_MISR_EOI)
+               ret |= INT_STATUS_EOI;
+       if (misr & ICH_MISR_U)
+               ret |= INT_STATUS_UNDERFLOW;
+
+       return ret;
+}
+
+static void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+       u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr;
+
+       vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT;
+       vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
+       vmcrp->bpr  = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
+       vmcrp->pmr  = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
+}
+
+static void vgic_v3_enable_underflow(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr |= ICH_HCR_UIE;
+}
+
+static void vgic_v3_disable_underflow(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr &= ~ICH_HCR_UIE;
+}
+
+static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+       u32 vmcr;
+
+       vmcr  = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK;
+       vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
+       vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
+       vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
+
+       vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr;
+}
+
+static void vgic_v3_enable(struct kvm_vcpu *vcpu)
+{
+       /*
+        * By forcing VMCR to zero, the GIC will restore the binary
+        * points to their reset values. Anything else resets to zero
+        * anyway.
+        */
+       vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = 0;
+
+       /* Get the show on the road... */
+       vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr = ICH_HCR_EN;
+}
+
+static const struct vgic_ops vgic_v3_ops = {
+       .get_lr                 = vgic_v3_get_lr,
+       .set_lr                 = vgic_v3_set_lr,
+       .sync_lr_elrsr          = vgic_v3_sync_lr_elrsr,
+       .get_elrsr              = vgic_v3_get_elrsr,
+       .get_eisr               = vgic_v3_get_eisr,
+       .get_interrupt_status   = vgic_v3_get_interrupt_status,
+       .enable_underflow       = vgic_v3_enable_underflow,
+       .disable_underflow      = vgic_v3_disable_underflow,
+       .get_vmcr               = vgic_v3_get_vmcr,
+       .set_vmcr               = vgic_v3_set_vmcr,
+       .enable                 = vgic_v3_enable,
+};
+
+static struct vgic_params vgic_v3_params;
+
+/**
+ * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
+ * @node:      pointer to the DT node
+ * @ops:       address of a pointer to the GICv3 operations
+ * @params:    address of a pointer to HW-specific parameters
+ *
+ * Returns 0 if a GICv3 has been found, with the low level operations
+ * in *ops and the HW parameters in *params. Returns an error code
+ * otherwise.
+ */
+int vgic_v3_probe(struct device_node *vgic_node,
+                 const struct vgic_ops **ops,
+                 const struct vgic_params **params)
+{
+       int ret = 0;
+       u32 gicv_idx;
+       struct resource vcpu_res;
+       struct vgic_params *vgic = &vgic_v3_params;
+
+       vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
+       if (!vgic->maint_irq) {
+               kvm_err("error getting vgic maintenance irq from DT\n");
+               ret = -ENXIO;
+               goto out;
+       }
+
+       ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
+
+       /*
+        * The ListRegs field is 5 bits, but there is a architectural
+        * maximum of 16 list registers. Just ignore bit 4...
+        */
+       vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1;
+
+       if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx))
+               gicv_idx = 1;
+
+       gicv_idx += 3; /* Also skip GICD, GICC, GICH */
+       if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
+               kvm_err("Cannot obtain GICV region\n");
+               ret = -ENXIO;
+               goto out;
+       }
+
+       if (!PAGE_ALIGNED(vcpu_res.start)) {
+               kvm_err("GICV physical address 0x%llx not page aligned\n",
+                       (unsigned long long)vcpu_res.start);
+               ret = -ENXIO;
+               goto out;
+       }
+
+       if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+               kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+                       (unsigned long long)resource_size(&vcpu_res),
+                       PAGE_SIZE);
+               ret = -ENXIO;
+               goto out;
+       }
+
+       vgic->vcpu_base = vcpu_res.start;
+       vgic->vctrl_base = NULL;
+       vgic->type = VGIC_V3;
+
+       kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+                vcpu_res.start, vgic->maint_irq);
+
+       *ops = &vgic_v3_ops;
+       *params = vgic;
+
+out:
+       of_node_put(vgic_node);
+       return ret;
+}
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
new file mode 100644 (file)
index 0000000..8e1dc03
--- /dev/null
@@ -0,0 +1,2464 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/uaccess.h>
+
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+/*
+ * How the whole thing works (courtesy of Christoffer Dall):
+ *
+ * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
+ *   something is pending on the CPU interface.
+ * - Interrupts that are pending on the distributor are stored on the
+ *   vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
+ *   ioctls and guest mmio ops, and other in-kernel peripherals such as the
+ *   arch. timers).
+ * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
+ *   recalculated
+ * - To calculate the oracle, we need info for each cpu from
+ *   compute_pending_for_cpu, which considers:
+ *   - PPI: dist->irq_pending & dist->irq_enable
+ *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
+ *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
+ *     registers, stored on each vcpu. We only keep one bit of
+ *     information per interrupt, making sure that only one vcpu can
+ *     accept the interrupt.
+ * - If any of the above state changes, we must recalculate the oracle.
+ * - The same is true when injecting an interrupt, except that we only
+ *   consider a single interrupt at a time. The irq_spi_cpu array
+ *   contains the target CPU for each SPI.
+ *
+ * The handling of level interrupts adds some extra complexity. We
+ * need to track when the interrupt has been EOIed, so we can sample
+ * the 'line' again. This is achieved as such:
+ *
+ * - When a level interrupt is moved onto a vcpu, the corresponding
+ *   bit in irq_queued is set. As long as this bit is set, the line
+ *   will be ignored for further interrupts. The interrupt is injected
+ *   into the vcpu with the GICH_LR_EOI bit set (generate a
+ *   maintenance interrupt on EOI).
+ * - When the interrupt is EOIed, the maintenance interrupt fires,
+ *   and clears the corresponding bit in irq_queued. This allows the
+ *   interrupt line to be sampled again.
+ * - Note that level-triggered interrupts can also be set to pending from
+ *   writes to GICD_ISPENDRn and lowering the external input line does not
+ *   cause the interrupt to become inactive in such a situation.
+ *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
+ *   inactive as long as the external input line is held high.
+ */
+
+#define VGIC_ADDR_UNDEF                (-1)
+#define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)
+
+#define PRODUCT_ID_KVM         0x4b    /* ASCII code K */
+#define IMPLEMENTER_ARM                0x43b
+#define GICC_ARCH_VERSION_V2   0x2
+
+#define ACCESS_READ_VALUE      (1 << 0)
+#define ACCESS_READ_RAZ                (0 << 0)
+#define ACCESS_READ_MASK(x)    ((x) & (1 << 0))
+#define ACCESS_WRITE_IGNORED   (0 << 1)
+#define ACCESS_WRITE_SETBIT    (1 << 1)
+#define ACCESS_WRITE_CLEARBIT  (2 << 1)
+#define ACCESS_WRITE_VALUE     (3 << 1)
+#define ACCESS_WRITE_MASK(x)   ((x) & (3 << 1))
+
+static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
+static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
+static void vgic_update_state(struct kvm *kvm);
+static void vgic_kick_vcpus(struct kvm *kvm);
+static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi);
+static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
+static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
+static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
+static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+
+static const struct vgic_ops *vgic_ops;
+static const struct vgic_params *vgic;
+
+/*
+ * struct vgic_bitmap contains a bitmap made of unsigned longs, but
+ * extracts u32s out of them.
+ *
+ * This does not work on 64-bit BE systems, because the bitmap access
+ * will store two consecutive 32-bit words with the higher-addressed
+ * register's bits at the lower index and the lower-addressed register's
+ * bits at the higher index.
+ *
+ * Therefore, swizzle the register index when accessing the 32-bit word
+ * registers to access the right register's value.
+ */
+#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
+#define REG_OFFSET_SWIZZLE     1
+#else
+#define REG_OFFSET_SWIZZLE     0
+#endif
+
+static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
+{
+       int nr_longs;
+
+       nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
+
+       b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
+       if (!b->private)
+               return -ENOMEM;
+
+       b->shared = b->private + nr_cpus;
+
+       return 0;
+}
+
+static void vgic_free_bitmap(struct vgic_bitmap *b)
+{
+       kfree(b->private);
+       b->private = NULL;
+       b->shared = NULL;
+}
+
/*
 * Return a pointer to the 32-bit register word at byte @offset within
 * bitmap @x.  Offset 0 selects the banked (per-cpu) word for @cpuid;
 * any other offset indexes the shared words.  REG_OFFSET_SWIZZLE
 * corrects the word ordering on 64-bit big-endian hosts (see the
 * comment above the REG_OFFSET_SWIZZLE definition).
 */
static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
				int cpuid, u32 offset)
{
	offset >>= 2;	/* byte offset -> u32 word index */
	if (!offset)
		return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
	else
		return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}
+
+static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
+                                  int cpuid, int irq)
+{
+       if (irq < VGIC_NR_PRIVATE_IRQS)
+               return test_bit(irq, x->private + cpuid);
+
+       return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
+}
+
+static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
+                                   int irq, int val)
+{
+       unsigned long *reg;
+
+       if (irq < VGIC_NR_PRIVATE_IRQS) {
+               reg = x->private + cpuid;
+       } else {
+               reg = x->shared;
+               irq -= VGIC_NR_PRIVATE_IRQS;
+       }
+
+       if (val)
+               set_bit(irq, reg);
+       else
+               clear_bit(irq, reg);
+}
+
/* Return the banked (SGI/PPI) portion of bitmap @x for @cpuid. */
static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	return x->private + cpuid;
}
+
/* Return the shared (SPI) portion of bitmap @x. */
static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared;
}
+
/*
 * Allocate the backing store for a vgic bytemap (one byte per
 * interrupt): VGIC_NR_PRIVATE_IRQS banked bytes per cpu, followed by
 * the bytes for the shared interrupts.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
{
	int size;

	size  = nr_cpus * VGIC_NR_PRIVATE_IRQS;
	size += nr_irqs - VGIC_NR_PRIVATE_IRQS;

	x->private = kzalloc(size, GFP_KERNEL);
	if (!x->private)
		return -ENOMEM;

	/* x->private is a u32 pointer, so scale the byte count down */
	x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
	return 0;
}
+
+static void vgic_free_bytemap(struct vgic_bytemap *b)
+{
+       kfree(b->private);
+       b->private = NULL;
+       b->shared = NULL;
+}
+
+static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
+{
+       u32 *reg;
+
+       if (offset < VGIC_NR_PRIVATE_IRQS) {
+               reg = x->private;
+               offset += cpuid * VGIC_NR_PRIVATE_IRQS;
+       } else {
+               reg = x->shared;
+               offset -= VGIC_NR_PRIVATE_IRQS;
+       }
+
+       return reg + (offset / sizeof(u32));
+}
+
+#define VGIC_CFG_LEVEL 0
+#define VGIC_CFG_EDGE  1
+
+static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       int irq_val;
+
+       irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
+       return irq_val == VGIC_CFG_EDGE;
+}
+
+static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
+}
+
+static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
+}
+
+static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
+}
+
+static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
+}
+
+static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
+}
+
+static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
+}
+
+static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
+}
+
+static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
+}
+
+static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
+}
+
+static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
+}
+
+static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
+}
+
+static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
+}
+
+static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
+{
+       if (irq < VGIC_NR_PRIVATE_IRQS)
+               set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
+       else
+               set_bit(irq - VGIC_NR_PRIVATE_IRQS,
+                       vcpu->arch.vgic_cpu.pending_shared);
+}
+
+static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
+{
+       if (irq < VGIC_NR_PRIVATE_IRQS)
+               clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
+       else
+               clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
+                         vcpu->arch.vgic_cpu.pending_shared);
+}
+
+static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
+{
+       return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
+}
+
/*
 * Fetch the guest-written value from the mmio data buffer.  The buffer
 * holds a little-endian word; convert to host order and mask to the
 * access width.
 */
static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
	return le32_to_cpu(*((u32 *)mmio->data)) & mask;
}
+
/*
 * Store @value (masked to the access width) into the mmio data buffer
 * in little-endian order, for delivery back to the guest.
 */
static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
	*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
}
+
/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
			    phys_addr_t offset, int mode)
{
	/* Sub-word (byte/halfword) accesses land at a bit offset. */
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		/* A NULL backing word is only legal for RAZ/WI registers. */
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			/* Replace only the bytes the access covers. */
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}
+
/*
 * Handle the first distributor words: GICD_CTLR, GICD_TYPER and
 * GICD_IIDR.  Returns true when a write changed distributor state and
 * the caller must propagate the update.
 */
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			/* Only bit 0 (group enable) is implemented. */
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		/* CPUNumber = nr_vcpus - 1; ITLinesNumber = nr_irqs/32 - 1 */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		/* KVM product id, ARM as implementer; read-only */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}
+
+static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
+                              struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+       vgic_reg_access(mmio, NULL, offset,
+                       ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+       return false;
+}
+
+static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
+                                      struct kvm_exit_mmio *mmio,
+                                      phys_addr_t offset)
+{
+       u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
+                                      vcpu->vcpu_id, offset);
+       vgic_reg_access(mmio, reg, offset,
+                       ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+       if (mmio->is_write) {
+               vgic_update_state(vcpu->kvm);
+               return true;
+       }
+
+       return false;
+}
+
+static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
+                                        struct kvm_exit_mmio *mmio,
+                                        phys_addr_t offset)
+{
+       u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
+                                      vcpu->vcpu_id, offset);
+       vgic_reg_access(mmio, reg, offset,
+                       ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+       if (mmio->is_write) {
+               if (offset < 4) /* Force SGI enabled */
+                       *reg |= 0xffff;
+               vgic_retire_disabled_irqs(vcpu);
+               vgic_update_state(vcpu->kvm);
+               return true;
+       }
+
+       return false;
+}
+
/*
 * GICD_ISPENDRn: writing 1 marks an interrupt pending.  Level irqs
 * additionally get their soft-pend bit set so a later drop of the
 * input line does not deassert them.
 */
static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	u32 *reg, orig;
	u32 level_mask;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	/* A clear cfg bit means level-triggered (VGIC_CFG_LEVEL == 0). */
	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset);
	level_mask = (~(*reg));

	/* Mark both level and edge triggered irqs as pending */
	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);

	if (mmio->is_write) {
		/* Set the soft-pending flag only for level-triggered irqs */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
		*reg &= level_mask;

		/* Ignore writes to SGIs */
		/*
		 * NOTE(review): orig was captured from the pending word,
		 * but at this point *reg is the soft-pend word, so the SGI
		 * restore below merges pending bits into soft-pend —
		 * confirm against upstream whether this is intentional.
		 */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
+
/*
 * GICD_ICPENDRn: writing 1 deasserts a pending interrupt.  Level irqs
 * whose input line is still high are immediately re-asserted; their
 * soft-pend bit is dropped in any case.
 */
static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *level_active;
	u32 *reg, orig;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		/* Re-set level triggered level-active interrupts */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
					  vcpu->vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending,
					  vcpu->vcpu_id, offset);
		*reg |= *level_active;

		/* Ignore writes to SGIs: restore their original pending state */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
+
+static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
+                                    struct kvm_exit_mmio *mmio,
+                                    phys_addr_t offset)
+{
+       u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+                                       vcpu->vcpu_id, offset);
+       vgic_reg_access(mmio, reg, offset,
+                       ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+       return false;
+}
+
+#define GICD_ITARGETSR_SIZE    32
+#define GICD_CPUTARGETS_BITS   8
+#define GICD_IRQS_PER_ITARGETSR        (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
+static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
+{
+       struct vgic_dist *dist = &kvm->arch.vgic;
+       int i;
+       u32 val = 0;
+
+       irq -= VGIC_NR_PRIVATE_IRQS;
+
+       for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
+               val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
+
+       return val;
+}
+
/*
 * Apply a guest write to a GICD_ITARGETSR word covering SPIs starting
 * at @irq: record the chosen target cpu per interrupt and rebuild the
 * per-vcpu irq_spi_target bitmaps accordingly.
 */
static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;
		/* ffs() is 1-based; 0 means no bit set -> fall back to CPU0 */
		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		/* Exactly one vcpu keeps the bit for this SPI */
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}
+
/*
 * GICD_ITARGETSRn handler.  The first 32 registers cover the banked
 * SGIs/PPIs and always read as "this cpu only"; SPI targets are
 * read/write and routed through vgic_{get,set}_target_reg.
 */
static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupts targets as read-only */
	if (offset < 32) {
		/* Replicate this vcpu's bit into all four target bytes */
		u32 roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
+
+static u32 vgic_cfg_expand(u16 val)
+{
+       u32 res = 0;
+       int i;
+
+       /*
+        * Turn a 16bit value like abcd...mnop into a 32bit word
+        * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
+        */
+       for (i = 0; i < 16; i++)
+               res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);
+
+       return res;
+}
+
+static u16 vgic_cfg_compress(u32 val)
+{
+       u16 res = 0;
+       int i;
+
+       /*
+        * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
+        * abcd...mnop which is what we really care about.
+        */
+       for (i = 0; i < 16; i++)
+               res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;
+
+       return res;
+}
+
+/*
+ * The distributor uses 2 bits per IRQ for the CFG register, but the
+ * LSB is always 0. As such, we only keep the upper bit, and use the
+ * two above functions to compress/expand the bits
+ */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 val;
	u32 *reg;

	/* One packed word covers two HW registers, hence offset >> 1. */
	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	/* Pick the half of the packed word this HW register maps to. */
	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	/* Present the guest with the architectural 2-bits-per-irq view. */
	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		/* Offsets 0-7 are SGIs/PPIs: their config is fixed. */
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		/* Fold the guest value back into the packed half-word. */
		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}
+
+static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
+                               struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+       u32 reg;
+       vgic_reg_access(mmio, &reg, offset,
+                       ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
+       if (mmio->is_write) {
+               vgic_dispatch_sgi(vcpu, reg);
+               vgic_update_state(vcpu->kvm);
+               return true;
+       }
+
+       return false;
+}
+
/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int vcpu_id = vcpu->vcpu_id;
	int i;

	/* Walk only the LRs currently holding an interrupt. */
	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface.  It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set_pending(vcpu, lr.irq);
		/* SGIs also carry the requesting cpu in the source byte. */
		if (lr.irq < VGIC_NR_SGIS)
			*vgic_get_sgi_sources(dist, vcpu_id, lr.irq) |= 1 << lr.source;
		lr.state &= ~LR_STATE_PENDING;
		vgic_set_lr(vcpu, i, lr);

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK)) {
			vgic_retire_lr(i, lr.irq, vcpu);
			vgic_irq_clear_queued(vcpu, lr.irq);
		}

		/* Finally update the VGIC state. */
		/* NOTE(review): called once per LR — could likely be hoisted
		 * out of the loop; confirm before changing. */
		vgic_update_state(vcpu->kvm);
	}
}
+
+/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
+static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
+                                       struct kvm_exit_mmio *mmio,
+                                       phys_addr_t offset)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       int sgi;
+       int min_sgi = (offset & ~0x3);
+       int max_sgi = min_sgi + 3;
+       int vcpu_id = vcpu->vcpu_id;
+       u32 reg = 0;
+
+       /* Copy source SGIs from distributor side */
+       for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
+               int shift = 8 * (sgi - min_sgi);
+               reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift;
+       }
+
+       mmio_data_write(mmio, ~0, reg);
+       return false;
+}
+
+static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
+                                        struct kvm_exit_mmio *mmio,
+                                        phys_addr_t offset, bool set)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       int sgi;
+       int min_sgi = (offset & ~0x3);
+       int max_sgi = min_sgi + 3;
+       int vcpu_id = vcpu->vcpu_id;
+       u32 reg;
+       bool updated = false;
+
+       reg = mmio_data_read(mmio, ~0);
+
+       /* Clear pending SGIs on the distributor */
+       for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
+               u8 mask = reg >> (8 * (sgi - min_sgi));
+               u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
+               if (set) {
+                       if ((*src & mask) != mask)
+                               updated = true;
+                       *src |= mask;
+               } else {
+                       if (*src & mask)
+                               updated = true;
+                       *src &= ~mask;
+               }
+       }
+
+       if (updated)
+               vgic_update_state(vcpu->kvm);
+
+       return updated;
+}
+
+static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
+                               struct kvm_exit_mmio *mmio,
+                               phys_addr_t offset)
+{
+       if (!mmio->is_write)
+               return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
+       else
+               return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
+}
+
+static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
+                                 struct kvm_exit_mmio *mmio,
+                                 phys_addr_t offset)
+{
+       if (!mmio->is_write)
+               return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
+       else
+               return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
+}
+
+/*
+ * I would have liked to use the kvm_bus_io_*() API instead, but it
+ * cannot cope with banked registers (only the VM pointer is passed
+ * around, and we need the vcpu). One of these days, someone please
+ * fix it!
+ */
/* One entry of the distributor MMIO decoding table. */
struct mmio_range {
	phys_addr_t base;	/* offset of this register group */
	unsigned long len;	/* length of the group in bytes */
	int bits_per_irq;	/* register bits per interrupt (0: none) */
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset);
};
+
+/*
+ * Dispatch table for the emulated GIC distributor register map.
+ * Ranges for group and active set/clear registers are handled as
+ * RAZ/WI (not modelled). The empty entry terminates the table for
+ * find_matching_range().
+ */
+static const struct mmio_range vgic_dist_ranges[] = {
+       {
+               .base           = GIC_DIST_CTRL,
+               .len            = 12,
+               .bits_per_irq   = 0,
+               .handle_mmio    = handle_mmio_misc,
+       },
+       {
+               .base           = GIC_DIST_IGROUP,
+               .len            = VGIC_MAX_IRQS / 8,
+               .bits_per_irq   = 1,
+               .handle_mmio    = handle_mmio_raz_wi,
+       },
+       {
+               .base           = GIC_DIST_ENABLE_SET,
+               .len            = VGIC_MAX_IRQS / 8,
+               .bits_per_irq   = 1,
+               .handle_mmio    = handle_mmio_set_enable_reg,
+       },
+       {
+               .base           = GIC_DIST_ENABLE_CLEAR,
+               .len            = VGIC_MAX_IRQS / 8,
+               .bits_per_irq   = 1,
+               .handle_mmio    = handle_mmio_clear_enable_reg,
+       },
+       {
+               .base           = GIC_DIST_PENDING_SET,
+               .len            = VGIC_MAX_IRQS / 8,
+               .bits_per_irq   = 1,
+               .handle_mmio    = handle_mmio_set_pending_reg,
+       },
+       {
+               .base           = GIC_DIST_PENDING_CLEAR,
+               .len            = VGIC_MAX_IRQS / 8,
+               .bits_per_irq   = 1,
+               .handle_mmio    = handle_mmio_clear_pending_reg,
+       },
+       {
+               .base           = GIC_DIST_ACTIVE_SET,
+               .len            = VGIC_MAX_IRQS / 8,
+               .bits_per_irq   = 1,
+               .handle_mmio    = handle_mmio_raz_wi,
+       },
+       {
+               .base           = GIC_DIST_ACTIVE_CLEAR,
+               .len            = VGIC_MAX_IRQS / 8,
+               .bits_per_irq   = 1,
+               .handle_mmio    = handle_mmio_raz_wi,
+       },
+       {
+               .base           = GIC_DIST_PRI,
+               .len            = VGIC_MAX_IRQS,
+               .bits_per_irq   = 8,
+               .handle_mmio    = handle_mmio_priority_reg,
+       },
+       {
+               .base           = GIC_DIST_TARGET,
+               .len            = VGIC_MAX_IRQS,
+               .bits_per_irq   = 8,
+               .handle_mmio    = handle_mmio_target_reg,
+       },
+       {
+               .base           = GIC_DIST_CONFIG,
+               .len            = VGIC_MAX_IRQS / 4,
+               .bits_per_irq   = 2,
+               .handle_mmio    = handle_mmio_cfg_reg,
+       },
+       {
+               .base           = GIC_DIST_SOFTINT,
+               .len            = 4,
+               .handle_mmio    = handle_mmio_sgi_reg,
+       },
+       {
+               .base           = GIC_DIST_SGI_PENDING_CLEAR,
+               .len            = VGIC_NR_SGIS,
+               .handle_mmio    = handle_mmio_sgi_clear,
+       },
+       {
+               .base           = GIC_DIST_SGI_PENDING_SET,
+               .len            = VGIC_NR_SGIS,
+               .handle_mmio    = handle_mmio_sgi_set,
+       },
+       {}
+};
+
+/*
+ * Find the table entry whose [base, base+len) span fully contains the
+ * access at @offset of mmio->len bytes. Returns NULL when no range
+ * covers the whole access (accesses straddling a range boundary are
+ * rejected, not split).
+ */
+static const
+struct mmio_range *find_matching_range(const struct mmio_range *ranges,
+                                      struct kvm_exit_mmio *mmio,
+                                      phys_addr_t offset)
+{
+       const struct mmio_range *r = ranges;
+
+       while (r->len) {
+               if (offset >= r->base &&
+                   (offset + mmio->len) <= (r->base + r->len))
+                       return r;
+               r++;
+       }
+
+       return NULL;
+}
+
+/*
+ * Check that a per-IRQ register access stays within the number of IRQs
+ * actually configured for this VM (the register map is sized for
+ * VGIC_MAX_IRQS, which may exceed dist->nr_irqs). Non-per-IRQ ranges
+ * (bits_per_irq == 0) are always valid.
+ */
+static bool vgic_validate_access(const struct vgic_dist *dist,
+                                const struct mmio_range *range,
+                                unsigned long offset)
+{
+       int irq;
+
+       if (!range->bits_per_irq)
+               return true;    /* Not an irq-based access */
+
+       /* first IRQ covered by the word at this byte offset */
+       irq = offset * 8 / range->bits_per_irq;
+       if (irq >= dist->nr_irqs)
+               return false;
+
+       return true;
+}
+
+/**
+ * vgic_handle_mmio - handle an in-kernel MMIO access
+ * @vcpu:      pointer to the vcpu performing the access
+ * @run:       pointer to the kvm_run structure
+ * @mmio:      pointer to the data describing the access
+ *
+ * returns true if the MMIO access has been performed in kernel space,
+ * and false if it needs to be emulated in user space.
+ */
+bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                     struct kvm_exit_mmio *mmio)
+{
+       const struct mmio_range *range;
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       unsigned long base = dist->vgic_dist_base;
+       bool updated_state;
+       unsigned long offset;
+
+       /* Only claim accesses fully inside the distributor window */
+       if (!irqchip_in_kernel(vcpu->kvm) ||
+           mmio->phys_addr < base ||
+           (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
+               return false;
+
+       /* We don't support ldrd / strd or ldm / stm to the emulated vgic */
+       if (mmio->len > 4) {
+               kvm_inject_dabt(vcpu, mmio->phys_addr);
+               return true;
+       }
+
+       /* offset within the distributor region */
+       offset = mmio->phys_addr - base;
+       range = find_matching_range(vgic_dist_ranges, mmio, offset);
+       if (unlikely(!range || !range->handle_mmio)) {
+               pr_warn("Unhandled access %d %08llx %d\n",
+                       mmio->is_write, mmio->phys_addr, mmio->len);
+               return false;
+       }
+
+       spin_lock(&vcpu->kvm->arch.vgic.lock);
+       /* recompute offset relative to the start of the matched range */
+       offset = mmio->phys_addr - range->base - base;
+       if (vgic_validate_access(dist, range, offset)) {
+               updated_state = range->handle_mmio(vcpu, mmio, offset);
+       } else {
+               /* out-of-range per-IRQ access: RAZ/WI */
+               vgic_reg_access(mmio, NULL, offset,
+                               ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+               updated_state = false;
+       }
+       spin_unlock(&vcpu->kvm->arch.vgic.lock);
+       kvm_prepare_mmio(run, mmio);
+       kvm_handle_mmio_return(vcpu, run);
+
+       if (updated_state)
+               vgic_kick_vcpus(vcpu->kvm);
+
+       return true;
+}
+
+/*
+ * Return a pointer to the byte recording which source CPUs have the
+ * given SGI pending for the target @vcpu_id (one bit per source CPU).
+ */
+static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
+{
+       return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
+}
+
+/*
+ * Handle a guest write to the SGI trigger register: decode the SGI
+ * number (bits [3:0]), the CPU target list (bits [23:16]) and the
+ * target-list filter mode (bits [25:24]), then mark the SGI pending
+ * on each targeted vcpu and record the sending vcpu as its source.
+ */
+static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct vgic_dist *dist = &kvm->arch.vgic;
+       int nrcpus = atomic_read(&kvm->online_vcpus);
+       u8 target_cpus;
+       int sgi, mode, c, vcpu_id;
+
+       vcpu_id = vcpu->vcpu_id;
+
+       sgi = reg & 0xf;
+       target_cpus = (reg >> 16) & 0xff;
+       mode = (reg >> 24) & 3;
+
+       switch (mode) {
+       case 0:
+               /* deliver to the explicit target list; nothing to do if empty */
+               if (!target_cpus)
+                       return;
+               break;
+
+       case 1:
+               /* deliver to all CPUs except the sender */
+               target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
+               break;
+
+       case 2:
+               /* deliver to the sender only */
+               target_cpus = 1 << vcpu_id;
+               break;
+       }
+
+       kvm_for_each_vcpu(c, vcpu, kvm) {
+               if (target_cpus & 1) {
+                       /* Flag the SGI as pending */
+                       vgic_dist_irq_set_pending(vcpu, sgi);
+                       *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
+                       kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
+               }
+
+               target_cpus >>= 1;
+       }
+}
+
+/* Number of shared (SPI) interrupts configured for this distributor. */
+static int vgic_nr_shared_irqs(struct vgic_dist *dist)
+{
+       return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
+}
+
+/*
+ * Recompute this vcpu's pending_percpu / pending_shared bitmaps as
+ * (pending & enabled), with shared IRQs further masked by the set of
+ * SPIs targeting this vcpu. Returns nonzero if any interrupt is
+ * pending for the vcpu.
+ */
+static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
+       unsigned long pending_private, pending_shared;
+       int nr_shared = vgic_nr_shared_irqs(dist);
+       int vcpu_id;
+
+       vcpu_id = vcpu->vcpu_id;
+       pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
+       pend_shared = vcpu->arch.vgic_cpu.pending_shared;
+
+       /* private (SGI/PPI) interrupts: banked per-cpu state */
+       pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
+       enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
+       bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
+
+       /* shared (SPI) interrupts, restricted to those targeting us */
+       pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
+       enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
+       bitmap_and(pend_shared, pending, enabled, nr_shared);
+       bitmap_and(pend_shared, pend_shared,
+                  vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
+                  nr_shared);
+
+       pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
+       pending_shared = find_first_bit(pend_shared, nr_shared);
+       return (pending_private < VGIC_NR_PRIVATE_IRQS ||
+               pending_shared < vgic_nr_shared_irqs(dist));
+}
+
+/*
+ * Update the interrupt state and determine which CPUs have pending
+ * interrupts. Must be called with distributor lock held.
+ */
+static void vgic_update_state(struct kvm *kvm)
+{
+       struct vgic_dist *dist = &kvm->arch.vgic;
+       struct kvm_vcpu *vcpu;
+       int c;
+
+       if (!dist->enabled) {
+               /*
+                * NOTE(review): flags vcpu 0 as "pending" even though the
+                * distributor is disabled — presumably to force a state
+                * re-evaluation on that vcpu; confirm against upstream intent.
+                */
+               set_bit(0, dist->irq_pending_on_cpu);
+               return;
+       }
+
+       kvm_for_each_vcpu(c, vcpu, kvm) {
+               if (compute_pending_for_cpu(vcpu)) {
+                       pr_debug("CPU%d has pending interrupts\n", c);
+                       set_bit(c, dist->irq_pending_on_cpu);
+               }
+       }
+}
+
+/*
+ * Thin wrappers dispatching list-register and CPU-interface accesses
+ * through the hardware-specific vgic_ops vtable.
+ */
+static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
+{
+       return vgic_ops->get_lr(vcpu, lr);
+}
+
+static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
+                              struct vgic_lr vlr)
+{
+       vgic_ops->set_lr(vcpu, lr, vlr);
+}
+
+static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
+                              struct vgic_lr vlr)
+{
+       vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
+}
+
+static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
+{
+       return vgic_ops->get_elrsr(vcpu);
+}
+
+static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
+{
+       return vgic_ops->get_eisr(vcpu);
+}
+
+static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
+{
+       return vgic_ops->get_interrupt_status(vcpu);
+}
+
+static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
+{
+       vgic_ops->enable_underflow(vcpu);
+}
+
+static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
+{
+       vgic_ops->disable_underflow(vcpu);
+}
+
+static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+       vgic_ops->get_vmcr(vcpu, vmcr);
+}
+
+static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+       vgic_ops->set_vmcr(vcpu, vmcr);
+}
+
+static inline void vgic_enable(struct kvm_vcpu *vcpu)
+{
+       vgic_ops->enable(vcpu);
+}
+
+/*
+ * Empty a list register and drop its IRQ mapping so the LR can be
+ * reused for another interrupt.
+ */
+static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
+{
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
+
+       vlr.state = 0;
+       vgic_set_lr(vcpu, lr_nr, vlr);
+       clear_bit(lr_nr, vgic_cpu->lr_used);
+       vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
+}
+
+/*
+ * An interrupt may have been disabled after being made pending on the
+ * CPU interface (the classic case is a timer running while we're
+ * rebooting the guest - the interrupt would kick as soon as the CPU
+ * interface gets enabled, with deadly consequences).
+ *
+ * The solution is to examine already active LRs, and check the
+ * interrupt is still enabled. If not, just retire it.
+ */
+static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
+{
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       int lr;
+
+       for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
+               struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
+
+               if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
+                       vgic_retire_lr(lr, vlr.irq, vcpu);
+                       /* allow the level IRQ to be sampled again once retired */
+                       if (vgic_irq_is_queued(vcpu, vlr.irq))
+                               vgic_irq_clear_queued(vcpu, vlr.irq);
+               }
+       }
+}
+
+/*
+ * Queue an interrupt to a CPU virtual interface. Return true on success,
+ * or false if it wasn't possible to queue it.
+ * @sgi_source_id is the sending cpu for SGIs (0 for other interrupts).
+ */
+static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
+{
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       struct vgic_lr vlr;
+       int lr;
+
+       /* Sanitize the input... */
+       BUG_ON(sgi_source_id & ~7);
+       BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
+       BUG_ON(irq >= dist->nr_irqs);
+
+       kvm_debug("Queue IRQ%d\n", irq);
+
+       lr = vgic_cpu->vgic_irq_lr_map[irq];
+
+       /* Do we have an active interrupt for the same CPUID? */
+       if (lr != LR_EMPTY) {
+               vlr = vgic_get_lr(vcpu, lr);
+               if (vlr.source == sgi_source_id) {
+                       /* reuse the existing LR instead of allocating one */
+                       kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
+                       BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
+                       vlr.state |= LR_STATE_PENDING;
+                       vgic_set_lr(vcpu, lr, vlr);
+                       return true;
+               }
+       }
+
+       /* Try to use another LR for this interrupt */
+       lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
+                              vgic->nr_lr);
+       if (lr >= vgic->nr_lr)
+               return false;
+
+       kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
+       vgic_cpu->vgic_irq_lr_map[irq] = lr;
+       set_bit(lr, vgic_cpu->lr_used);
+
+       vlr.irq = irq;
+       vlr.source = sgi_source_id;
+       vlr.state = LR_STATE_PENDING;
+       if (!vgic_irq_is_edge(vcpu, irq))
+               /* level interrupts need a maintenance interrupt on EOI */
+               vlr.state |= LR_EOI_INT;
+
+       vgic_set_lr(vcpu, lr, vlr);
+
+       return true;
+}
+
+/*
+ * Queue a pending SGI, one LR per source CPU. Returns true when all
+ * sources could be queued (the SGI is then no longer pending in the
+ * distributor), false if at least one source still needs an LR.
+ */
+static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       unsigned long sources;
+       int vcpu_id = vcpu->vcpu_id;
+       int c;
+
+       sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);
+
+       for_each_set_bit(c, &sources, dist->nr_cpus) {
+               if (vgic_queue_irq(vcpu, c, irq))
+                       clear_bit(c, &sources);
+       }
+
+       *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;
+
+       /*
+        * If the sources bitmap has been cleared it means that we
+        * could queue all the SGIs onto link registers (see the
+        * clear_bit above), and therefore we are done with them in
+        * our emulated gic and can get rid of them.
+        */
+       if (!sources) {
+               vgic_dist_irq_clear_pending(vcpu, irq);
+               vgic_cpu_irq_clear(vcpu, irq);
+               return true;
+       }
+
+       return false;
+}
+
+/*
+ * Queue a pending PPI/SPI onto a list register. Returns true if the
+ * interrupt was queued (or is an in-progress level interrupt already
+ * queued), false if no LR was available.
+ */
+static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
+{
+       if (!vgic_can_sample_irq(vcpu, irq))
+               return true; /* level interrupt, already queued */
+
+       if (vgic_queue_irq(vcpu, 0, irq)) {
+               if (vgic_irq_is_edge(vcpu, irq)) {
+                       /* edge: consumed once queued */
+                       vgic_dist_irq_clear_pending(vcpu, irq);
+                       vgic_cpu_irq_clear(vcpu, irq);
+               } else {
+                       /* level: block re-sampling until EOI */
+                       vgic_irq_set_queued(vcpu, irq);
+               }
+
+               return true;
+       }
+
+       return false;
+}
+
+/*
+ * Fill the list registers with pending interrupts before running the
+ * guest. Called with the distributor lock held. Enables the underflow
+ * maintenance interrupt when not everything fit into the LRs.
+ */
+static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       int i, vcpu_id;
+       int overflow = 0;
+
+       vcpu_id = vcpu->vcpu_id;
+
+       /*
+        * We may not have any pending interrupt, or the interrupts
+        * may have been serviced from another vcpu. In all cases,
+        * move along.
+        */
+       if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
+               pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
+               goto epilog;
+       }
+
+       /* SGIs */
+       for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
+               if (!vgic_queue_sgi(vcpu, i))
+                       overflow = 1;
+       }
+
+       /* PPIs (continue scanning from where the SGI loop stopped) */
+       for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
+               if (!vgic_queue_hwirq(vcpu, i))
+                       overflow = 1;
+       }
+
+       /* SPIs */
+       for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) {
+               if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
+                       overflow = 1;
+       }
+
+epilog:
+       if (overflow) {
+               vgic_enable_underflow(vcpu);
+       } else {
+               vgic_disable_underflow(vcpu);
+               /*
+                * We're about to run this VCPU, and we've consumed
+                * everything the distributor had in store for
+                * us. Claim we don't have anything pending. We'll
+                * adjust that if needed while exiting.
+                */
+               clear_bit(vcpu_id, dist->irq_pending_on_cpu);
+       }
+}
+
+/*
+ * Process the maintenance interrupt causes on guest exit: clean up
+ * EOIed level interrupts and acknowledge underflow. Returns true if a
+ * level interrupt is still pending and must be re-queued.
+ */
+static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
+{
+       u32 status = vgic_get_interrupt_status(vcpu);
+       bool level_pending = false;
+
+       kvm_debug("STATUS = %08x\n", status);
+
+       if (status & INT_STATUS_EOI) {
+               /*
+                * Some level interrupts have been EOIed. Clear their
+                * active bit.
+                */
+               u64 eisr = vgic_get_eisr(vcpu);
+               unsigned long *eisr_ptr = (unsigned long *)&eisr;
+               int lr;
+
+               for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
+                       struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
+                       WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
+
+                       vgic_irq_clear_queued(vcpu, vlr.irq);
+                       WARN_ON(vlr.state & LR_STATE_MASK);
+                       vlr.state = 0;
+                       vgic_set_lr(vcpu, lr, vlr);
+
+                       /*
+                        * If the IRQ was EOIed it was also ACKed and we
+                        * therefore assume we can clear the soft pending
+                        * state (should it had been set) for this interrupt.
+                        *
+                        * Note: if the IRQ soft pending state was set after
+                        * the IRQ was acked, it actually shouldn't be
+                        * cleared, but we have no way of knowing that unless
+                        * we start trapping ACKs when the soft-pending state
+                        * is set.
+                        */
+                       vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
+
+                       /* Any additional pending interrupt? */
+                       if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
+                               vgic_cpu_irq_set(vcpu, vlr.irq);
+                               level_pending = true;
+                       } else {
+                               vgic_dist_irq_clear_pending(vcpu, vlr.irq);
+                               vgic_cpu_irq_clear(vcpu, vlr.irq);
+                       }
+
+                       /*
+                        * Despite being EOIed, the LR may not have
+                        * been marked as empty.
+                        */
+                       vgic_sync_lr_elrsr(vcpu, lr, vlr);
+               }
+       }
+
+       if (status & INT_STATUS_UNDERFLOW)
+               vgic_disable_underflow(vcpu);
+
+       return level_pending;
+}
+
+/*
+ * Sync back the VGIC state after a guest run. The distributor lock is
+ * needed so we don't get preempted in the middle of the state processing.
+ */
+static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       u64 elrsr;
+       unsigned long *elrsr_ptr;
+       int lr, pending;
+       bool level_pending;
+
+       level_pending = vgic_process_maintenance(vcpu);
+       elrsr = vgic_get_elrsr(vcpu);
+       elrsr_ptr = (unsigned long *)&elrsr;
+
+       /* Clear mappings for empty LRs */
+       for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
+               struct vgic_lr vlr;
+
+               if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
+                       continue;
+
+               vlr = vgic_get_lr(vcpu, lr);
+
+               BUG_ON(vlr.irq >= dist->nr_irqs);
+               vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
+       }
+
+       /* Check if we still have something up our sleeve... */
+       pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
+       if (level_pending || pending < vgic->nr_lr)
+               /* some LR is still occupied: keep the vcpu flagged */
+               set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
+}
+
+/* Flush distributor state into the LRs before entering the guest. */
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       if (!irqchip_in_kernel(vcpu->kvm))
+               return;
+
+       spin_lock(&dist->lock);
+       __kvm_vgic_flush_hwstate(vcpu);
+       spin_unlock(&dist->lock);
+}
+
+/* Sync LR state back into the distributor after a guest exit. */
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       if (!irqchip_in_kernel(vcpu->kvm))
+               return;
+
+       spin_lock(&dist->lock);
+       __kvm_vgic_sync_hwstate(vcpu);
+       spin_unlock(&dist->lock);
+}
+
+/* Return nonzero if the vcpu has a vgic interrupt pending delivery. */
+int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       if (!irqchip_in_kernel(vcpu->kvm))
+               return 0;
+
+       return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
+}
+
+/* Kick every vcpu that now has a pending vgic interrupt. */
+static void vgic_kick_vcpus(struct kvm *kvm)
+{
+       struct kvm_vcpu *vcpu;
+       int c;
+
+       /*
+        * We've injected an interrupt, time to find out who deserves
+        * a good kick...
+        */
+       kvm_for_each_vcpu(c, vcpu, kvm) {
+               if (kvm_vgic_vcpu_pending_irq(vcpu))
+                       kvm_vcpu_kick(vcpu);
+       }
+}
+
+/*
+ * Decide whether an injection actually changes observable state.
+ * Returns nonzero when the interrupt should be injected.
+ */
+static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
+{
+       int edge_triggered = vgic_irq_is_edge(vcpu, irq);
+
+       /*
+        * Only inject an interrupt if:
+        * - edge triggered and we have a rising edge
+        * - level triggered and we change level
+        */
+       if (edge_triggered) {
+               int state = vgic_dist_irq_is_pending(vcpu, irq);
+               return level > state;
+       } else {
+               int state = vgic_dist_irq_get_level(vcpu, irq);
+               return level != state;
+       }
+}
+
+/*
+ * Core injection path: update level/pending state for @irq_num on the
+ * target vcpu (@cpuid for private IRQs, the configured SPI target
+ * otherwise). Returns true when the change may require kicking vcpus.
+ */
+static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
+                                 unsigned int irq_num, bool level)
+{
+       struct vgic_dist *dist = &kvm->arch.vgic;
+       struct kvm_vcpu *vcpu;
+       int edge_triggered, level_triggered;
+       int enabled;
+       bool ret = true;
+
+       spin_lock(&dist->lock);
+
+       vcpu = kvm_get_vcpu(kvm, cpuid);
+       edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
+       level_triggered = !edge_triggered;
+
+       if (!vgic_validate_injection(vcpu, irq_num, level)) {
+               ret = false;
+               goto out;
+       }
+
+       /* SPIs are delivered to their configured target cpu, not @cpuid */
+       if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
+               cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
+               vcpu = kvm_get_vcpu(kvm, cpuid);
+       }
+
+       kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);
+
+       if (level) {
+               if (level_triggered)
+                       vgic_dist_irq_set_level(vcpu, irq_num);
+               vgic_dist_irq_set_pending(vcpu, irq_num);
+       } else {
+               if (level_triggered) {
+                       vgic_dist_irq_clear_level(vcpu, irq_num);
+                       /* keep pending if the guest soft-pended it via MMIO */
+                       if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
+                               vgic_dist_irq_clear_pending(vcpu, irq_num);
+               } else {
+                       vgic_dist_irq_clear_pending(vcpu, irq_num);
+               }
+       }
+
+       enabled = vgic_irq_is_enabled(vcpu, irq_num);
+
+       if (!enabled) {
+               ret = false;
+               goto out;
+       }
+
+       if (!vgic_can_sample_irq(vcpu, irq_num)) {
+               /*
+                * Level interrupt in progress, will be picked up
+                * when EOId.
+                */
+               ret = false;
+               goto out;
+       }
+
+       if (level) {
+               vgic_cpu_irq_set(vcpu, irq_num);
+               set_bit(cpuid, dist->irq_pending_on_cpu);
+       }
+
+out:
+       spin_unlock(&dist->lock);
+
+       return ret;
+}
+
+/**
+ * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
+ * @kvm:     The VM structure pointer
+ * @cpuid:   The CPU for PPIs
+ * @irq_num: The IRQ number that is assigned to the device
+ * @level:   Edge-triggered:  true:  to trigger the interrupt
+ *                           false: to ignore the call
+ *          Level-sensitive  true:  activates an interrupt
+ *                           false: deactivates an interrupt
+ *
+ * The GIC is not concerned with devices being active-LOW or active-HIGH for
+ * level-sensitive interrupts.  You can think of the level parameter as 1
+ * being HIGH and 0 being LOW and all devices being active-HIGH.
+ *
+ * Always returns 0; the call is silently ignored before the vgic is
+ * fully initialized.
+ */
+int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
+                       bool level)
+{
+       if (likely(vgic_initialized(kvm)) &&
+           vgic_update_irq_pending(kvm, cpuid, irq_num, level))
+               vgic_kick_vcpus(kvm);
+
+       return 0;
+}
+
+/* Host IRQ handler for the GIC maintenance interrupt. */
+static irqreturn_t vgic_maintenance_handler(int irq, void *data)
+{
+       /*
+        * We cannot rely on the vgic maintenance interrupt to be
+        * delivered synchronously. This means we can only use it to
+        * exit the VM, and we perform the handling of EOIed
+        * interrupts on the exit path (see vgic_process_maintenance).
+        */
+       return IRQ_HANDLED;
+}
+
+/* Free the per-vcpu vgic allocations made by vgic_vcpu_init_maps(). */
+void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+       kfree(vgic_cpu->pending_shared);
+       kfree(vgic_cpu->vgic_irq_lr_map);
+       /* NULL the pointers so a later destroy is harmless */
+       vgic_cpu->pending_shared = NULL;
+       vgic_cpu->vgic_irq_lr_map = NULL;
+}
+
+/*
+ * Allocate the per-vcpu pending-SPI bitmap and IRQ->LR map, sized for
+ * @nr_irqs. Returns 0 on success, -ENOMEM on failure (partial
+ * allocations are freed via kvm_vgic_vcpu_destroy()).
+ */
+static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
+{
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+       /* one bit per shared IRQ */
+       int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
+       vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
+       vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);
+
+       if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
+               kvm_vgic_vcpu_destroy(vcpu);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/**
+ * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
+ * @vcpu: pointer to the vcpu struct
+ *
+ * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
+ * this vcpu and enable the VGIC for this VCPU
+ */
+static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       int i;
+
+       for (i = 0; i < dist->nr_irqs; i++) {
+               /* PPIs are enabled by default, SGIs/PPIs are edge-configured */
+               if (i < VGIC_NR_PPIS)
+                       vgic_bitmap_set_irq_val(&dist->irq_enabled,
+                                               vcpu->vcpu_id, i, 1);
+               if (i < VGIC_NR_PRIVATE_IRQS)
+                       vgic_bitmap_set_irq_val(&dist->irq_cfg,
+                                               vcpu->vcpu_id, i, VGIC_CFG_EDGE);
+
+               vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
+       }
+
+       /*
+        * Store the number of LRs per vcpu, so we don't have to go
+        * all the way to the distributor structure to find out. Only
+        * assembly code should use this one.
+        */
+       vgic_cpu->nr_lr = vgic->nr_lr;
+
+       vgic_enable(vcpu);
+}
+
+/*
+ * Free all distributor and per-vcpu state allocated by
+ * vgic_init_maps(). Safe to call on partially-initialized state.
+ */
+void kvm_vgic_destroy(struct kvm *kvm)
+{
+       struct vgic_dist *dist = &kvm->arch.vgic;
+       struct kvm_vcpu *vcpu;
+       int i;
+
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               kvm_vgic_vcpu_destroy(vcpu);
+
+       vgic_free_bitmap(&dist->irq_enabled);
+       vgic_free_bitmap(&dist->irq_level);
+       vgic_free_bitmap(&dist->irq_pending);
+       vgic_free_bitmap(&dist->irq_soft_pend);
+       vgic_free_bitmap(&dist->irq_queued);
+       vgic_free_bitmap(&dist->irq_cfg);
+       vgic_free_bytemap(&dist->irq_priority);
+       if (dist->irq_spi_target) {
+               for (i = 0; i < dist->nr_cpus; i++)
+                       vgic_free_bitmap(&dist->irq_spi_target[i]);
+       }
+       kfree(dist->irq_sgi_sources);
+       kfree(dist->irq_spi_cpu);
+       kfree(dist->irq_spi_target);
+       kfree(dist->irq_pending_on_cpu);
+       /* NULL everything so a repeated destroy is harmless */
+       dist->irq_sgi_sources = NULL;
+       dist->irq_spi_cpu = NULL;
+       dist->irq_spi_target = NULL;
+       dist->irq_pending_on_cpu = NULL;
+}
+
+/*
+ * Allocate and initialize the various data structures. Must be called
+ * with kvm->lock held! Idempotent: returns 0 immediately when already
+ * allocated; on any failure everything is torn down via
+ * kvm_vgic_destroy() before returning the error.
+ */
+static int vgic_init_maps(struct kvm *kvm)
+{
+       struct vgic_dist *dist = &kvm->arch.vgic;
+       struct kvm_vcpu *vcpu;
+       int nr_cpus, nr_irqs;
+       int ret, i;
+
+       if (dist->nr_cpus)      /* Already allocated */
+               return 0;
+
+       nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
+       if (!nr_cpus)           /* No vcpus? Can't be good... */
+               return -EINVAL;
+
+       /*
+        * If nobody configured the number of interrupts, use the
+        * legacy one.
+        */
+       if (!dist->nr_irqs)
+               dist->nr_irqs = VGIC_NR_IRQS_LEGACY;
+
+       nr_irqs = dist->nr_irqs;
+
+       ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
+       ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
+       ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
+       ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
+       ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
+       ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
+       ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);
+
+       if (ret)
+               goto out;
+
+       dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
+       dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
+       dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
+                                      GFP_KERNEL);
+       dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
+                                          GFP_KERNEL);
+       if (!dist->irq_sgi_sources ||
+           !dist->irq_spi_cpu ||
+           !dist->irq_spi_target ||
+           !dist->irq_pending_on_cpu) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       for (i = 0; i < nr_cpus; i++)
+               ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
+                                       nr_cpus, nr_irqs);
+
+       if (ret)
+               goto out;
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
+               if (ret) {
+                       kvm_err("VGIC: Failed to allocate vcpu memory\n");
+                       break;
+               }
+       }
+
+       /* route all SPIs to CPU0 by default (word-sized target regs) */
+       for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
+               vgic_set_target_reg(kvm, 0, i);
+
+out:
+       if (ret)
+               kvm_vgic_destroy(kvm);
+
+       return ret;
+}
+
+/**
+ * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
+ * @kvm: pointer to the kvm struct
+ *
+ * Map the virtual CPU interface into the VM before running any VCPUs.  We
+ * can't do this at creation time, because user space must first set the
+ * virtual CPU interface address in the guest physical address space.  Also
+ * initialize the ITARGETSRn regs to 0 on the emulated distributor.
+ *
+ * Returns 0 on success (or when the in-kernel irqchip is not in use),
+ * -ENXIO when the dist/cpu base addresses have not been set yet, or a
+ * negative error from the allocation/mapping steps.
+ */
+int kvm_vgic_init(struct kvm *kvm)
+{
+       struct kvm_vcpu *vcpu;
+       int ret = 0, i;
+
+       if (!irqchip_in_kernel(kvm))
+               return 0;
+
+       mutex_lock(&kvm->lock);
+
+       if (vgic_initialized(kvm))
+               goto out;
+
+       if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
+           IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
+               kvm_err("Need to set vgic cpu and dist addresses first\n");
+               ret = -ENXIO;
+               goto out;
+       }
+
+       ret = vgic_init_maps(kvm);
+       if (ret) {
+               kvm_err("Unable to allocate maps\n");
+               goto out;
+       }
+
+       /* map the host's virtual CPU interface at the guest's cpu base */
+       ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
+                                   vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
+       if (ret) {
+               kvm_err("Unable to remap VGIC CPU to VCPU\n");
+               goto out;
+       }
+
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               kvm_vgic_vcpu_init(vcpu);
+
+       kvm->arch.vgic.ready = true;
+out:
+       if (ret)
+               kvm_vgic_destroy(kvm);
+       mutex_unlock(&kvm->lock);
+       return ret;
+}
+
+int kvm_vgic_create(struct kvm *kvm)
+{
+       int i, vcpu_lock_idx = -1, ret = 0;
+       struct kvm_vcpu *vcpu;
+
+       mutex_lock(&kvm->lock);
+
+       if (kvm->arch.vgic.vctrl_base) {
+               ret = -EEXIST;
+               goto out;
+       }
+
+       /*
+        * Any time a vcpu is run, vcpu_load is called which tries to grab the
+        * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
+        * that no other VCPUs are run while we create the vgic.
+        */
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               if (!mutex_trylock(&vcpu->mutex))
+                       goto out_unlock;
+               vcpu_lock_idx = i;
+       }
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               if (vcpu->arch.has_run_once) {
+                       ret = -EBUSY;
+                       goto out_unlock;
+               }
+       }
+
+       spin_lock_init(&kvm->arch.vgic.lock);
+       kvm->arch.vgic.in_kernel = true;
+       kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
+       kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
+       kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
+
+out_unlock:
+       for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+               vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+               mutex_unlock(&vcpu->mutex);
+       }
+
+out:
+       mutex_unlock(&kvm->lock);
+       return ret;
+}
+
+static int vgic_ioaddr_overlap(struct kvm *kvm)
+{
+       phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
+       phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
+
+       if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
+               return 0;
+       if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
+           (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
+               return -EBUSY;
+       return 0;
+}
+
+static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
+                             phys_addr_t addr, phys_addr_t size)
+{
+       int ret;
+
+       if (addr & ~KVM_PHYS_MASK)
+               return -E2BIG;
+
+       if (addr & (SZ_4K - 1))
+               return -EINVAL;
+
+       if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
+               return -EEXIST;
+       if (addr + size < addr)
+               return -EINVAL;
+
+       *ioaddr = addr;
+       ret = vgic_ioaddr_overlap(kvm);
+       if (ret)
+               *ioaddr = VGIC_ADDR_UNDEF;
+
+       return ret;
+}
+
+/**
+ * kvm_vgic_addr - set or get vgic VM base addresses
+ * @kvm:   pointer to the vm struct
+ * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
+ * @addr:  pointer to address value
+ * @write: if true set the address in the VM address space, if false read the
+ *          address
+ *
+ * Set or get the vgic base addresses for the distributor and the virtual CPU
+ * interface in the VM physical address space.  These addresses are properties
+ * of the emulated core/SoC and therefore user space initially knows this
+ * information.
+ */
+int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
+{
+       int r = 0;
+       struct vgic_dist *vgic = &kvm->arch.vgic;
+
+       mutex_lock(&kvm->lock);
+       switch (type) {
+       case KVM_VGIC_V2_ADDR_TYPE_DIST:
+               if (write) {
+                       r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
+                                              *addr, KVM_VGIC_V2_DIST_SIZE);
+               } else {
+                       *addr = vgic->vgic_dist_base;
+               }
+               break;
+       case KVM_VGIC_V2_ADDR_TYPE_CPU:
+               if (write) {
+                       r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
+                                              *addr, KVM_VGIC_V2_CPU_SIZE);
+               } else {
+                       *addr = vgic->vgic_cpu_base;
+               }
+               break;
+       default:
+               r = -ENODEV;
+       }
+
+       mutex_unlock(&kvm->lock);
+       return r;
+}
+
+static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
+                                struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+       bool updated = false;
+       struct vgic_vmcr vmcr;
+       u32 *vmcr_field;
+       u32 reg;
+
+       vgic_get_vmcr(vcpu, &vmcr);
+
+       switch (offset & ~0x3) {
+       case GIC_CPU_CTRL:
+               vmcr_field = &vmcr.ctlr;
+               break;
+       case GIC_CPU_PRIMASK:
+               vmcr_field = &vmcr.pmr;
+               break;
+       case GIC_CPU_BINPOINT:
+               vmcr_field = &vmcr.bpr;
+               break;
+       case GIC_CPU_ALIAS_BINPOINT:
+               vmcr_field = &vmcr.abpr;
+               break;
+       default:
+               BUG();
+       }
+
+       if (!mmio->is_write) {
+               reg = *vmcr_field;
+               mmio_data_write(mmio, ~0, reg);
+       } else {
+               reg = mmio_data_read(mmio, ~0);
+               if (reg != *vmcr_field) {
+                       *vmcr_field = reg;
+                       vgic_set_vmcr(vcpu, &vmcr);
+                       updated = true;
+               }
+       }
+       return updated;
+}
+
+static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
+                            struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+       return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
+}
+
+static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
+                                 struct kvm_exit_mmio *mmio,
+                                 phys_addr_t offset)
+{
+       u32 reg;
+
+       if (mmio->is_write)
+               return false;
+
+       /* GICC_IIDR */
+       reg = (PRODUCT_ID_KVM << 20) |
+             (GICC_ARCH_VERSION_V2 << 16) |
+             (IMPLEMENTER_ARM << 0);
+       mmio_data_write(mmio, ~0, reg);
+       return false;
+}
+
+/*
+ * CPU Interface Register accesses - these are not accessed by the VM, but by
+ * user space for saving and restoring VGIC state.
+ */
+static const struct mmio_range vgic_cpu_ranges[] = {
+       {
+               .base           = GIC_CPU_CTRL,
+               .len            = 12,
+               .handle_mmio    = handle_cpu_mmio_misc,
+       },
+       {
+               .base           = GIC_CPU_ALIAS_BINPOINT,
+               .len            = 4,
+               .handle_mmio    = handle_mmio_abpr,
+       },
+       {
+               .base           = GIC_CPU_ACTIVEPRIO,
+               .len            = 16,
+               .handle_mmio    = handle_mmio_raz_wi,
+       },
+       {
+               .base           = GIC_CPU_IDENT,
+               .len            = 4,
+               .handle_mmio    = handle_cpu_mmio_ident,
+       },
+};
+
+static int vgic_attr_regs_access(struct kvm_device *dev,
+                                struct kvm_device_attr *attr,
+                                u32 *reg, bool is_write)
+{
+       const struct mmio_range *r = NULL, *ranges;
+       phys_addr_t offset;
+       int ret, cpuid, c;
+       struct kvm_vcpu *vcpu, *tmp_vcpu;
+       struct vgic_dist *vgic;
+       struct kvm_exit_mmio mmio;
+
+       offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+       cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+               KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+
+       mutex_lock(&dev->kvm->lock);
+
+       ret = vgic_init_maps(dev->kvm);
+       if (ret)
+               goto out;
+
+       if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+       vgic = &dev->kvm->arch.vgic;
+
+       mmio.len = 4;
+       mmio.is_write = is_write;
+       if (is_write)
+               mmio_data_write(&mmio, ~0, *reg);
+       switch (attr->group) {
+       case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+               mmio.phys_addr = vgic->vgic_dist_base + offset;
+               ranges = vgic_dist_ranges;
+               break;
+       case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+               mmio.phys_addr = vgic->vgic_cpu_base + offset;
+               ranges = vgic_cpu_ranges;
+               break;
+       default:
+               BUG();
+       }
+       r = find_matching_range(ranges, &mmio, offset);
+
+       if (unlikely(!r || !r->handle_mmio)) {
+               ret = -ENXIO;
+               goto out;
+       }
+
+
+       spin_lock(&vgic->lock);
+
+       /*
+        * Ensure that no other VCPU is running by checking the vcpu->cpu
+        * field.  If no other VPCUs are running we can safely access the VGIC
+        * state, because even if another VPU is run after this point, that
+        * VCPU will not touch the vgic state, because it will block on
+        * getting the vgic->lock in kvm_vgic_sync_hwstate().
+        */
+       kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
+               if (unlikely(tmp_vcpu->cpu != -1)) {
+                       ret = -EBUSY;
+                       goto out_vgic_unlock;
+               }
+       }
+
+       /*
+        * Move all pending IRQs from the LRs on all VCPUs so the pending
+        * state can be properly represented in the register state accessible
+        * through this API.
+        */
+       kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
+               vgic_unqueue_irqs(tmp_vcpu);
+
+       offset -= r->base;
+       r->handle_mmio(vcpu, &mmio, offset);
+
+       if (!is_write)
+               *reg = mmio_data_read(&mmio, ~0);
+
+       ret = 0;
+out_vgic_unlock:
+       spin_unlock(&vgic->lock);
+out:
+       mutex_unlock(&dev->kvm->lock);
+       return ret;
+}
+
+static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+       int r;
+
+       switch (attr->group) {
+       case KVM_DEV_ARM_VGIC_GRP_ADDR: {
+               u64 __user *uaddr = (u64 __user *)(long)attr->addr;
+               u64 addr;
+               unsigned long type = (unsigned long)attr->attr;
+
+               if (copy_from_user(&addr, uaddr, sizeof(addr)))
+                       return -EFAULT;
+
+               r = kvm_vgic_addr(dev->kvm, type, &addr, true);
+               return (r == -ENODEV) ? -ENXIO : r;
+       }
+
+       case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+       case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
+               u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+               u32 reg;
+
+               if (get_user(reg, uaddr))
+                       return -EFAULT;
+
+               return vgic_attr_regs_access(dev, attr, &reg, true);
+       }
+       case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
+               u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+               u32 val;
+               int ret = 0;
+
+               if (get_user(val, uaddr))
+                       return -EFAULT;
+
+               /*
+                * We require:
+                * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
+                * - at most 1024 interrupts
+                * - a multiple of 32 interrupts
+                */
+               if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
+                   val > VGIC_MAX_IRQS ||
+                   (val & 31))
+                       return -EINVAL;
+
+               mutex_lock(&dev->kvm->lock);
+
+               if (vgic_initialized(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
+                       ret = -EBUSY;
+               else
+                       dev->kvm->arch.vgic.nr_irqs = val;
+
+               mutex_unlock(&dev->kvm->lock);
+
+               return ret;
+       }
+
+       }
+
+       return -ENXIO;
+}
+
+static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+       int r = -ENXIO;
+
+       switch (attr->group) {
+       case KVM_DEV_ARM_VGIC_GRP_ADDR: {
+               u64 __user *uaddr = (u64 __user *)(long)attr->addr;
+               u64 addr;
+               unsigned long type = (unsigned long)attr->attr;
+
+               r = kvm_vgic_addr(dev->kvm, type, &addr, false);
+               if (r)
+                       return (r == -ENODEV) ? -ENXIO : r;
+
+               if (copy_to_user(uaddr, &addr, sizeof(addr)))
+                       return -EFAULT;
+               break;
+       }
+
+       case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+       case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
+               u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+               u32 reg = 0;
+
+               r = vgic_attr_regs_access(dev, attr, &reg, false);
+               if (r)
+                       return r;
+               r = put_user(reg, uaddr);
+               break;
+       }
+       case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
+               u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+               r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
+               break;
+       }
+
+       }
+
+       return r;
+}
+
+static int vgic_has_attr_regs(const struct mmio_range *ranges,
+                             phys_addr_t offset)
+{
+       struct kvm_exit_mmio dev_attr_mmio;
+
+       dev_attr_mmio.len = 4;
+       if (find_matching_range(ranges, &dev_attr_mmio, offset))
+               return 0;
+       else
+               return -ENXIO;
+}
+
+static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+       phys_addr_t offset;
+
+       switch (attr->group) {
+       case KVM_DEV_ARM_VGIC_GRP_ADDR:
+               switch (attr->attr) {
+               case KVM_VGIC_V2_ADDR_TYPE_DIST:
+               case KVM_VGIC_V2_ADDR_TYPE_CPU:
+                       return 0;
+               }
+               break;
+       case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+               offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+               return vgic_has_attr_regs(vgic_dist_ranges, offset);
+       case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+               offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+               return vgic_has_attr_regs(vgic_cpu_ranges, offset);
+       case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+               return 0;
+       }
+       return -ENXIO;
+}
+
+static void vgic_destroy(struct kvm_device *dev)
+{
+       kfree(dev);
+}
+
+static int vgic_create(struct kvm_device *dev, u32 type)
+{
+       return kvm_vgic_create(dev->kvm);
+}
+
+static struct kvm_device_ops kvm_arm_vgic_v2_ops = {
+       .name = "kvm-arm-vgic",
+       .create = vgic_create,
+       .destroy = vgic_destroy,
+       .set_attr = vgic_set_attr,
+       .get_attr = vgic_get_attr,
+       .has_attr = vgic_has_attr,
+};
+
+static void vgic_init_maintenance_interrupt(void *info)
+{
+       enable_percpu_irq(vgic->maint_irq, 0);
+}
+
+static int vgic_cpu_notify(struct notifier_block *self,
+                          unsigned long action, void *cpu)
+{
+       switch (action) {
+       case CPU_STARTING:
+       case CPU_STARTING_FROZEN:
+               vgic_init_maintenance_interrupt(NULL);
+               break;
+       case CPU_DYING:
+       case CPU_DYING_FROZEN:
+               disable_percpu_irq(vgic->maint_irq);
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block vgic_cpu_nb = {
+       .notifier_call = vgic_cpu_notify,
+};
+
+static const struct of_device_id vgic_ids[] = {
+       { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
+       { .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
+       {},
+};
+
+int kvm_vgic_hyp_init(void)
+{
+       const struct of_device_id *matched_id;
+       int (*vgic_probe)(struct device_node *,const struct vgic_ops **,
+                         const struct vgic_params **);
+       struct device_node *vgic_node;
+       int ret;
+
+       vgic_node = of_find_matching_node_and_match(NULL,
+                                                   vgic_ids, &matched_id);
+       if (!vgic_node) {
+               kvm_err("error: no compatible GIC node found\n");
+               return -ENODEV;
+       }
+
+       vgic_probe = matched_id->data;
+       ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
+       if (ret)
+               return ret;
+
+       ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
+                                "vgic", kvm_get_running_vcpus());
+       if (ret) {
+               kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
+               return ret;
+       }
+
+       ret = register_cpu_notifier(&vgic_cpu_nb);
+       if (ret) {
+               kvm_err("Cannot register vgic CPU notifier\n");
+               goto out_free_irq;
+       }
+
+       /* Callback into for arch code for setup */
+       vgic_arch_setup(vgic);
+
+       on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
+       return kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
+                                      KVM_DEV_TYPE_ARM_VGIC_V2);
+
+out_free_irq:
+       free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
+       return ret;
+}
index ea475cd035112a9db93ffa028a552df9be0724af..d6a3d0993d8812c8527274d01e8c08ce942746a4 100644 (file)
 #include "async_pf.h"
 #include <trace/events/kvm.h>
 
+static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
+                                              struct kvm_async_pf *work)
+{
+#ifdef CONFIG_KVM_ASYNC_PF_SYNC
+       kvm_arch_async_page_present(vcpu, work);
+#endif
+}
+static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
+                                               struct kvm_async_pf *work)
+{
+#ifndef CONFIG_KVM_ASYNC_PF_SYNC
+       kvm_arch_async_page_present(vcpu, work);
+#endif
+}
+
 static struct kmem_cache *async_pf_cache;
 
 int kvm_async_pf_init(void)
@@ -56,7 +71,6 @@ void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void async_pf_execute(struct work_struct *work)
 {
-       struct page *page = NULL;
        struct kvm_async_pf *apf =
                container_of(work, struct kvm_async_pf, work);
        struct mm_struct *mm = apf->mm;
@@ -66,16 +80,13 @@ static void async_pf_execute(struct work_struct *work)
 
        might_sleep();
 
-       use_mm(mm);
        down_read(&mm->mmap_sem);
-       get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
+       get_user_pages(NULL, mm, addr, 1, 1, 0, NULL, NULL);
        up_read(&mm->mmap_sem);
-       unuse_mm(mm);
+       kvm_async_page_present_sync(vcpu, apf);
 
        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
-       apf->page = page;
-       apf->done = true;
        spin_unlock(&vcpu->async_pf.lock);
 
        /*
@@ -83,12 +94,12 @@ static void async_pf_execute(struct work_struct *work)
         * this point
         */
 
-       trace_kvm_async_pf_completed(addr, page, gva);
+       trace_kvm_async_pf_completed(addr, gva);
 
        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);
 
-       mmdrop(mm);
+       mmput(mm);
        kvm_put_kvm(vcpu->kvm);
 }
 
@@ -99,10 +110,17 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                struct kvm_async_pf *work =
                        list_entry(vcpu->async_pf.queue.next,
                                   typeof(*work), queue);
-               cancel_work_sync(&work->work);
                list_del(&work->queue);
-               if (!work->done) /* work was canceled */
+
+#ifdef CONFIG_KVM_ASYNC_PF_SYNC
+               flush_work(&work->work);
+#else
+               if (cancel_work_sync(&work->work)) {
+                       mmput(work->mm);
+                       kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
                        kmem_cache_free(async_pf_cache, work);
+               }
+#endif
        }
 
        spin_lock(&vcpu->async_pf.lock);
@@ -111,8 +129,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                        list_entry(vcpu->async_pf.done.next,
                                   typeof(*work), link);
                list_del(&work->link);
-               if (!is_error_page(work->page))
-                       kvm_release_page_clean(work->page);
                kmem_cache_free(async_pf_cache, work);
        }
        spin_unlock(&vcpu->async_pf.lock);
@@ -132,19 +148,16 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
                list_del(&work->link);
                spin_unlock(&vcpu->async_pf.lock);
 
-               if (work->page)
-                       kvm_arch_async_page_ready(vcpu, work);
-               kvm_arch_async_page_present(vcpu, work);
+               kvm_arch_async_page_ready(vcpu, work);
+               kvm_async_page_present_async(vcpu, work);
 
                list_del(&work->queue);
                vcpu->async_pf.queued--;
-               if (!is_error_page(work->page))
-                       kvm_release_page_clean(work->page);
                kmem_cache_free(async_pf_cache, work);
        }
 }
 
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
                       struct kvm_arch_async_pf *arch)
 {
        struct kvm_async_pf *work;
@@ -162,14 +175,13 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
        if (!work)
                return 0;
 
-       work->page = NULL;
-       work->done = false;
+       work->wakeup_all = false;
        work->vcpu = vcpu;
        work->gva = gva;
-       work->addr = gfn_to_hva(vcpu->kvm, gfn);
+       work->addr = hva;
        work->arch = *arch;
        work->mm = current->mm;
-       atomic_inc(&work->mm->mm_count);
+       atomic_inc(&work->mm->mm_users);
        kvm_get_kvm(work->vcpu->kvm);
 
        /* this can't really happen otherwise gfn_to_pfn_async
@@ -187,7 +199,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
        return 1;
 retry_sync:
        kvm_put_kvm(work->vcpu->kvm);
-       mmdrop(work->mm);
+       mmput(work->mm);
        kmem_cache_free(async_pf_cache, work);
        return 0;
 }
@@ -203,7 +215,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
        if (!work)
                return -ENOMEM;
 
-       work->page = KVM_ERR_PTR_BAD_PAGE;
+       work->wakeup_all = true;
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
        spin_lock(&vcpu->async_pf.lock);
index 64ee720b75c7ac4a80c4e1c06cd5cacf0a3fa961..71ed39941b9c60184ccab58e043d0747f017e49a 100644 (file)
 #include <linux/list.h>
 #include <linux/eventfd.h>
 #include <linux/kernel.h>
+#include <linux/srcu.h>
 #include <linux/slab.h>
+#include <linux/seqlock.h>
+#include <trace/events/kvm.h>
 
 #include "iodev.h"
 
-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+#ifdef CONFIG_HAVE_KVM_IRQFD
 /*
  * --------------------------------------------------------------------
  * irqfd: Allows an fd to be used to inject an interrupt to the guest
@@ -74,7 +77,8 @@ struct _irqfd {
        struct kvm *kvm;
        wait_queue_t wait;
        /* Update side is protected by irqfds.lock */
-       struct kvm_kernel_irq_routing_entry __rcu *irq_entry;
+       struct kvm_kernel_irq_routing_entry irq_entry;
+       seqcount_t irq_entry_sc;
        /* Used for level IRQ fast-path */
        int gsi;
        struct work_struct inject;
@@ -118,19 +122,22 @@ static void
 irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
 {
        struct _irqfd_resampler *resampler;
+       struct kvm *kvm;
        struct _irqfd *irqfd;
+       int idx;
 
        resampler = container_of(kian, struct _irqfd_resampler, notifier);
+       kvm = resampler->kvm;
 
-       kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
+       kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                    resampler->notifier.gsi, 0, false);
 
-       rcu_read_lock();
+       idx = srcu_read_lock(&kvm->irq_srcu);
 
        list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
                eventfd_signal(irqfd->resamplefd, 1);
 
-       rcu_read_unlock();
+       srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
 static void
@@ -142,7 +149,7 @@ irqfd_resampler_shutdown(struct _irqfd *irqfd)
        mutex_lock(&kvm->irqfds.resampler_lock);
 
        list_del_rcu(&irqfd->resampler_link);
-       synchronize_rcu();
+       synchronize_srcu(&kvm->irq_srcu);
 
        if (list_empty(&resampler->list)) {
                list_del(&resampler->link);
@@ -219,19 +226,24 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
        struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
        unsigned long flags = (unsigned long)key;
-       struct kvm_kernel_irq_routing_entry *irq;
+       struct kvm_kernel_irq_routing_entry irq;
        struct kvm *kvm = irqfd->kvm;
+       unsigned seq;
+       int idx;
 
        if (flags & POLLIN) {
-               rcu_read_lock();
-               irq = rcu_dereference(irqfd->irq_entry);
+               idx = srcu_read_lock(&kvm->irq_srcu);
+               do {
+                       seq = read_seqcount_begin(&irqfd->irq_entry_sc);
+                       irq = irqfd->irq_entry;
+               } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
                /* An event has been signaled, inject an interrupt */
-               if (irq)
-                       kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
+               if (irq.type == KVM_IRQ_ROUTING_MSI)
+                       kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
                                        false);
                else
                        schedule_work(&irqfd->inject);
-               rcu_read_unlock();
+               srcu_read_unlock(&kvm->irq_srcu, idx);
        }
 
        if (flags & POLLHUP) {
@@ -267,34 +279,37 @@ irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
 }
 
 /* Must be called under irqfds.lock */
-static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
-                        struct kvm_irq_routing_table *irq_rt)
+static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd)
 {
        struct kvm_kernel_irq_routing_entry *e;
+       struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
+       int i, n_entries;
 
-       if (irqfd->gsi >= irq_rt->nr_rt_entries) {
-               rcu_assign_pointer(irqfd->irq_entry, NULL);
-               return;
-       }
+       n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);
+
+       write_seqcount_begin(&irqfd->irq_entry_sc);
+
+       irqfd->irq_entry.type = 0;
 
-       hlist_for_each_entry(e, &irq_rt->map[irqfd->gsi], link) {
+       e = entries;
+       for (i = 0; i < n_entries; ++i, ++e) {
                /* Only fast-path MSI. */
                if (e->type == KVM_IRQ_ROUTING_MSI)
-                       rcu_assign_pointer(irqfd->irq_entry, e);
-               else
-                       rcu_assign_pointer(irqfd->irq_entry, NULL);
+                       irqfd->irq_entry = *e;
        }
+
+       write_seqcount_end(&irqfd->irq_entry_sc);
 }
 
 static int
 kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 {
-       struct kvm_irq_routing_table *irq_rt;
        struct _irqfd *irqfd, *tmp;
        struct file *file = NULL;
        struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
        int ret;
        unsigned int events;
+       int idx;
 
        irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
        if (!irqfd)
@@ -305,6 +320,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->inject, irqfd_inject);
        INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
+       seqcount_init(&irqfd->irq_entry_sc);
 
        file = eventfd_fget(args->fd);
        if (IS_ERR(file)) {
@@ -363,7 +379,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
                }
 
                list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
-               synchronize_rcu();
+               synchronize_srcu(&kvm->irq_srcu);
 
                mutex_unlock(&kvm->irqfds.resampler_lock);
        }
@@ -387,9 +403,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
                goto fail;
        }
 
-       irq_rt = rcu_dereference_protected(kvm->irq_routing,
-                                          lockdep_is_held(&kvm->irqfds.lock));
-       irqfd_update(kvm, irqfd, irq_rt);
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       irqfd_update(kvm, irqfd);
+       srcu_read_unlock(&kvm->irq_srcu, idx);
 
        events = file->f_op->poll(file, &irqfd->pt);
 
@@ -428,12 +444,73 @@ fail:
        kfree(irqfd);
        return ret;
 }
+
+bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
+       struct kvm_irq_ack_notifier *kian;
+       int gsi, idx;
+
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
+       if (gsi != -1)
+               hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
+                                        link)
+                       if (kian->gsi == gsi) {
+                               srcu_read_unlock(&kvm->irq_srcu, idx);
+                               return true;
+                       }
+
+       srcu_read_unlock(&kvm->irq_srcu, idx);
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
+
+void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
+       struct kvm_irq_ack_notifier *kian;
+       int gsi, idx;
+
+       trace_kvm_ack_irq(irqchip, pin);
+
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
+       if (gsi != -1)
+               hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
+                                        link)
+                       if (kian->gsi == gsi)
+                               kian->irq_acked(kian);
+       srcu_read_unlock(&kvm->irq_srcu, idx);
+}
+
+void kvm_register_irq_ack_notifier(struct kvm *kvm,
+                                  struct kvm_irq_ack_notifier *kian)
+{
+       mutex_lock(&kvm->irq_lock);
+       hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
+       mutex_unlock(&kvm->irq_lock);
+#ifdef __KVM_HAVE_IOAPIC
+       kvm_vcpu_request_scan_ioapic(kvm);
+#endif
+}
+
+void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
+                                   struct kvm_irq_ack_notifier *kian)
+{
+       mutex_lock(&kvm->irq_lock);
+       hlist_del_init_rcu(&kian->link);
+       mutex_unlock(&kvm->irq_lock);
+       synchronize_srcu(&kvm->irq_srcu);
+#ifdef __KVM_HAVE_IOAPIC
+       kvm_vcpu_request_scan_ioapic(kvm);
+#endif
+}
 #endif
 
 void
 kvm_eventfd_init(struct kvm *kvm)
 {
-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+#ifdef CONFIG_HAVE_KVM_IRQFD
        spin_lock_init(&kvm->irqfds.lock);
        INIT_LIST_HEAD(&kvm->irqfds.items);
        INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
@@ -442,7 +519,7 @@ kvm_eventfd_init(struct kvm *kvm)
        INIT_LIST_HEAD(&kvm->ioeventfds);
 }
 
-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+#ifdef CONFIG_HAVE_KVM_IRQFD
 /*
  * shutdown any irqfd's that match fd+gsi
  */
@@ -461,14 +538,14 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
                if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
                        /*
-                        * This rcu_assign_pointer is needed for when
+                        * This clearing of irq_entry.type is needed for when
                         * another thread calls kvm_irq_routing_update before
                         * we flush workqueue below (we synchronize with
                         * kvm_irq_routing_update using irqfds.lock).
-                        * It is paired with synchronize_rcu done by caller
-                        * of that function.
                         */
-                       rcu_assign_pointer(irqfd->irq_entry, NULL);
+                       write_seqcount_begin(&irqfd->irq_entry_sc);
+                       irqfd->irq_entry.type = 0;
+                       write_seqcount_end(&irqfd->irq_entry_sc);
                        irqfd_deactivate(irqfd);
                }
        }
@@ -523,20 +600,17 @@ kvm_irqfd_release(struct kvm *kvm)
 }
 
 /*
- * Change irq_routing and irqfd.
- * Caller must invoke synchronize_rcu afterwards.
+ * Take note of a change in irq routing.
+ * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
  */
-void kvm_irq_routing_update(struct kvm *kvm,
-                           struct kvm_irq_routing_table *irq_rt)
+void kvm_irq_routing_update(struct kvm *kvm)
 {
        struct _irqfd *irqfd;
 
        spin_lock_irq(&kvm->irqfds.lock);
 
-       rcu_assign_pointer(kvm->irq_routing, irq_rt);
-
        list_for_each_entry(irqfd, &kvm->irqfds.items, list)
-               irqfd_update(kvm, irqfd, irq_rt);
+               irqfd_update(kvm, irqfd);
 
        spin_unlock_irq(&kvm->irqfds.lock);
 }
index 39dc5bc742e0596f4183f615c136205e49c91e18..b47541dd798f9acadd198a85eaf60579fbdb70fa 100644 (file)
@@ -203,10 +203,9 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
        spin_lock(&ioapic->lock);
        for (index = 0; index < IOAPIC_NUM_PINS; index++) {
                e = &ioapic->redirtbl[index];
-               if (!e->fields.mask &&
-                       (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
-                        kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
-                                index) || index == RTC_GSI)) {
+               if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
+                   kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
+                   index == RTC_GSI) {
                        if (kvm_apic_match_dest(vcpu, NULL, 0,
                                e->fields.dest_id, e->fields.dest_mode)) {
                                __set_bit(e->fields.vector,
@@ -520,7 +519,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
        return 0;
 }
 
-void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
+static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
 {
        int i;
 
index 615d8c995c3c1fec2bdfc4f875bcad477bee9e53..90d43e95dcf85151f8543b8faeb80419f844ba0b 100644 (file)
@@ -91,7 +91,6 @@ void kvm_ioapic_destroy(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
                       int level, bool line_status);
 void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
-void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
                struct kvm_lapic_irq *irq, unsigned long *dest_map);
 int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
index c329c8fc57f45807ad16729c97d5fa21d0fc81e8..36f4e82c6b249cd2671e46c325834888c420df45 100644 (file)
@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages);
 
 static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
-                          unsigned long size)
+                          unsigned long npages)
 {
        gfn_t end_gfn;
        pfn_t pfn;
 
        pfn     = gfn_to_pfn_memslot(slot, gfn);
-       end_gfn = gfn + (size >> PAGE_SHIFT);
+       end_gfn = gfn + npages;
        gfn    += 1;
 
        if (is_error_noslot_pfn(pfn))
@@ -61,6 +61,14 @@ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
        return pfn;
 }
 
+static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
+{
+       unsigned long i;
+
+       for (i = 0; i < npages; ++i)
+               kvm_release_pfn_clean(pfn + i);
+}
+
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
        gfn_t gfn, end_gfn;
@@ -111,7 +119,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
                 * Pin all pages we are about to map in memory. This is
                 * important because we unmap and unpin in 4kb steps later.
                 */
-               pfn = kvm_pin_pages(slot, gfn, page_size);
+               pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
                if (is_error_noslot_pfn(pfn)) {
                        gfn += 1;
                        continue;
@@ -123,6 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_address:"
                               "iommu failed to map pfn=%llx\n", pfn);
+                       kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
                        goto unmap_pages;
                }
 
@@ -134,7 +143,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
        return 0;
 
 unmap_pages:
-       kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
+       kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
        return r;
 }
 
@@ -194,11 +203,7 @@ int kvm_assign_device(struct kvm *kvm,
 
        pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
 
-       printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
-               assigned_dev->host_segnr,
-               assigned_dev->host_busnr,
-               PCI_SLOT(assigned_dev->host_devfn),
-               PCI_FUNC(assigned_dev->host_devfn));
+       dev_info(&pdev->dev, "kvm assign device\n");
 
        return 0;
 out_unmap:
@@ -224,11 +229,7 @@ int kvm_deassign_device(struct kvm *kvm,
 
        pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
 
-       printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
-               assigned_dev->host_segnr,
-               assigned_dev->host_busnr,
-               PCI_SLOT(assigned_dev->host_devfn),
-               PCI_FUNC(assigned_dev->host_devfn));
+       dev_info(&pdev->dev, "kvm deassign device\n");
 
        return 0;
 }
@@ -272,14 +273,6 @@ out_unlock:
        return r;
 }
 
-static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
-{
-       unsigned long i;
-
-       for (i = 0; i < npages; ++i)
-               kvm_release_pfn_clean(pfn + i);
-}
-
 static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
 {
index e2e6b4473a96fafc98dc85aa6c2e8fa49fcd1c09..963b8995a9e8a8035ad4df200afb20ca55d6fa1f 100644 (file)
@@ -160,9 +160,10 @@ static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
  */
 int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 {
+       struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
        struct kvm_kernel_irq_routing_entry *e;
        int ret = -EINVAL;
-       struct kvm_irq_routing_table *irq_rt;
+       int idx;
 
        trace_kvm_set_irq(irq, level, irq_source_id);
 
@@ -174,17 +175,15 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
         * Since there's no easy way to do this, we only support injecting MSI
         * which is limited to 1:1 GSI mapping.
         */
-       rcu_read_lock();
-       irq_rt = rcu_dereference(kvm->irq_routing);
-       if (irq < irq_rt->nr_rt_entries)
-               hlist_for_each_entry(e, &irq_rt->map[irq], link) {
-                       if (likely(e->type == KVM_IRQ_ROUTING_MSI))
-                               ret = kvm_set_msi_inatomic(e, kvm);
-                       else
-                               ret = -EWOULDBLOCK;
-                       break;
-               }
-       rcu_read_unlock();
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       if (kvm_irq_map_gsi(kvm, entries, irq) > 0) {
+               e = &entries[0];
+               if (likely(e->type == KVM_IRQ_ROUTING_MSI))
+                       ret = kvm_set_msi_inatomic(e, kvm);
+               else
+                       ret = -EWOULDBLOCK;
+       }
+       srcu_read_unlock(&kvm->irq_srcu, idx);
        return ret;
 }
 
@@ -253,26 +252,25 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
        mutex_lock(&kvm->irq_lock);
        hlist_del_rcu(&kimn->link);
        mutex_unlock(&kvm->irq_lock);
-       synchronize_rcu();
+       synchronize_srcu(&kvm->irq_srcu);
 }
 
 void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask)
 {
        struct kvm_irq_mask_notifier *kimn;
-       int gsi;
+       int idx, gsi;
 
-       rcu_read_lock();
-       gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
        if (gsi != -1)
                hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
                        if (kimn->irq == gsi)
                                kimn->func(kimn, mask);
-       rcu_read_unlock();
+       srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
-int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
-                         struct kvm_kernel_irq_routing_entry *e,
+int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue)
 {
        int r = -EINVAL;
@@ -303,7 +301,6 @@ int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
                e->irqchip.pin = ue->u.irqchip.pin + delta;
                if (e->irqchip.pin >= max_pin)
                        goto out;
-               rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
                break;
        case KVM_IRQ_ROUTING_MSI:
                e->set = kvm_set_msi;
@@ -322,13 +319,13 @@ out:
 
 #define IOAPIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,  \
-         .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
+         .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
 #define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)
 
 #ifdef CONFIG_X86
 #  define PIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,  \
-         .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
+         .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
 #  define ROUTING_ENTRY2(irq) \
        IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
 #else
index 20dc9e4a8f6cee7b6d86a6decc92410715c9264c..7f256f31df102e36da59a8ebed636f1c9615cb00 100644 (file)
 
 #include <linux/kvm_host.h>
 #include <linux/slab.h>
+#include <linux/srcu.h>
 #include <linux/export.h>
 #include <trace/events/kvm.h>
 #include "irq.h"
 
-bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
-{
-       struct kvm_irq_ack_notifier *kian;
-       int gsi;
-
-       rcu_read_lock();
-       gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
-       if (gsi != -1)
-               hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
-                                        link)
-                       if (kian->gsi == gsi) {
-                               rcu_read_unlock();
-                               return true;
-                       }
-
-       rcu_read_unlock();
-
-       return false;
-}
-EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
+struct kvm_irq_routing_table {
+       int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
+       struct kvm_kernel_irq_routing_entry *rt_entries;
+       u32 nr_rt_entries;
+       /*
+        * Array indexed by gsi. Each entry contains list of irq chips
+        * the gsi is connected to.
+        */
+       struct hlist_head map[0];
+};
 
-void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
+int kvm_irq_map_gsi(struct kvm *kvm,
+                   struct kvm_kernel_irq_routing_entry *entries, int gsi)
 {
-       struct kvm_irq_ack_notifier *kian;
-       int gsi;
-
-       trace_kvm_ack_irq(irqchip, pin);
-
-       rcu_read_lock();
-       gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
-       if (gsi != -1)
-               hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
-                                        link)
-                       if (kian->gsi == gsi)
-                               kian->irq_acked(kian);
-       rcu_read_unlock();
-}
+       struct kvm_irq_routing_table *irq_rt;
+       struct kvm_kernel_irq_routing_entry *e;
+       int n = 0;
+
+       irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
+                                       lockdep_is_held(&kvm->irq_lock));
+       if (gsi < irq_rt->nr_rt_entries) {
+               hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
+                       entries[n] = *e;
+                       ++n;
+               }
+       }
 
-void kvm_register_irq_ack_notifier(struct kvm *kvm,
-                                  struct kvm_irq_ack_notifier *kian)
-{
-       mutex_lock(&kvm->irq_lock);
-       hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
-       mutex_unlock(&kvm->irq_lock);
-#ifdef __KVM_HAVE_IOAPIC
-       kvm_vcpu_request_scan_ioapic(kvm);
-#endif
+       return n;
 }
 
-void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
-                                   struct kvm_irq_ack_notifier *kian)
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
-       mutex_lock(&kvm->irq_lock);
-       hlist_del_init_rcu(&kian->link);
-       mutex_unlock(&kvm->irq_lock);
-       synchronize_rcu();
-#ifdef __KVM_HAVE_IOAPIC
-       kvm_vcpu_request_scan_ioapic(kvm);
-#endif
+       struct kvm_irq_routing_table *irq_rt;
+
+       irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+       return irq_rt->chip[irqchip][pin];
 }
 
 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
@@ -114,9 +92,8 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status)
 {
-       struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
-       int ret = -1, i = 0;
-       struct kvm_irq_routing_table *irq_rt;
+       struct kvm_kernel_irq_routing_entry irq_set[KVM_NR_IRQCHIPS];
+       int ret = -1, i, idx;
 
        trace_kvm_set_irq(irq, level, irq_source_id);
 
@@ -124,12 +101,9 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
         * IOAPIC.  So set the bit in both. The guest will ignore
         * writes to the unused one.
         */
-       rcu_read_lock();
-       irq_rt = rcu_dereference(kvm->irq_routing);
-       if (irq < irq_rt->nr_rt_entries)
-               hlist_for_each_entry(e, &irq_rt->map[irq], link)
-                       irq_set[i++] = *e;
-       rcu_read_unlock();
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       i = kvm_irq_map_gsi(kvm, irq_set, irq);
+       srcu_read_unlock(&kvm->irq_srcu, idx);
 
        while(i--) {
                int r;
@@ -170,9 +144,11 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
 
        e->gsi = ue->gsi;
        e->type = ue->type;
-       r = kvm_set_routing_entry(rt, e, ue);
+       r = kvm_set_routing_entry(e, ue);
        if (r)
                goto out;
+       if (e->type == KVM_IRQ_ROUTING_IRQCHIP)
+               rt->chip[e->irqchip.irqchip][e->irqchip.pin] = e->gsi;
 
        hlist_add_head(&e->link, &rt->map[e->gsi]);
        r = 0;
@@ -223,10 +199,11 @@ int kvm_set_irq_routing(struct kvm *kvm,
 
        mutex_lock(&kvm->irq_lock);
        old = kvm->irq_routing;
-       kvm_irq_routing_update(kvm, new);
+       rcu_assign_pointer(kvm->irq_routing, new);
+       kvm_irq_routing_update(kvm);
        mutex_unlock(&kvm->irq_lock);
 
-       synchronize_rcu();
+       synchronize_srcu_expedited(&kvm->irq_srcu);
 
        new = old;
        r = 0;
index 8cf1cd2fadaab1260df022ba4248c92a47e82724..4eed4cd9b58b36cc54a9328460db616f2e4e3362 100644 (file)
@@ -52,6 +52,7 @@
 
 #include <asm/processor.h>
 #include <asm/io.h>
+#include <asm/ioctl.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 
@@ -70,7 +71,8 @@ MODULE_LICENSE("GPL");
  *             kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
-DEFINE_RAW_SPINLOCK(kvm_lock);
+DEFINE_SPINLOCK(kvm_lock);
+static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
 
 static cpumask_var_t cpus_hardware_enabled;
@@ -94,36 +96,22 @@ static int hardware_enable_all(void);
 static void hardware_disable_all(void);
 
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+static void update_memslots(struct kvm_memslots *slots,
+                           struct kvm_memory_slot *new, u64 last_generation);
 
-bool kvm_rebooting;
+static void kvm_release_pfn_dirty(pfn_t pfn);
+static void mark_page_dirty_in_slot(struct kvm *kvm,
+                                   struct kvm_memory_slot *memslot, gfn_t gfn);
+
+__visible bool kvm_rebooting;
 EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
 bool kvm_is_mmio_pfn(pfn_t pfn)
 {
-       if (pfn_valid(pfn)) {
-               int reserved;
-               struct page *tail = pfn_to_page(pfn);
-               struct page *head = compound_head(tail);
-               reserved = PageReserved(head);
-               if (head != tail) {
-                       /*
-                        * "head" is not a dangling pointer
-                        * (compound_head takes care of that)
-                        * but the hugepage may have been splitted
-                        * from under us (and we may not hold a
-                        * reference count on the head page so it can
-                        * be reused before we run PageReferenced), so
-                        * we've to check PageTail before returning
-                        * what we just read.
-                        */
-                       smp_rmb();
-                       if (PageTail(tail))
-                               return reserved;
-               }
-               return PageReserved(tail);
-       }
+       if (pfn_valid(pfn))
+               return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
 
        return true;
 }
@@ -142,7 +130,8 @@ int vcpu_load(struct kvm_vcpu *vcpu)
                struct pid *oldpid = vcpu->pid;
                struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
                rcu_assign_pointer(vcpu->pid, newpid);
-               synchronize_rcu();
+               if (oldpid)
+                       synchronize_rcu();
                put_pid(oldpid);
        }
        cpu = get_cpu();
@@ -469,14 +458,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
 
        r = kvm_arch_init_vm(kvm, type);
        if (r)
-               goto out_err_nodisable;
+               goto out_err_no_disable;
 
        r = hardware_enable_all();
        if (r)
-               goto out_err_nodisable;
+               goto out_err_no_disable;
 
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
        INIT_HLIST_HEAD(&kvm->mask_notifier_list);
+#endif
+#ifdef CONFIG_HAVE_KVM_IRQFD
        INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
 #endif
 
@@ -485,10 +476,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
        r = -ENOMEM;
        kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
        if (!kvm->memslots)
-               goto out_err_nosrcu;
+               goto out_err_no_srcu;
        kvm_init_memslots_id(kvm);
        if (init_srcu_struct(&kvm->srcu))
-               goto out_err_nosrcu;
+               goto out_err_no_srcu;
+       if (init_srcu_struct(&kvm->irq_srcu))
+               goto out_err_no_irq_srcu;
        for (i = 0; i < KVM_NR_BUSES; i++) {
                kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
                                        GFP_KERNEL);
@@ -510,17 +503,19 @@ static struct kvm *kvm_create_vm(unsigned long type)
        if (r)
                goto out_err;
 
-       raw_spin_lock(&kvm_lock);
+       spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
-       raw_spin_unlock(&kvm_lock);
+       spin_unlock(&kvm_lock);
 
        return kvm;
 
 out_err:
+       cleanup_srcu_struct(&kvm->irq_srcu);
+out_err_no_irq_srcu:
        cleanup_srcu_struct(&kvm->srcu);
-out_err_nosrcu:
+out_err_no_srcu:
        hardware_disable_all();
-out_err_nodisable:
+out_err_no_disable:
        for (i = 0; i < KVM_NR_BUSES; i++)
                kfree(kvm->buses[i]);
        kfree(kvm->memslots);
@@ -560,24 +555,24 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
 /*
  * Free any memory in @free but not in @dont.
  */
-static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
+static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
 {
        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                kvm_destroy_dirty_bitmap(free);
 
-       kvm_arch_free_memslot(free, dont);
+       kvm_arch_free_memslot(kvm, free, dont);
 
        free->npages = 0;
 }
 
-void kvm_free_physmem(struct kvm *kvm)
+static void kvm_free_physmem(struct kvm *kvm)
 {
        struct kvm_memslots *slots = kvm->memslots;
        struct kvm_memory_slot *memslot;
 
        kvm_for_each_memslot(memslot, slots)
-               kvm_free_physmem_slot(memslot, NULL);
+               kvm_free_physmem_slot(kvm, memslot, NULL);
 
        kfree(kvm->memslots);
 }
@@ -601,9 +596,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
        struct mm_struct *mm = kvm->mm;
 
        kvm_arch_sync_events(kvm);
-       raw_spin_lock(&kvm_lock);
+       spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
-       raw_spin_unlock(&kvm_lock);
+       spin_unlock(&kvm_lock);
        kvm_free_irq_routing(kvm);
        for (i = 0; i < KVM_NR_BUSES; i++)
                kvm_io_bus_destroy(kvm->buses[i]);
@@ -616,6 +611,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
        kvm_arch_destroy_vm(kvm);
        kvm_destroy_devices(kvm);
        kvm_free_physmem(kvm);
+       cleanup_srcu_struct(&kvm->irq_srcu);
        cleanup_srcu_struct(&kvm->srcu);
        kvm_arch_free_vm(kvm);
        hardware_disable_all();
@@ -693,8 +689,9 @@ static void sort_memslots(struct kvm_memslots *slots)
                slots->id_to_index[slots->memslots[i].id] = i;
 }
 
-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
-                    u64 last_generation)
+static void update_memslots(struct kvm_memslots *slots,
+                           struct kvm_memory_slot *new,
+                           u64 last_generation)
 {
        if (new) {
                int id = new->id;
@@ -713,7 +710,7 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
 {
        u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
 
-#ifdef KVM_CAP_READONLY_MEM
+#ifdef __KVM_HAVE_READONLY_MEM
        valid_flags |= KVM_MEM_READONLY;
 #endif
 
@@ -731,7 +728,10 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
        update_memslots(slots, new, kvm->memslots->generation);
        rcu_assign_pointer(kvm->memslots, slots);
        synchronize_srcu_expedited(&kvm->srcu);
-       return old_memslots; 
+
+       kvm_arch_memslots_updated(kvm);
+
+       return old_memslots;
 }
 
 /*
@@ -779,7 +779,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;
 
-       r = -EINVAL;
        if (npages > KVM_MEM_MAX_NR_PAGES)
                goto out;
 
@@ -793,7 +792,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
        new.npages = npages;
        new.flags = mem->flags;
 
-       r = -EINVAL;
        if (npages) {
                if (!old.npages)
                        change = KVM_MR_CREATE;
@@ -838,7 +836,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
        if (change == KVM_MR_CREATE) {
                new.userspace_addr = mem->userspace_addr;
 
-               if (kvm_arch_create_memslot(&new, npages))
+               if (kvm_arch_create_memslot(kvm, &new, npages))
                        goto out_free;
        }
 
@@ -849,7 +847,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
        }
 
        if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
-               r = -ENOMEM;
                slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
                                GFP_KERNEL);
                if (!slots)
@@ -889,6 +886,19 @@ int __kvm_set_memory_region(struct kvm *kvm,
                        goto out_free;
        }
 
+       /* actual memory is freed via old in kvm_free_physmem_slot below */
+       if (change == KVM_MR_DELETE) {
+               new.dirty_bitmap = NULL;
+               memset(&new.arch, 0, sizeof(new.arch));
+       }
+
+       old_memslots = install_new_memslots(kvm, slots, &new);
+
+       kvm_arch_commit_memory_region(kvm, mem, &old, change);
+
+       kvm_free_physmem_slot(kvm, &old, &new);
+       kfree(old_memslots);
+
        /*
         * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
         * un-mapped and re-mapped if their base changes.  Since base change
@@ -900,29 +910,15 @@ int __kvm_set_memory_region(struct kvm *kvm,
         */
        if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
                r = kvm_iommu_map_pages(kvm, &new);
-               if (r)
-                       goto out_slots;
-       }
-
-       /* actual memory is freed via old in kvm_free_physmem_slot below */
-       if (change == KVM_MR_DELETE) {
-               new.dirty_bitmap = NULL;
-               memset(&new.arch, 0, sizeof(new.arch));
+               return r;
        }
 
-       old_memslots = install_new_memslots(kvm, slots, &new);
-
-       kvm_arch_commit_memory_region(kvm, mem, &old, change);
-
-       kvm_free_physmem_slot(&old, &new);
-       kfree(old_memslots);
-
        return 0;
 
 out_slots:
        kfree(slots);
 out_free:
-       kvm_free_physmem_slot(&new, &old);
+       kvm_free_physmem_slot(kvm, &new, &old);
 out:
        return r;
 }
@@ -940,8 +936,8 @@ int kvm_set_memory_region(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 
-int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
-                                  struct kvm_userspace_memory_region *mem)
+static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
+                                         struct kvm_userspace_memory_region *mem)
 {
        if (mem->slot >= KVM_USER_MEM_SLOTS)
                return -EINVAL;
@@ -1062,7 +1058,7 @@ static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
 }
 
 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
-                                gfn_t gfn)
+                                       gfn_t gfn)
 {
        return gfn_to_hva_many(slot, gfn, NULL);
 }
@@ -1075,12 +1071,25 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
 /*
- * The hva returned by this function is only allowed to be read.
- * It should pair with kvm_read_hva() or kvm_read_hva_atomic().
+ * If writable is set to false, the hva returned by this function is only
+ * allowed to be read.
  */
-static unsigned long gfn_to_hva_read(struct kvm *kvm, gfn_t gfn)
+unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
+                                     gfn_t gfn, bool *writable)
+{
+       unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
+
+       if (!kvm_is_error_hva(hva) && writable)
+               *writable = !memslot_is_readonly(slot);
+
+       return hva;
+}
+
+unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
 {
-       return __gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL, false);
+       struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+
+       return gfn_to_hva_memslot_prot(slot, gfn, writable);
 }
 
 static int kvm_read_hva(void *data, void __user *hva, int len)
@@ -1396,18 +1405,11 @@ void kvm_release_page_dirty(struct page *page)
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
 
-void kvm_release_pfn_dirty(pfn_t pfn)
+static void kvm_release_pfn_dirty(pfn_t pfn)
 {
        kvm_set_pfn_dirty(pfn);
        kvm_release_pfn_clean(pfn);
 }
-EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
-
-void kvm_set_page_dirty(struct page *page)
-{
-       kvm_set_pfn_dirty(page_to_pfn(page));
-}
-EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
@@ -1447,7 +1449,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
        int r;
        unsigned long addr;
 
-       addr = gfn_to_hva_read(kvm, gfn);
+       addr = gfn_to_hva_prot(kvm, gfn, NULL);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = kvm_read_hva(data, (void __user *)addr + offset, len);
@@ -1485,7 +1487,7 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = offset_in_page(gpa);
 
-       addr = gfn_to_hva_read(kvm, gfn);
+       addr = gfn_to_hva_prot(kvm, gfn, NULL);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        pagefault_disable();
@@ -1624,8 +1626,9 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
 
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 {
-       return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
-                                   offset, len);
+       const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
+
+       return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
 
@@ -1648,8 +1651,9 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest);
 
-void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                            gfn_t gfn)
+static void mark_page_dirty_in_slot(struct kvm *kvm,
+                                   struct kvm_memory_slot *memslot,
+                                   gfn_t gfn)
 {
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;
@@ -1716,14 +1720,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
 #endif /* !CONFIG_S390 */
 
-void kvm_resched(struct kvm_vcpu *vcpu)
-{
-       if (!need_resched())
-               return;
-       cond_resched();
-}
-EXPORT_SYMBOL_GPL(kvm_resched);
-
 bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
 {
        struct pid *pid;
@@ -1733,7 +1729,7 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
        rcu_read_lock();
        pid = rcu_dereference(target->pid);
        if (pid)
-               task = get_pid_task(target->pid, PIDTYPE_PID);
+               task = get_pid_task(pid, PIDTYPE_PID);
        rcu_read_unlock();
        if (!task)
                return ret;
@@ -1748,7 +1744,6 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
 
-#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
 /*
  * Helper that checks whether a VCPU is eligible for directed yield.
  * Most eligible candidate to yield is decided by following heuristics:
@@ -1771,20 +1766,22 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
  *  locking does not harm. It may result in trying to yield to  same VCPU, fail
  *  and continue with next VCPU and so on.
  */
-bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 {
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        bool eligible;
 
        eligible = !vcpu->spin_loop.in_spin_loop ||
-                       (vcpu->spin_loop.in_spin_loop &&
-                        vcpu->spin_loop.dy_eligible);
+                   vcpu->spin_loop.dy_eligible;
 
        if (vcpu->spin_loop.in_spin_loop)
                kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
 
        return eligible;
-}
+#else
+       return true;
 #endif
+}
 
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
@@ -1815,7 +1812,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
                                continue;
                        if (vcpu == me)
                                continue;
-                       if (waitqueue_active(&vcpu->wq))
+                       if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
                                continue;
                        if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
                                continue;
@@ -1893,7 +1890,7 @@ static struct file_operations kvm_vcpu_fops = {
  */
 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 {
-       return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
+       return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
 }
 
 /*
@@ -1981,6 +1978,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
        if (vcpu->kvm->mm != current->mm)
                return -EIO;
 
+       if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
+               return -EINVAL;
+
 #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
        /*
         * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
@@ -2265,6 +2265,29 @@ struct kvm_device *kvm_device_from_filp(struct file *filp)
        return filp->private_data;
 }
 
+static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
+#ifdef CONFIG_KVM_MPIC
+       [KVM_DEV_TYPE_FSL_MPIC_20]      = &kvm_mpic_ops,
+       [KVM_DEV_TYPE_FSL_MPIC_42]      = &kvm_mpic_ops,
+#endif
+
+#ifdef CONFIG_KVM_XICS
+       [KVM_DEV_TYPE_XICS]             = &kvm_xics_ops,
+#endif
+};
+
+int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
+{
+       if (type >= ARRAY_SIZE(kvm_device_ops_table))
+               return -ENOSPC;
+
+       if (kvm_device_ops_table[type] != NULL)
+               return -EEXIST;
+
+       kvm_device_ops_table[type] = ops;
+       return 0;
+}
+
 static int kvm_ioctl_create_device(struct kvm *kvm,
                                   struct kvm_create_device *cd)
 {
@@ -2273,21 +2296,12 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
        bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
        int ret;
 
-       switch (cd->type) {
-#ifdef CONFIG_KVM_MPIC
-       case KVM_DEV_TYPE_FSL_MPIC_20:
-       case KVM_DEV_TYPE_FSL_MPIC_42:
-               ops = &kvm_mpic_ops;
-               break;
-#endif
-#ifdef CONFIG_KVM_XICS
-       case KVM_DEV_TYPE_XICS:
-               ops = &kvm_xics_ops;
-               break;
-#endif
-       default:
+       if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
+               return -ENODEV;
+
+       ops = kvm_device_ops_table[cd->type];
+       if (ops == NULL)
                return -ENODEV;
-       }
 
        if (test)
                return 0;
@@ -2305,7 +2319,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
                return ret;
        }
 
-       ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR);
+       ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
        if (ret < 0) {
                ops->destroy(dev);
                return ret;
@@ -2317,6 +2331,34 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
        return 0;
 }
 
+static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
+{
+       switch (arg) {
+       case KVM_CAP_USER_MEMORY:
+       case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
+       case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+       case KVM_CAP_SET_BOOT_CPU_ID:
+#endif
+       case KVM_CAP_INTERNAL_ERROR_DATA:
+#ifdef CONFIG_HAVE_KVM_MSI
+       case KVM_CAP_SIGNAL_MSI:
+#endif
+#ifdef CONFIG_HAVE_KVM_IRQFD
+       case KVM_CAP_IRQFD_RESAMPLE:
+#endif
+       case KVM_CAP_CHECK_EXTENSION_VM:
+               return 1;
+#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+       case KVM_CAP_IRQ_ROUTING:
+               return KVM_MAX_IRQ_ROUTES;
+#endif
+       default:
+               break;
+       }
+       return kvm_vm_ioctl_check_extension(kvm, arg);
+}
+
 static long kvm_vm_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
 {
@@ -2480,6 +2522,9 @@ static long kvm_vm_ioctl(struct file *filp,
                r = 0;
                break;
        }
+       case KVM_CHECK_EXTENSION:
+               r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
+               break;
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
                if (r == -ENOTTY)
@@ -2533,44 +2578,12 @@ out:
 }
 #endif
 
-static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-       struct page *page[1];
-       unsigned long addr;
-       int npages;
-       gfn_t gfn = vmf->pgoff;
-       struct kvm *kvm = vma->vm_file->private_data;
-
-       addr = gfn_to_hva(kvm, gfn);
-       if (kvm_is_error_hva(addr))
-               return VM_FAULT_SIGBUS;
-
-       npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
-                               NULL);
-       if (unlikely(npages != 1))
-               return VM_FAULT_SIGBUS;
-
-       vmf->page = page[0];
-       return 0;
-}
-
-static const struct vm_operations_struct kvm_vm_vm_ops = {
-       .fault = kvm_vm_fault,
-};
-
-static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
-{
-       vma->vm_ops = &kvm_vm_vm_ops;
-       return 0;
-}
-
 static struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = kvm_vm_compat_ioctl,
 #endif
-       .mmap           = kvm_vm_mmap,
        .llseek         = noop_llseek,
 };
 
@@ -2589,40 +2602,13 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
                return r;
        }
 #endif
-       r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
+       r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC);
        if (r < 0)
                kvm_put_kvm(kvm);
 
        return r;
 }
 
-static long kvm_dev_ioctl_check_extension_generic(long arg)
-{
-       switch (arg) {
-       case KVM_CAP_USER_MEMORY:
-       case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
-       case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-       case KVM_CAP_SET_BOOT_CPU_ID:
-#endif
-       case KVM_CAP_INTERNAL_ERROR_DATA:
-#ifdef CONFIG_HAVE_KVM_MSI
-       case KVM_CAP_SIGNAL_MSI:
-#endif
-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
-       case KVM_CAP_IRQFD_RESAMPLE:
-#endif
-               return 1;
-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
-       case KVM_CAP_IRQ_ROUTING:
-               return KVM_MAX_IRQ_ROUTES;
-#endif
-       default:
-               break;
-       }
-       return kvm_dev_ioctl_check_extension(arg);
-}
-
 static long kvm_dev_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
 {
@@ -2630,7 +2616,6 @@ static long kvm_dev_ioctl(struct file *filp,
 
        switch (ioctl) {
        case KVM_GET_API_VERSION:
-               r = -EINVAL;
                if (arg)
                        goto out;
                r = KVM_API_VERSION;
@@ -2639,10 +2624,9 @@ static long kvm_dev_ioctl(struct file *filp,
                r = kvm_dev_ioctl_create_vm(arg);
                break;
        case KVM_CHECK_EXTENSION:
-               r = kvm_dev_ioctl_check_extension_generic(arg);
+               r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
                break;
        case KVM_GET_VCPU_MMAP_SIZE:
-               r = -EINVAL;
                if (arg)
                        goto out;
                r = PAGE_SIZE;     /* struct kvm_run */
@@ -2687,7 +2671,7 @@ static void hardware_enable_nolock(void *junk)
 
        cpumask_set_cpu(cpu, cpus_hardware_enabled);
 
-       r = kvm_arch_hardware_enable(NULL);
+       r = kvm_arch_hardware_enable();
 
        if (r) {
                cpumask_clear_cpu(cpu, cpus_hardware_enabled);
@@ -2697,11 +2681,12 @@ static void hardware_enable_nolock(void *junk)
        }
 }
 
-static void hardware_enable(void *junk)
+static void hardware_enable(void)
 {
-       raw_spin_lock(&kvm_lock);
-       hardware_enable_nolock(junk);
-       raw_spin_unlock(&kvm_lock);
+       raw_spin_lock(&kvm_count_lock);
+       if (kvm_usage_count)
+               hardware_enable_nolock(NULL);
+       raw_spin_unlock(&kvm_count_lock);
 }
 
 static void hardware_disable_nolock(void *junk)
@@ -2711,14 +2696,15 @@ static void hardware_disable_nolock(void *junk)
        if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
                return;
        cpumask_clear_cpu(cpu, cpus_hardware_enabled);
-       kvm_arch_hardware_disable(NULL);
+       kvm_arch_hardware_disable();
 }
 
-static void hardware_disable(void *junk)
+static void hardware_disable(void)
 {
-       raw_spin_lock(&kvm_lock);
-       hardware_disable_nolock(junk);
-       raw_spin_unlock(&kvm_lock);
+       raw_spin_lock(&kvm_count_lock);
+       if (kvm_usage_count)
+               hardware_disable_nolock(NULL);
+       raw_spin_unlock(&kvm_count_lock);
 }
 
 static void hardware_disable_all_nolock(void)
@@ -2732,16 +2718,16 @@ static void hardware_disable_all_nolock(void)
 
 static void hardware_disable_all(void)
 {
-       raw_spin_lock(&kvm_lock);
+       raw_spin_lock(&kvm_count_lock);
        hardware_disable_all_nolock();
-       raw_spin_unlock(&kvm_lock);
+       raw_spin_unlock(&kvm_count_lock);
 }
 
 static int hardware_enable_all(void)
 {
        int r = 0;
 
-       raw_spin_lock(&kvm_lock);
+       raw_spin_lock(&kvm_count_lock);
 
        kvm_usage_count++;
        if (kvm_usage_count == 1) {
@@ -2754,7 +2740,7 @@ static int hardware_enable_all(void)
                }
        }
 
-       raw_spin_unlock(&kvm_lock);
+       raw_spin_unlock(&kvm_count_lock);
 
        return r;
 }
@@ -2764,20 +2750,17 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 {
        int cpu = (long)v;
 
-       if (!kvm_usage_count)
-               return NOTIFY_OK;
-
        val &= ~CPU_TASKS_FROZEN;
        switch (val) {
        case CPU_DYING:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
-               hardware_disable(NULL);
+               hardware_disable();
                break;
        case CPU_STARTING:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
-               hardware_enable(NULL);
+               hardware_enable();
                break;
        }
        return NOTIFY_OK;
@@ -2990,10 +2973,10 @@ static int vm_stat_get(void *_offset, u64 *val)
        struct kvm *kvm;
 
        *val = 0;
-       raw_spin_lock(&kvm_lock);
+       spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                *val += *(u32 *)((void *)kvm + offset);
-       raw_spin_unlock(&kvm_lock);
+       spin_unlock(&kvm_lock);
        return 0;
 }
 
@@ -3007,12 +2990,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
        int i;
 
        *val = 0;
-       raw_spin_lock(&kvm_lock);
+       spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                kvm_for_each_vcpu(i, vcpu, kvm)
                        *val += *(u32 *)((void *)vcpu + offset);
 
-       raw_spin_unlock(&kvm_lock);
+       spin_unlock(&kvm_lock);
        return 0;
 }
 
@@ -3025,7 +3008,7 @@ static const struct file_operations *stat_fops[] = {
 
 static int kvm_init_debug(void)
 {
-       int r = -EFAULT;
+       int r = -EEXIST;
        struct kvm_stats_debugfs_item *p;
 
        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
@@ -3067,7 +3050,7 @@ static int kvm_suspend(void)
 static void kvm_resume(void)
 {
        if (kvm_usage_count) {
-               WARN_ON(raw_spin_is_locked(&kvm_lock));
+               WARN_ON(raw_spin_is_locked(&kvm_count_lock));
                hardware_enable_nolock(NULL);
        }
 }
@@ -3089,6 +3072,8 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
        if (vcpu->preempted)
                vcpu->preempted = false;
 
+       kvm_arch_sched_in(vcpu, cpu);
+
        kvm_arch_vcpu_load(vcpu, cpu);
 }
 
@@ -3184,6 +3169,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 
 out_undebugfs:
        unregister_syscore_ops(&kvm_syscore_ops);
+       misc_deregister(&kvm_dev);
 out_unreg:
        kvm_async_pf_deinit();
 out_free:
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
new file mode 100644 (file)
index 0000000..475487e
--- /dev/null
@@ -0,0 +1,228 @@
+/*
+ * VFIO-KVM bridge pseudo device
+ *
+ * Copyright (C) 2013 Red Hat, Inc.  All rights reserved.
+ *     Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/kvm_host.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+
+struct kvm_vfio_group {
+       struct list_head node;
+       struct vfio_group *vfio_group;
+};
+
+struct kvm_vfio {
+       struct list_head group_list;
+       struct mutex lock;
+};
+
+static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
+{
+       struct vfio_group *vfio_group;
+       struct vfio_group *(*fn)(struct file *);
+
+       fn = symbol_get(vfio_group_get_external_user);
+       if (!fn)
+               return ERR_PTR(-EINVAL);
+
+       vfio_group = fn(filep);
+
+       symbol_put(vfio_group_get_external_user);
+
+       return vfio_group;
+}
+
+static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
+{
+       void (*fn)(struct vfio_group *);
+
+       fn = symbol_get(vfio_group_put_external_user);
+       if (!fn)
+               return;
+
+       fn(vfio_group);
+
+       symbol_put(vfio_group_put_external_user);
+}
+
+static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
+{
+       struct kvm_vfio *kv = dev->private;
+       struct vfio_group *vfio_group;
+       struct kvm_vfio_group *kvg;
+       void __user *argp = (void __user *)arg;
+       struct fd f;
+       int32_t fd;
+       int ret;
+
+       switch (attr) {
+       case KVM_DEV_VFIO_GROUP_ADD:
+               if (get_user(fd, (int32_t __user *)argp))
+                       return -EFAULT;
+
+               f = fdget(fd);
+               if (!f.file)
+                       return -EBADF;
+
+               vfio_group = kvm_vfio_group_get_external_user(f.file);
+               fdput(f);
+
+               if (IS_ERR(vfio_group))
+                       return PTR_ERR(vfio_group);
+
+               mutex_lock(&kv->lock);
+
+               list_for_each_entry(kvg, &kv->group_list, node) {
+                       if (kvg->vfio_group == vfio_group) {
+                               mutex_unlock(&kv->lock);
+                               kvm_vfio_group_put_external_user(vfio_group);
+                               return -EEXIST;
+                       }
+               }
+
+               kvg = kzalloc(sizeof(*kvg), GFP_KERNEL);
+               if (!kvg) {
+                       mutex_unlock(&kv->lock);
+                       kvm_vfio_group_put_external_user(vfio_group);
+                       return -ENOMEM;
+               }
+
+               list_add_tail(&kvg->node, &kv->group_list);
+               kvg->vfio_group = vfio_group;
+
+               mutex_unlock(&kv->lock);
+
+               return 0;
+
+       case KVM_DEV_VFIO_GROUP_DEL:
+               if (get_user(fd, (int32_t __user *)argp))
+                       return -EFAULT;
+
+               f = fdget(fd);
+               if (!f.file)
+                       return -EBADF;
+
+               vfio_group = kvm_vfio_group_get_external_user(f.file);
+               fdput(f);
+
+               if (IS_ERR(vfio_group))
+                       return PTR_ERR(vfio_group);
+
+               ret = -ENOENT;
+
+               mutex_lock(&kv->lock);
+
+               list_for_each_entry(kvg, &kv->group_list, node) {
+                       if (kvg->vfio_group != vfio_group)
+                               continue;
+
+                       list_del(&kvg->node);
+                       kvm_vfio_group_put_external_user(kvg->vfio_group);
+                       kfree(kvg);
+                       ret = 0;
+                       break;
+               }
+
+               mutex_unlock(&kv->lock);
+
+               kvm_vfio_group_put_external_user(vfio_group);
+
+               return ret;
+       }
+
+       return -ENXIO;
+}
+
+static int kvm_vfio_set_attr(struct kvm_device *dev,
+                            struct kvm_device_attr *attr)
+{
+       switch (attr->group) {
+       case KVM_DEV_VFIO_GROUP:
+               return kvm_vfio_set_group(dev, attr->attr, attr->addr);
+       }
+
+       return -ENXIO;
+}
+
+static int kvm_vfio_has_attr(struct kvm_device *dev,
+                            struct kvm_device_attr *attr)
+{
+       switch (attr->group) {
+       case KVM_DEV_VFIO_GROUP:
+               switch (attr->attr) {
+               case KVM_DEV_VFIO_GROUP_ADD:
+               case KVM_DEV_VFIO_GROUP_DEL:
+                       return 0;
+               }
+
+               break;
+       }
+
+       return -ENXIO;
+}
+
+static void kvm_vfio_destroy(struct kvm_device *dev)
+{
+       struct kvm_vfio *kv = dev->private;
+       struct kvm_vfio_group *kvg, *tmp;
+
+       list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
+               kvm_vfio_group_put_external_user(kvg->vfio_group);
+               list_del(&kvg->node);
+               kfree(kvg);
+       }
+
+       kfree(kv);
+       kfree(dev); /* dev was allocated by kvm_ioctl_create_device(); ops->destroy owns freeing it */
+}
+
+static int kvm_vfio_create(struct kvm_device *dev, u32 type);
+
+static struct kvm_device_ops kvm_vfio_ops = {
+       .name = "kvm-vfio",
+       .create = kvm_vfio_create,
+       .destroy = kvm_vfio_destroy,
+       .set_attr = kvm_vfio_set_attr,
+       .has_attr = kvm_vfio_has_attr,
+};
+
+static int kvm_vfio_create(struct kvm_device *dev, u32 type)
+{
+       struct kvm_device *tmp;
+       struct kvm_vfio *kv;
+
+       /* Only one VFIO "device" per VM */
+       list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
+               if (tmp->ops == &kvm_vfio_ops)
+                       return -EBUSY;
+
+       kv = kzalloc(sizeof(*kv), GFP_KERNEL);
+       if (!kv)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&kv->group_list);
+       mutex_init(&kv->lock);
+
+       dev->private = kv;
+
+       return 0;
+}
+
+static int __init kvm_vfio_ops_init(void)
+{
+       return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
+}
+module_init(kvm_vfio_ops_init);